[binutils, ARM, 16/16] Add support to VLDR and VSTR of system registers
[binutils-gdb.git] / gas / config / tc-arm.c
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2019 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8
9 This file is part of GAS, the GNU Assembler.
10
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
14 any later version.
15
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24 02110-1301, USA. */
25
26 #include "as.h"
27 #include <limits.h>
28 #include <stdarg.h>
29 #define NO_RELOC 0
30 #include "safe-ctype.h"
31 #include "subsegs.h"
32 #include "obstack.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
35
36 #ifdef OBJ_ELF
37 #include "elf/arm.h"
38 #include "dw2gencfi.h"
39 #endif
40
41 #include "dwarf2dbg.h"
42
43 #ifdef OBJ_ELF
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
46
47 /* This structure holds the unwinding state. */
48
49 static struct
50 {
51 symbolS * proc_start;
52 symbolS * table_entry;
53 symbolS * personality_routine;
54 int personality_index;
55 /* The segment containing the function. */
56 segT saved_seg;
57 subsegT saved_subseg;
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes;
60 int opcode_count;
61 int opcode_alloc;
62 /* The number of bytes pushed to the stack. */
63 offsetT frame_size;
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
70 offsetT fp_offset;
71 int fp_reg;
72 /* Nonzero if an unwind_setfp directive has been seen. */
73 unsigned fp_used:1;
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored:1;
76 } unwind;
77
78 /* Whether --fdpic was given. */
79 static int arm_fdpic;
80
81 #endif /* OBJ_ELF */
82
83 /* Results from operand parsing worker functions. */
84
85 typedef enum
86 {
87 PARSE_OPERAND_SUCCESS,
88 PARSE_OPERAND_FAIL,
89 PARSE_OPERAND_FAIL_NO_BACKTRACK
90 } parse_operand_result;
91
92 enum arm_float_abi
93 {
94 ARM_FLOAT_ABI_HARD,
95 ARM_FLOAT_ABI_SOFTFP,
96 ARM_FLOAT_ABI_SOFT
97 };
98
99 /* Types of processor to assemble for. */
100 #ifndef CPU_DEFAULT
101 /* The code that was here used to select a default CPU depending on compiler
102 pre-defines which were only present when doing native builds, thus
103 changing gas' default behaviour depending upon the build host.
104
105    If you have a target that requires a default CPU option then you
106 should define CPU_DEFAULT here. */
107 #endif
108
109 #ifndef FPU_DEFAULT
110 # ifdef TE_LINUX
111 # define FPU_DEFAULT FPU_ARCH_FPA
112 # elif defined (TE_NetBSD)
113 # ifdef OBJ_ELF
114 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
115 # else
116 /* Legacy a.out format. */
117 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
118 # endif
119 # elif defined (TE_VXWORKS)
120 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
121 # else
122 /* For backwards compatibility, default to FPA. */
123 # define FPU_DEFAULT FPU_ARCH_FPA
124 # endif
125 #endif /* ifndef FPU_DEFAULT */
126
127 #define streq(a, b) (strcmp (a, b) == 0)
128
129 /* Current set of feature bits available (CPU+FPU). Different from
130 selected_cpu + selected_fpu in case of autodetection since the CPU
131 feature bits are then all set. */
132 static arm_feature_set cpu_variant;
133 /* Feature bits used in each execution state. Used to set build attribute
134 (in particular Tag_*_ISA_use) in CPU autodetection mode. */
135 static arm_feature_set arm_arch_used;
136 static arm_feature_set thumb_arch_used;
137
138 /* Flags stored in private area of BFD structure. */
139 static int uses_apcs_26 = FALSE;
140 static int atpcs = FALSE;
141 static int support_interwork = FALSE;
142 static int uses_apcs_float = FALSE;
143 static int pic_code = FALSE;
144 static int fix_v4bx = FALSE;
145 /* Warn on using deprecated features. */
146 static int warn_on_deprecated = TRUE;
147
148 /* Understand CodeComposer Studio assembly syntax. */
149 bfd_boolean codecomposer_syntax = FALSE;
150
151 /* Variables that we set while parsing command-line options. Once all
152 options have been read we re-process these values to set the real
153 assembly flags. */
154
155 /* CPU and FPU feature bits set for legacy CPU and FPU options (eg. -marm1
156 instead of -mcpu=arm1). */
157 static const arm_feature_set *legacy_cpu = NULL;
158 static const arm_feature_set *legacy_fpu = NULL;
159
160 /* CPU, extension and FPU feature bits selected by -mcpu. */
161 static const arm_feature_set *mcpu_cpu_opt = NULL;
162 static arm_feature_set *mcpu_ext_opt = NULL;
163 static const arm_feature_set *mcpu_fpu_opt = NULL;
164
165 /* CPU, extension and FPU feature bits selected by -march. */
166 static const arm_feature_set *march_cpu_opt = NULL;
167 static arm_feature_set *march_ext_opt = NULL;
168 static const arm_feature_set *march_fpu_opt = NULL;
169
170 /* Feature bits selected by -mfpu. */
171 static const arm_feature_set *mfpu_opt = NULL;
172
173 /* Constants for known architecture features. */
174 static const arm_feature_set fpu_default = FPU_DEFAULT;
175 static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V1;
176 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
177 static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V3;
178 static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED = FPU_ARCH_NEON_V1;
179 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
180 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
181 #ifdef OBJ_ELF
182 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
183 #endif
184 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
185
186 #ifdef CPU_DEFAULT
187 static const arm_feature_set cpu_default = CPU_DEFAULT;
188 #endif
189
190 static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
191 static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V2);
192 static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
193 static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
194 static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
195 static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
196 static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
197 static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
198 static const arm_feature_set arm_ext_v4t_5 =
199 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
200 static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
201 static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
202 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
203 static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
204 static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
205 static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
206 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
207 /* Only for compatibility of hint instructions.  */
208 static const arm_feature_set arm_ext_v6k_v6t2 =
209 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K | ARM_EXT_V6T2);
210 static const arm_feature_set arm_ext_v6_notm =
211 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
212 static const arm_feature_set arm_ext_v6_dsp =
213 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
214 static const arm_feature_set arm_ext_barrier =
215 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
216 static const arm_feature_set arm_ext_msr =
217 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
218 static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
219 static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
220 static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
221 static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
222 #ifdef OBJ_ELF
223 static const arm_feature_set ATTRIBUTE_UNUSED arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
224 #endif
225 static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
226 static const arm_feature_set arm_ext_m =
227 ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_V7M,
228 ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
229 static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
230 static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
231 static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
232 static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
233 static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
234 static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
235 static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
236 static const arm_feature_set arm_ext_v8m_main =
237 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN);
238 static const arm_feature_set arm_ext_v8_1m_main =
239 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN);
240 /* Instructions in ARMv8-M only found in M profile architectures. */
241 static const arm_feature_set arm_ext_v8m_m_only =
242 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
243 static const arm_feature_set arm_ext_v6t2_v8m =
244 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
245 /* Instructions shared between ARMv8-A and ARMv8-M. */
246 static const arm_feature_set arm_ext_atomics =
247 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
248 #ifdef OBJ_ELF
249 /* DSP instructions Tag_DSP_extension refers to. */
250 static const arm_feature_set arm_ext_dsp =
251 ARM_FEATURE_CORE_LOW (ARM_EXT_V5E | ARM_EXT_V5ExP | ARM_EXT_V6_DSP);
252 #endif
253 static const arm_feature_set arm_ext_ras =
254 ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS);
255 /* FP16 instructions. */
256 static const arm_feature_set arm_ext_fp16 =
257 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
258 static const arm_feature_set arm_ext_fp16_fml =
259 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_FML);
260 static const arm_feature_set arm_ext_v8_2 =
261 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A);
262 static const arm_feature_set arm_ext_v8_3 =
263 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A);
264 static const arm_feature_set arm_ext_sb =
265 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB);
266 static const arm_feature_set arm_ext_predres =
267 ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES);
268
269 static const arm_feature_set arm_arch_any = ARM_ANY;
270 #ifdef OBJ_ELF
271 static const arm_feature_set fpu_any = FPU_ANY;
272 #endif
273 static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED = ARM_FEATURE (-1, -1, -1);
274 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
275 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
276
277 static const arm_feature_set arm_cext_iwmmxt2 =
278 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
279 static const arm_feature_set arm_cext_iwmmxt =
280 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
281 static const arm_feature_set arm_cext_xscale =
282 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
283 static const arm_feature_set arm_cext_maverick =
284 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
285 static const arm_feature_set fpu_fpa_ext_v1 =
286 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
287 static const arm_feature_set fpu_fpa_ext_v2 =
288 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
289 static const arm_feature_set fpu_vfp_ext_v1xd =
290 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
291 static const arm_feature_set fpu_vfp_ext_v1 =
292 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
293 static const arm_feature_set fpu_vfp_ext_v2 =
294 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
295 static const arm_feature_set fpu_vfp_ext_v3xd =
296 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
297 static const arm_feature_set fpu_vfp_ext_v3 =
298 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
299 static const arm_feature_set fpu_vfp_ext_d32 =
300 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
301 static const arm_feature_set fpu_neon_ext_v1 =
302 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
303 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
304 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
305 #ifdef OBJ_ELF
306 static const arm_feature_set fpu_vfp_fp16 =
307 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
308 static const arm_feature_set fpu_neon_ext_fma =
309 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
310 #endif
311 static const arm_feature_set fpu_vfp_ext_fma =
312 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
313 static const arm_feature_set fpu_vfp_ext_armv8 =
314 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
315 static const arm_feature_set fpu_vfp_ext_armv8xd =
316 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
317 static const arm_feature_set fpu_neon_ext_armv8 =
318 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
319 static const arm_feature_set fpu_crypto_ext_armv8 =
320 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
321 static const arm_feature_set crc_ext_armv8 =
322 ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
323 static const arm_feature_set fpu_neon_ext_v8_1 =
324 ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA);
325 static const arm_feature_set fpu_neon_ext_dotprod =
326 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD);
327
328 static int mfloat_abi_opt = -1;
329 /* Architecture feature bits selected by the last -mcpu/-march or .cpu/.arch
330 directive. */
331 static arm_feature_set selected_arch = ARM_ARCH_NONE;
332 /* Extension feature bits selected by the last -mcpu/-march or .arch_extension
333 directive. */
334 static arm_feature_set selected_ext = ARM_ARCH_NONE;
335 /* Feature bits selected by the last -mcpu/-march or by the combination of the
336 last .cpu/.arch directive .arch_extension directives since that
337 directive. */
338 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
339 /* FPU feature bits selected by the last -mfpu or .fpu directive. */
340 static arm_feature_set selected_fpu = FPU_NONE;
341 /* Feature bits selected by the last .object_arch directive. */
342 static arm_feature_set selected_object_arch = ARM_ARCH_NONE;
343 /* Must be long enough to hold any of the names in arm_cpus. */
344 static char selected_cpu_name[20];
345
346 extern FLONUM_TYPE generic_floating_point_number;
347
348 /* Return if no cpu was selected on command-line. */
349 static bfd_boolean
350 no_cpu_selected (void)
351 {
352 return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
353 }
354
355 #ifdef OBJ_ELF
356 # ifdef EABI_DEFAULT
357 static int meabi_flags = EABI_DEFAULT;
358 # else
359 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
360 # endif
361
362 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
363
364 bfd_boolean
365 arm_is_eabi (void)
366 {
367 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
368 }
369 #endif
370
371 #ifdef OBJ_ELF
372 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
373 symbolS * GOT_symbol;
374 #endif
375
376 /* 0: assemble for ARM,
377 1: assemble for Thumb,
378 2: assemble for Thumb even though target CPU does not support thumb
379 instructions. */
380 static int thumb_mode = 0;
381 /* A value distinct from the possible values for thumb_mode that we
382 can use to record whether thumb_mode has been copied into the
383 tc_frag_data field of a frag. */
384 #define MODE_RECORDED (1 << 4)
385
386 /* Specifies the intrinsic IT insn behavior mode. */
387 enum implicit_it_mode
388 {
389 IMPLICIT_IT_MODE_NEVER = 0x00,
390 IMPLICIT_IT_MODE_ARM = 0x01,
391 IMPLICIT_IT_MODE_THUMB = 0x02,
392 IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
393 };
394 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
395
396 /* If unified_syntax is true, we are processing the new unified
397 ARM/Thumb syntax. Important differences from the old ARM mode:
398
399 - Immediate operands do not require a # prefix.
400 - Conditional affixes always appear at the end of the
401 instruction. (For backward compatibility, those instructions
402 that formerly had them in the middle, continue to accept them
403 there.)
404 - The IT instruction may appear, and if it does is validated
405 against subsequent conditional affixes. It does not generate
406 machine code.
407
408 Important differences from the old Thumb mode:
409
410 - Immediate operands do not require a # prefix.
411 - Most of the V6T2 instructions are only available in unified mode.
412 - The .N and .W suffixes are recognized and honored (it is an error
413 if they cannot be honored).
414 - All instructions set the flags if and only if they have an 's' affix.
415 - Conditional affixes may be used. They are validated against
416 preceding IT instructions. Unlike ARM mode, you cannot use a
417 conditional affix except in the scope of an IT instruction. */
418
419 static bfd_boolean unified_syntax = FALSE;
420
421 /* An immediate operand can start with #, and ld*, st*, pld operands
422 can contain [ and ]. We need to tell APP not to elide whitespace
423 before a [, which can appear as the first operand for pld.
424 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
425 const char arm_symbol_chars[] = "#[]{}";
426
427 enum neon_el_type
428 {
429 NT_invtype,
430 NT_untyped,
431 NT_integer,
432 NT_float,
433 NT_poly,
434 NT_signed,
435 NT_unsigned
436 };
437
438 struct neon_type_el
439 {
440 enum neon_el_type type;
441 unsigned size;
442 };
443
444 #define NEON_MAX_TYPE_ELS 4
445
446 struct neon_type
447 {
448 struct neon_type_el el[NEON_MAX_TYPE_ELS];
449 unsigned elems;
450 };
451
452 enum it_instruction_type
453 {
454 OUTSIDE_IT_INSN,
455 INSIDE_IT_INSN,
456 INSIDE_IT_LAST_INSN,
457 IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
458 if inside, should be the last one. */
459 NEUTRAL_IT_INSN, /* This could be either inside or outside,
460 i.e. BKPT and NOP. */
461 IT_INSN /* The IT insn has been parsed. */
462 };
463
464 /* The maximum number of operands we need. */
465 #define ARM_IT_MAX_OPERANDS 6
466 #define ARM_IT_MAX_RELOCS 3
467
468 struct arm_it
469 {
470 const char * error;
471 unsigned long instruction;
472 int size;
473 int size_req;
474 int cond;
475 /* "uncond_value" is set to the value in place of the conditional field in
476 unconditional versions of the instruction, or -1 if nothing is
477 appropriate. */
478 int uncond_value;
479 struct neon_type vectype;
480 /* This does not indicate an actual NEON instruction, only that
481 the mnemonic accepts neon-style type suffixes. */
482 int is_neon;
483 /* Set to the opcode if the instruction needs relaxation.
484 Zero if the instruction is not relaxed. */
485 unsigned long relax;
486 struct
487 {
488 bfd_reloc_code_real_type type;
489 expressionS exp;
490 int pc_rel;
491 } relocs[ARM_IT_MAX_RELOCS];
492
493 enum it_instruction_type it_insn_type;
494
495 struct
496 {
497 unsigned reg;
498 signed int imm;
499 struct neon_type_el vectype;
500 unsigned present : 1; /* Operand present. */
501 unsigned isreg : 1; /* Operand was a register. */
502 unsigned immisreg : 1; /* .imm field is a second register. */
503 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
504 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
505 unsigned immisfloat : 1; /* Immediate was parsed as a float. */
506 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
507 instructions. This allows us to disambiguate ARM <-> vector insns. */
508 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
509 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
510 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
511 unsigned issingle : 1; /* Operand is VFP single-precision register. */
512 unsigned hasreloc : 1; /* Operand has relocation suffix. */
513 unsigned writeback : 1; /* Operand has trailing ! */
514 unsigned preind : 1; /* Preindexed address. */
515 unsigned postind : 1; /* Postindexed address. */
516 unsigned negative : 1; /* Index register was negated. */
517 unsigned shifted : 1; /* Shift applied to operation. */
518 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
519 } operands[ARM_IT_MAX_OPERANDS];
520 };
521
522 static struct arm_it inst;
523
524 #define NUM_FLOAT_VALS 8
525
526 const char * fp_const[] =
527 {
528 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
529 };
530
531 /* Number of littlenums required to hold an extended precision number. */
532 #define MAX_LITTLENUMS 6
533
534 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
535
536 #define FAIL (-1)
537 #define SUCCESS (0)
538
539 #define SUFF_S 1
540 #define SUFF_D 2
541 #define SUFF_E 3
542 #define SUFF_P 4
543
544 #define CP_T_X 0x00008000
545 #define CP_T_Y 0x00400000
546
547 #define CONDS_BIT 0x00100000
548 #define LOAD_BIT 0x00100000
549
550 #define DOUBLE_LOAD_FLAG 0x00000001
551
552 struct asm_cond
553 {
554 const char * template_name;
555 unsigned long value;
556 };
557
558 #define COND_ALWAYS 0xE
559
560 struct asm_psr
561 {
562 const char * template_name;
563 unsigned long field;
564 };
565
566 struct asm_barrier_opt
567 {
568 const char * template_name;
569 unsigned long value;
570 const arm_feature_set arch;
571 };
572
573 /* The bit that distinguishes CPSR and SPSR. */
574 #define SPSR_BIT (1 << 22)
575
576 /* The individual PSR flag bits. */
577 #define PSR_c (1 << 16)
578 #define PSR_x (1 << 17)
579 #define PSR_s (1 << 18)
580 #define PSR_f (1 << 19)
581
582 struct reloc_entry
583 {
584 const char * name;
585 bfd_reloc_code_real_type reloc;
586 };
587
588 enum vfp_reg_pos
589 {
590 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
591 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
592 };
593
594 enum vfp_ldstm_type
595 {
596 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
597 };
598
599 /* Bits for DEFINED field in neon_typed_alias. */
600 #define NTA_HASTYPE 1
601 #define NTA_HASINDEX 2
602
603 struct neon_typed_alias
604 {
605 unsigned char defined;
606 unsigned char index;
607 struct neon_type_el eltype;
608 };
609
610 /* ARM register categories. This includes coprocessor numbers and various
611 architecture extensions' registers. Each entry should have an error message
612 in reg_expected_msgs below. */
613 enum arm_reg_type
614 {
615 REG_TYPE_RN,
616 REG_TYPE_CP,
617 REG_TYPE_CN,
618 REG_TYPE_FN,
619 REG_TYPE_VFS,
620 REG_TYPE_VFD,
621 REG_TYPE_NQ,
622 REG_TYPE_VFSD,
623 REG_TYPE_NDQ,
624 REG_TYPE_NSD,
625 REG_TYPE_NSDQ,
626 REG_TYPE_VFC,
627 REG_TYPE_MVF,
628 REG_TYPE_MVD,
629 REG_TYPE_MVFX,
630 REG_TYPE_MVDX,
631 REG_TYPE_MVAX,
632 REG_TYPE_DSPSC,
633 REG_TYPE_MMXWR,
634 REG_TYPE_MMXWC,
635 REG_TYPE_MMXWCG,
636 REG_TYPE_XSCALE,
637 REG_TYPE_RNB
638 };
639
640 /* Structure for a hash table entry for a register.
641 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
642 information which states whether a vector type or index is specified (for a
643 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
644 struct reg_entry
645 {
646 const char * name;
647 unsigned int number;
648 unsigned char type;
649 unsigned char builtin;
650 struct neon_typed_alias * neon;
651 };
652
653 /* Diagnostics used when we don't get a register of the expected type. */
654 const char * const reg_expected_msgs[] =
655 {
656 [REG_TYPE_RN] = N_("ARM register expected"),
657 [REG_TYPE_CP] = N_("bad or missing co-processor number"),
658 [REG_TYPE_CN] = N_("co-processor register expected"),
659 [REG_TYPE_FN] = N_("FPA register expected"),
660 [REG_TYPE_VFS] = N_("VFP single precision register expected"),
661 [REG_TYPE_VFD] = N_("VFP/Neon double precision register expected"),
662 [REG_TYPE_NQ] = N_("Neon quad precision register expected"),
663 [REG_TYPE_VFSD] = N_("VFP single or double precision register expected"),
664 [REG_TYPE_NDQ] = N_("Neon double or quad precision register expected"),
665 [REG_TYPE_NSD] = N_("Neon single or double precision register expected"),
666 [REG_TYPE_NSDQ] = N_("VFP single, double or Neon quad precision register"
667 " expected"),
668 [REG_TYPE_VFC] = N_("VFP system register expected"),
669 [REG_TYPE_MVF] = N_("Maverick MVF register expected"),
670 [REG_TYPE_MVD] = N_("Maverick MVD register expected"),
671 [REG_TYPE_MVFX] = N_("Maverick MVFX register expected"),
672 [REG_TYPE_MVDX] = N_("Maverick MVDX register expected"),
673 [REG_TYPE_MVAX] = N_("Maverick MVAX register expected"),
674 [REG_TYPE_DSPSC] = N_("Maverick DSPSC register expected"),
675 [REG_TYPE_MMXWR] = N_("iWMMXt data register expected"),
676 [REG_TYPE_MMXWC] = N_("iWMMXt control register expected"),
677 [REG_TYPE_MMXWCG] = N_("iWMMXt scalar register expected"),
678 [REG_TYPE_XSCALE] = N_("XScale accumulator register expected"),
679 [REG_TYPE_RNB] = N_("")
680 };
681
682 /* Some well known registers that we refer to directly elsewhere. */
683 #define REG_R12 12
684 #define REG_SP 13
685 #define REG_LR 14
686 #define REG_PC 15
687
688 /* ARM instructions take 4bytes in the object file, Thumb instructions
689 take 2: */
690 #define INSN_SIZE 4
691
692 struct asm_opcode
693 {
694 /* Basic string to match. */
695 const char * template_name;
696
697 /* Parameters to instruction. */
698 unsigned int operands[8];
699
700 /* Conditional tag - see opcode_lookup. */
701 unsigned int tag : 4;
702
703 /* Basic instruction code. */
704 unsigned int avalue : 28;
705
706 /* Thumb-format instruction code. */
707 unsigned int tvalue;
708
709 /* Which architecture variant provides this instruction. */
710 const arm_feature_set * avariant;
711 const arm_feature_set * tvariant;
712
713 /* Function to call to encode instruction in ARM format. */
714 void (* aencode) (void);
715
716 /* Function to call to encode instruction in Thumb format. */
717 void (* tencode) (void);
718 };
719
720 /* Defines for various bits that we will want to toggle. */
721 #define INST_IMMEDIATE 0x02000000
722 #define OFFSET_REG 0x02000000
723 #define HWOFFSET_IMM 0x00400000
724 #define SHIFT_BY_REG 0x00000010
725 #define PRE_INDEX 0x01000000
726 #define INDEX_UP 0x00800000
727 #define WRITE_BACK 0x00200000
728 #define LDM_TYPE_2_OR_3 0x00400000
729 #define CPSI_MMOD 0x00020000
730
731 #define LITERAL_MASK 0xf000f000
732 #define OPCODE_MASK 0xfe1fffff
733 #define V4_STR_BIT 0x00000020
734 #define VLDR_VMOV_SAME 0x0040f000
735
736 #define T2_SUBS_PC_LR 0xf3de8f00
737
738 #define DATA_OP_SHIFT 21
739 #define SBIT_SHIFT 20
740
741 #define T2_OPCODE_MASK 0xfe1fffff
742 #define T2_DATA_OP_SHIFT 21
743 #define T2_SBIT_SHIFT 20
744
745 #define A_COND_MASK 0xf0000000
746 #define A_PUSH_POP_OP_MASK 0x0fff0000
747
748 /* Opcodes for pushing/poping registers to/from the stack. */
749 #define A1_OPCODE_PUSH 0x092d0000
750 #define A2_OPCODE_PUSH 0x052d0004
751 #define A2_OPCODE_POP 0x049d0004
752
753 /* Codes to distinguish the arithmetic instructions. */
754 #define OPCODE_AND 0
755 #define OPCODE_EOR 1
756 #define OPCODE_SUB 2
757 #define OPCODE_RSB 3
758 #define OPCODE_ADD 4
759 #define OPCODE_ADC 5
760 #define OPCODE_SBC 6
761 #define OPCODE_RSC 7
762 #define OPCODE_TST 8
763 #define OPCODE_TEQ 9
764 #define OPCODE_CMP 10
765 #define OPCODE_CMN 11
766 #define OPCODE_ORR 12
767 #define OPCODE_MOV 13
768 #define OPCODE_BIC 14
769 #define OPCODE_MVN 15
770
771 #define T2_OPCODE_AND 0
772 #define T2_OPCODE_BIC 1
773 #define T2_OPCODE_ORR 2
774 #define T2_OPCODE_ORN 3
775 #define T2_OPCODE_EOR 4
776 #define T2_OPCODE_ADD 8
777 #define T2_OPCODE_ADC 10
778 #define T2_OPCODE_SBC 11
779 #define T2_OPCODE_SUB 13
780 #define T2_OPCODE_RSB 14
781
782 #define T_OPCODE_MUL 0x4340
783 #define T_OPCODE_TST 0x4200
784 #define T_OPCODE_CMN 0x42c0
785 #define T_OPCODE_NEG 0x4240
786 #define T_OPCODE_MVN 0x43c0
787
788 #define T_OPCODE_ADD_R3 0x1800
789 #define T_OPCODE_SUB_R3 0x1a00
790 #define T_OPCODE_ADD_HI 0x4400
791 #define T_OPCODE_ADD_ST 0xb000
792 #define T_OPCODE_SUB_ST 0xb080
793 #define T_OPCODE_ADD_SP 0xa800
794 #define T_OPCODE_ADD_PC 0xa000
795 #define T_OPCODE_ADD_I8 0x3000
796 #define T_OPCODE_SUB_I8 0x3800
797 #define T_OPCODE_ADD_I3 0x1c00
798 #define T_OPCODE_SUB_I3 0x1e00
799
800 #define T_OPCODE_ASR_R 0x4100
801 #define T_OPCODE_LSL_R 0x4080
802 #define T_OPCODE_LSR_R 0x40c0
803 #define T_OPCODE_ROR_R 0x41c0
804 #define T_OPCODE_ASR_I 0x1000
805 #define T_OPCODE_LSL_I 0x0000
806 #define T_OPCODE_LSR_I 0x0800
807
808 #define T_OPCODE_MOV_I8 0x2000
809 #define T_OPCODE_CMP_I8 0x2800
810 #define T_OPCODE_CMP_LR 0x4280
811 #define T_OPCODE_MOV_HR 0x4600
812 #define T_OPCODE_CMP_HR 0x4500
813
814 #define T_OPCODE_LDR_PC 0x4800
815 #define T_OPCODE_LDR_SP 0x9800
816 #define T_OPCODE_STR_SP 0x9000
817 #define T_OPCODE_LDR_IW 0x6800
818 #define T_OPCODE_STR_IW 0x6000
819 #define T_OPCODE_LDR_IH 0x8800
820 #define T_OPCODE_STR_IH 0x8000
821 #define T_OPCODE_LDR_IB 0x7800
822 #define T_OPCODE_STR_IB 0x7000
823 #define T_OPCODE_LDR_RW 0x5800
824 #define T_OPCODE_STR_RW 0x5000
825 #define T_OPCODE_LDR_RH 0x5a00
826 #define T_OPCODE_STR_RH 0x5200
827 #define T_OPCODE_LDR_RB 0x5c00
828 #define T_OPCODE_STR_RB 0x5400
829
830 #define T_OPCODE_PUSH 0xb400
831 #define T_OPCODE_POP 0xbc00
832
833 #define T_OPCODE_BRANCH 0xe000
834
835 #define THUMB_SIZE 2 /* Size of thumb instruction. */
836 #define THUMB_PP_PC_LR 0x0100
837 #define THUMB_LOAD_BIT 0x0800
838 #define THUMB2_LOAD_BIT 0x00100000
839
/* Diagnostic messages used when rejecting an instruction.  Each expands
   to a translated string suitable for assignment to inst.error.  Note
   that none of these may carry a trailing semicolon: the macros are used
   in expression context (e.g. "inst.error = BAD_ADDR_MODE;").  */
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_SP          _("r13 not allowed here")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
/* Stray trailing ';' removed: it previously injected an empty statement
   at every use and broke expression-context uses of this macro.  */
#define BAD_ADDR_MODE   _("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_BRANCH_OFF	_("branch out of range or not a multiple of 2")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
#define BAD_OUT_IT 	_("thumb conditional instruction should be in IT block")
#define BAD_IT_COND	_("incorrect condition in IT block")
#define BAD_IT_IT 	_("IT falling in the range of a previous IT block")
#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE	_("branch out of range")
#define BAD_FP16	_("selected processor does not support fp16 instruction")
#define UNPRED_REG(R)	_("using " R " results in unpredictable behaviour")
#define THUMB1_RELOC_ONLY	_("relocation valid in thumb1 code only")
864
/* Name-lookup hash tables for the parser.  NOTE(review): judging by
   their names these map opcode mnemonics, condition codes, shift names,
   PSR field names, v7-M special-register names, register names,
   relocation names and barrier options respectively; they are presumably
   populated once at start-up (md_begin) -- confirm against the table
   initialisation code elsewhere in this file.  */
static struct hash_control * arm_ops_hsh;
static struct hash_control * arm_cond_hsh;
static struct hash_control * arm_shift_hsh;
static struct hash_control * arm_psr_hsh;
static struct hash_control * arm_v7m_psr_hsh;
static struct hash_control * arm_reg_hsh;
static struct hash_control * arm_reloc_hsh;
static struct hash_control * arm_barrier_opt_hsh;
873
/* Stuff needed to resolve the label ambiguity
   As:
   ...
   label: <insn>
   may differ from:
   ...
   label:
   <insn> */

/* The most recently seen label symbol.  */
symbolS * last_label_seen;
/* Non-zero while the next label should be treated as naming a Thumb
   function.  NOTE(review): semantics inferred from the name -- the
   .thumb_func handling is outside this chunk; confirm there.  */
static int label_is_thumb_function_name = FALSE;
885
/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_pool
{
  /* Constants accumulated for this pool.  */
  expressionS literals [MAX_LITERAL_POOL_SIZE];
  /* Index of the first unused slot in LITERALS.  */
  unsigned int next_free_entry;
  /* Pool identifier; presumably used to build the pool's label name.  */
  unsigned int id;
  /* Symbol marking the pool, if one has been created.  */
  symbolS * symbol;
  /* Section and sub-section this pool belongs to.  */
  segT section;
  subsegT sub_section;
#ifdef OBJ_ELF
  /* Per-literal source locations, for DWARF line information.  */
  struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
#endif
  /* Link in the list_of_pools chain below.  */
  struct literal_pool * next;
  /* Alignment required by the pool's entries.
     NOTE(review): whether this is a byte count or log2 is not visible
     here -- confirm against the code that emits pools.  */
  unsigned int alignment;
} literal_pool;

/* Pointer to a linked list of literal pools.  */
literal_pool * list_of_pools = NULL;
907
/* State of the .asmfunc/.endasmfunc directive pair.
   NOTE(review): inferred from the enumerator names; the directive
   handlers are outside this chunk.  */
typedef enum asmfunc_states
{
  OUTSIDE_ASMFUNC,		/* Not inside an .asmfunc region.  */
  WAITING_ASMFUNC_NAME,		/* .asmfunc seen, awaiting the name.  */
  WAITING_ENDASMFUNC		/* Inside the body, awaiting .endasmfunc.  */
} asmfunc_states;

static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
916
/* Current IT-block tracking state.  For ELF it lives in the per-segment
   info, so each section tracks its own IT state; otherwise a single
   global suffices.  */
#ifdef OBJ_ELF
# define now_it seg_info (now_seg)->tc_segment_info_data.current_it
#else
static struct current_it now_it;
#endif
922
923 static inline int
924 now_it_compatible (int cond)
925 {
926 return (cond & ~1) == (now_it.cc & ~1);
927 }
928
929 static inline int
930 conditional_insn (void)
931 {
932 return inst.cond != COND_ALWAYS;
933 }
934
/* Forward declarations for the IT-block state machine, defined later in
   this file.  */
static int in_it_block (void);

static int handle_it_state (void);

static void force_automatic_it_block_close (void);

static void it_fsm_post_encode (void);

/* Record the IT-relationship of the current instruction and run the IT
   state machine.  NB: expands to a "return;" from the *enclosing*
   function when handle_it_state reports FAIL -- only usable in
   functions returning void.  */
#define set_it_insn_type(type)			\
  do						\
    {						\
      inst.it_insn_type = type;			\
      if (handle_it_state () == FAIL)		\
	return;					\
    }						\
  while (0)

/* As set_it_insn_type, but for use in value-returning functions:
   returns FAILRET from the enclosing function on FAIL.  */
#define set_it_insn_type_nonvoid(type, failret) \
  do						\
    {						\
      inst.it_insn_type = type;			\
      if (handle_it_state () == FAIL)		\
	return failret;				\
    }						\
  while(0)

/* Mark the current instruction as one that must be the last in an IT
   block, distinguishing the conditional and unconditional cases.  May
   return from the enclosing function (via set_it_insn_type).  */
#define set_it_insn_type_last()				\
  do							\
    {							\
      if (inst.cond == COND_ALWAYS)			\
	set_it_insn_type (IF_INSIDE_IT_LAST_INSN);	\
      else						\
	set_it_insn_type (INSIDE_IT_LAST_INSN);		\
    }							\
  while (0)
970
/* Pure syntax. */

/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful. */
char arm_comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output. */
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output. */
/* Also note that comments like this one will always work. */
const char line_comment_chars[] = "#";

char arm_line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers. */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant. */
/* As in 0f12.456 */
/* or 0d1.2345e12 */

const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix characters that indicate the start of an immediate
   value. */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')

/* Separator character handling. */

/* Skips at most ONE space -- by design, not a loop.
   NOTE(review): the operand parsers appear to rely on runs of
   whitespace having been collapsed earlier; confirm before changing
   this into a loop.  */
#define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
1005
1006 static inline int
1007 skip_past_char (char ** str, char c)
1008 {
1009 /* PR gas/14987: Allow for whitespace before the expected character. */
1010 skip_whitespace (*str);
1011
1012 if (**str == c)
1013 {
1014 (*str)++;
1015 return SUCCESS;
1016 }
1017 else
1018 return FAIL;
1019 }
1020
1021 #define skip_past_comma(str) skip_past_char (str, ',')
1022
1023 /* Arithmetic expressions (possibly involving symbols). */
1024
1025 /* Return TRUE if anything in the expression is a bignum. */
1026
1027 static bfd_boolean
1028 walk_no_bignums (symbolS * sp)
1029 {
1030 if (symbol_get_value_expression (sp)->X_op == O_big)
1031 return TRUE;
1032
1033 if (symbol_get_value_expression (sp)->X_add_symbol)
1034 {
1035 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
1036 || (symbol_get_value_expression (sp)->X_op_symbol
1037 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
1038 }
1039
1040 return FALSE;
1041 }
1042
/* Set while expression () runs on behalf of my_get_expression, so that
   md_operand can distinguish those callbacks from other uses.  */
static bfd_boolean in_my_get_expression = FALSE;

/* Third argument to my_get_expression.  */
#define GE_NO_PREFIX 0		/* No '#'/'$' prefix allowed.  */
#define GE_IMM_PREFIX 1		/* '#' or '$' prefix required.  */
#define GE_OPT_PREFIX 2		/* Prefix optional.  */
/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
#define GE_OPT_PREFIX_BIG 3
1052
/* Parse an expression at *STR into EP, honouring PREFIX_MODE (one of
   the GE_* values above).  On success returns SUCCESS and advances *STR
   past the expression.  On failure sets inst.error (if not already set)
   and returns nonzero.
   NOTE(review): the later failure paths return the literal 1 while the
   missing-prefix path returns FAIL; callers appear to test only for
   nonzero, but confirm the value of FAIL before relying on either.  */
static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;

  /* In unified syntax, all prefixes are optional. */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
		  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Run the generic expression parser with input_line_pointer
     temporarily redirected at our string; in_my_get_expression lets
     md_operand report parse failures back to us.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = TRUE;
  expression (ep);
  in_my_get_expression = FALSE;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand(). */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.  Big numbers are never valid
     in instructions, which is where this routine is always called. */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return SUCCESS;
}
1122
1123 /* Turn a string in input_line_pointer into a floating point constant
1124 of type TYPE, and store the appropriate bytes in *LITP. The number
1125 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1126 returned, or NULL on OK.
1127
1128 Note that fp constants aren't represent in the normal way on the ARM.
1129 In big endian mode, things are as expected. However, in little endian
1130 mode fp constants are big-endian word-wise, and little-endian byte-wise
1131 within the words. For example, (double) 1.1 in big endian mode is
1132 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1133 the byte sequence 99 99 f1 3f 9a 99 99 99.
1134
1135 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1136
const char *
md_atof (int type, char * litP, int * sizeP)
{
  int prec;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *t;
  int i;

  /* PREC counts 16-bit littlenums for the requested format.  */
  switch (type)
    {
    case 'f':
    case 'F':
    case 's':
    case 'S':
      prec = 2;
      break;

    case 'd':
    case 'D':
    case 'r':
    case 'R':
      prec = 4;
      break;

    case 'x':
    case 'X':
      prec = 5;
      break;

    /* 'p'/'P' handled identically to 'x'/'X' here.  */
    case 'p':
    case 'P':
      prec = 5;
      break;

    default:
      *sizeP = 0;
      return _("Unrecognized or unsupported floating point constant");
    }

  t = atof_ieee (input_line_pointer, type, words);
  if (t)
    input_line_pointer = t;
  *sizeP = prec * sizeof (LITTLENUM_TYPE);

  if (target_big_endian)
    {
      /* Big endian: littlenums are already most-significant first.  */
      for (i = 0; i < prec; i++)
	{
	  md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	  litP += sizeof (LITTLENUM_TYPE);
	}
    }
  else
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	/* Pure-endian FP: reverse the littlenum order entirely.  */
	for (i = prec - 1; i >= 0; i--)
	  {
	    md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	    litP += sizeof (LITTLENUM_TYPE);
	  }
      else
	/* For a 4 byte float the order of elements in `words' is 1 0.
	   For an 8 byte float the order is 1 0 3 2. */
	for (i = 0; i < prec; i += 2)
	  {
	    md_number_to_chars (litP, (valueT) words[i + 1],
				sizeof (LITTLENUM_TYPE));
	    md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
				(valueT) words[i], sizeof (LITTLENUM_TYPE));
	    litP += 2 * sizeof (LITTLENUM_TYPE);
	  }
    }

  return NULL;
}
1212
1213 /* We handle all bad expressions here, so that we can report the faulty
1214 instruction in the error message. */
1215
1216 void
1217 md_operand (expressionS * exp)
1218 {
1219 if (in_my_get_expression)
1220 exp->X_op = O_illegal;
1221 }
1222
1223 /* Immediate values. */
1224
1225 #ifdef OBJ_ELF
1226 /* Generic immediate-value read function for use in directives.
1227 Accepts anything that 'expression' can fold to a constant.
1228 *val receives the number. */
1229
1230 static int
1231 immediate_for_directive (int *val)
1232 {
1233 expressionS exp;
1234 exp.X_op = O_illegal;
1235
1236 if (is_immediate_prefix (*input_line_pointer))
1237 {
1238 input_line_pointer++;
1239 expression (&exp);
1240 }
1241
1242 if (exp.X_op != O_constant)
1243 {
1244 as_bad (_("expected #constant"));
1245 ignore_rest_of_line ();
1246 return FAIL;
1247 }
1248 *val = exp.X_add_number;
1249 return SUCCESS;
1250 }
1251 #endif
1252
1253 /* Register parsing. */
1254
1255 /* Generic register parser. CCP points to what should be the
1256 beginning of a register name. If it is indeed a valid register
1257 name, advance CCP over it and return the reg_entry structure;
1258 otherwise return NULL. Does not issue diagnostics. */
1259
static struct reg_entry *
arm_reg_parse_multi (char **ccp)
{
  char *start = *ccp;
  char *p;
  struct reg_entry *reg;

  skip_whitespace (start);

#ifdef REGISTER_PREFIX
  /* Target mandates a register prefix character.  */
  if (*start != REGISTER_PREFIX)
    return NULL;
  start++;
#endif
#ifdef OPTIONAL_REGISTER_PREFIX
  if (*start == OPTIONAL_REGISTER_PREFIX)
    start++;
#endif

  /* A register name: a letter followed by letters, digits or
     underscores.  */
  p = start;
  if (!ISALPHA (*p) || !is_name_beginner (*p))
    return NULL;

  do
    p++;
  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');

  /* Look the spelling up in the register table; advance *CCP only on
     a hit.  */
  reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);

  if (!reg)
    return NULL;

  *ccp = p;
  return reg;
}
1295
/* Try target-specific alternative spellings for a register of class
   TYPE.  REG is the entry found by the generic parser (may be NULL),
   START the unconsumed input.  Returns a register number or FAIL.  */
static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
		    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes. */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these. */
      if (reg && reg->type == REG_TYPE_CN)
	return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here. */
      {
	unsigned long processor = strtoul (start, ccp, 10);
	if (*ccp != start && processor <= 15)
	  return processor;
      }
      /* Fall through. */
      /* NOTE(review): deliberate fall-through -- a CP operand that is
	 not a bare number is also checked against the WC/WCG case
	 below.  */

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
	 instructions that take WC registers. */
      if (reg && reg->type == REG_TYPE_MMXWCG)
	return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}
1334
1335 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1336 return value is the register number or FAIL. */
1337
1338 static int
1339 arm_reg_parse (char **ccp, enum arm_reg_type type)
1340 {
1341 char *start = *ccp;
1342 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1343 int ret;
1344
1345 /* Do not allow a scalar (reg+index) to parse as a register. */
1346 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1347 return FAIL;
1348
1349 if (reg && reg->type == type)
1350 return reg->number;
1351
1352 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1353 return ret;
1354
1355 *ccp = start;
1356 return FAIL;
1357 }
1358
1359 /* Parse a Neon type specifier. *STR should point at the leading '.'
1360 character. Does no verification at this stage that the type fits the opcode
1361 properly. E.g.,
1362
1363 .i32.i32.s16
1364 .s32.f32
1365 .u16
1366
1367 Can all be legally parsed by this function.
1368
1369 Fills in neon_type struct pointer with parsed information, and updates STR
1370 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1371 type, FAIL if not. */
1372
1373 static int
1374 parse_neon_type (struct neon_type *type, char **str)
1375 {
1376 char *ptr = *str;
1377
1378 if (type)
1379 type->elems = 0;
1380
1381 while (type->elems < NEON_MAX_TYPE_ELS)
1382 {
1383 enum neon_el_type thistype = NT_untyped;
1384 unsigned thissize = -1u;
1385
1386 if (*ptr != '.')
1387 break;
1388
1389 ptr++;
1390
1391 /* Just a size without an explicit type. */
1392 if (ISDIGIT (*ptr))
1393 goto parsesize;
1394
1395 switch (TOLOWER (*ptr))
1396 {
1397 case 'i': thistype = NT_integer; break;
1398 case 'f': thistype = NT_float; break;
1399 case 'p': thistype = NT_poly; break;
1400 case 's': thistype = NT_signed; break;
1401 case 'u': thistype = NT_unsigned; break;
1402 case 'd':
1403 thistype = NT_float;
1404 thissize = 64;
1405 ptr++;
1406 goto done;
1407 default:
1408 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1409 return FAIL;
1410 }
1411
1412 ptr++;
1413
1414 /* .f is an abbreviation for .f32. */
1415 if (thistype == NT_float && !ISDIGIT (*ptr))
1416 thissize = 32;
1417 else
1418 {
1419 parsesize:
1420 thissize = strtoul (ptr, &ptr, 10);
1421
1422 if (thissize != 8 && thissize != 16 && thissize != 32
1423 && thissize != 64)
1424 {
1425 as_bad (_("bad size %d in type specifier"), thissize);
1426 return FAIL;
1427 }
1428 }
1429
1430 done:
1431 if (type)
1432 {
1433 type->el[type->elems].type = thistype;
1434 type->el[type->elems].size = thissize;
1435 type->elems++;
1436 }
1437 }
1438
1439 /* Empty/missing type is not a successful parse. */
1440 if (type->elems == 0)
1441 return FAIL;
1442
1443 *str = ptr;
1444
1445 return SUCCESS;
1446 }
1447
1448 /* Errors may be set multiple times during parsing or bit encoding
1449 (particularly in the Neon bits), but usually the earliest error which is set
1450 will be the most meaningful. Avoid overwriting it with later (cascading)
1451 errors by calling this function. */
1452
1453 static void
1454 first_error (const char *err)
1455 {
1456 if (!inst.error)
1457 inst.error = err;
1458 }
1459
1460 /* Parse a single type, e.g. ".s32", leading period included. */
1461 static int
1462 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1463 {
1464 char *str = *ccp;
1465 struct neon_type optype;
1466
1467 if (*str == '.')
1468 {
1469 if (parse_neon_type (&optype, &str) == SUCCESS)
1470 {
1471 if (optype.elems == 1)
1472 *vectype = optype.el[0];
1473 else
1474 {
1475 first_error (_("only one type should be specified for operand"));
1476 return FAIL;
1477 }
1478 }
1479 else
1480 {
1481 first_error (_("vector type expected"));
1482 return FAIL;
1483 }
1484 }
1485 else
1486 return FAIL;
1487
1488 *ccp = str;
1489
1490 return SUCCESS;
1491 }
1492
/* Special meanings for indices (which have a range of 0-7), which will fit into
   a 4-bit integer. */

#define NEON_ALL_LANES 15		/* "[]" -- broadcast to all lanes.  */
#define NEON_INTERLEAVE_LANES 14	/* Dn-Dm style non-indexed list.  */
1498
1499 /* Parse either a register or a scalar, with an optional type. Return the
1500 register number, and optionally fill in the actual type of the register
1501 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1502 type/index information in *TYPEINFO. */
1503
static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start from "no type, no index".  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions. */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted. */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_NSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  if (type != reg->type)
    return FAIL;

  /* Inherit any type/index the register alias itself carries.  */
  if (reg->neon)
    atype = *reg->neon;

  /* Optional ".type" suffix, e.g. "d0.s32".  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Optional "[index]" or "[]" (all-lanes) suffix.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD
	  && !(type == REG_TYPE_VFS
	       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_2)))
	{
	  first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1611
1612 /* Like arm_reg_parse, but also allow the following extra features:
1613 - If RTYPE is non-zero, return the (possibly restricted) type of the
1614 register (e.g. Neon double or quad reg when either has been requested).
1615 - If this is a Neon vector type with additional type information, fill
1616 in the struct pointed to by VECTYPE (if non-NULL).
1617 This function will fault on encountering a scalar. */
1618
1619 static int
1620 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1621 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1622 {
1623 struct neon_typed_alias atype;
1624 char *str = *ccp;
1625 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1626
1627 if (reg == FAIL)
1628 return FAIL;
1629
1630 /* Do not allow regname(... to parse as a register. */
1631 if (*str == '(')
1632 return FAIL;
1633
1634 /* Do not allow a scalar (reg+index) to parse as a register. */
1635 if ((atype.defined & NTA_HASINDEX) != 0)
1636 {
1637 first_error (_("register operand expected, but got scalar"));
1638 return FAIL;
1639 }
1640
1641 if (vectype)
1642 *vectype = atype.eltype;
1643
1644 *ccp = str;
1645
1646 return reg;
1647 }
1648
/* Unpack the reg*16+index encoding produced by parse_scalar below.  */
#define NEON_SCALAR_REG(X) ((X) >> 4)
#define NEON_SCALAR_INDEX(X) ((X) & 15)
1651
1652 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1653 have enough information to be able to do a good job bounds-checking. So, we
1654 just do easy checks here, and do further checks later. */
1655
1656 static int
1657 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1658 {
1659 int reg;
1660 char *str = *ccp;
1661 struct neon_typed_alias atype;
1662 enum arm_reg_type reg_type = REG_TYPE_VFD;
1663
1664 if (elsize == 4)
1665 reg_type = REG_TYPE_VFS;
1666
1667 reg = parse_typed_reg_or_scalar (&str, reg_type, NULL, &atype);
1668
1669 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1670 return FAIL;
1671
1672 if (atype.index == NEON_ALL_LANES)
1673 {
1674 first_error (_("scalar must have an index"));
1675 return FAIL;
1676 }
1677 else if (atype.index >= 64 / elsize)
1678 {
1679 first_error (_("scalar index out of range"));
1680 return FAIL;
1681 }
1682
1683 if (type)
1684 *type = atype.eltype;
1685
1686 *ccp = str;
1687
1688 return reg * 16 + atype.index;
1689 }
1690
1691 /* Types of registers in a list. */
1692
enum reg_list_els
{
  REGLIST_RN,		/* Core registers.  */
  REGLIST_CLRM,		/* Core registers + APSR, for CLRM.  */
  REGLIST_VFP_S,	/* VFP single-precision registers.  */
  REGLIST_VFP_S_VPR,	/* ... with trailing VPR.  */
  REGLIST_VFP_D,	/* VFP double-precision registers.  */
  REGLIST_VFP_D_VPR,	/* ... with trailing VPR.  */
  REGLIST_NEON_D	/* Neon D registers (Q regs count as pairs).  */
};
1703
1704 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1705
1706 static long
1707 parse_reg_list (char ** strp, enum reg_list_els etype)
1708 {
1709 char *str = *strp;
1710 long range = 0;
1711 int another_range;
1712
1713 gas_assert (etype == REGLIST_RN || etype == REGLIST_CLRM);
1714
1715 /* We come back here if we get ranges concatenated by '+' or '|'. */
1716 do
1717 {
1718 skip_whitespace (str);
1719
1720 another_range = 0;
1721
1722 if (*str == '{')
1723 {
1724 int in_range = 0;
1725 int cur_reg = -1;
1726
1727 str++;
1728 do
1729 {
1730 int reg;
1731 const char apsr_str[] = "apsr";
1732 int apsr_str_len = strlen (apsr_str);
1733
1734 reg = arm_reg_parse (&str, REGLIST_RN);
1735 if (etype == REGLIST_CLRM)
1736 {
1737 if (reg == REG_SP || reg == REG_PC)
1738 reg = FAIL;
1739 else if (reg == FAIL
1740 && !strncasecmp (str, apsr_str, apsr_str_len)
1741 && !ISALPHA (*(str + apsr_str_len)))
1742 {
1743 reg = 15;
1744 str += apsr_str_len;
1745 }
1746
1747 if (reg == FAIL)
1748 {
1749 first_error (_("r0-r12, lr or APSR expected"));
1750 return FAIL;
1751 }
1752 }
1753 else /* etype == REGLIST_RN. */
1754 {
1755 if (reg == FAIL)
1756 {
1757 first_error (_(reg_expected_msgs[REGLIST_RN]));
1758 return FAIL;
1759 }
1760 }
1761
1762 if (in_range)
1763 {
1764 int i;
1765
1766 if (reg <= cur_reg)
1767 {
1768 first_error (_("bad range in register list"));
1769 return FAIL;
1770 }
1771
1772 for (i = cur_reg + 1; i < reg; i++)
1773 {
1774 if (range & (1 << i))
1775 as_tsktsk
1776 (_("Warning: duplicated register (r%d) in register list"),
1777 i);
1778 else
1779 range |= 1 << i;
1780 }
1781 in_range = 0;
1782 }
1783
1784 if (range & (1 << reg))
1785 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1786 reg);
1787 else if (reg <= cur_reg)
1788 as_tsktsk (_("Warning: register range not in ascending order"));
1789
1790 range |= 1 << reg;
1791 cur_reg = reg;
1792 }
1793 while (skip_past_comma (&str) != FAIL
1794 || (in_range = 1, *str++ == '-'));
1795 str--;
1796
1797 if (skip_past_char (&str, '}') == FAIL)
1798 {
1799 first_error (_("missing `}'"));
1800 return FAIL;
1801 }
1802 }
1803 else if (etype == REGLIST_RN)
1804 {
1805 expressionS exp;
1806
1807 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1808 return FAIL;
1809
1810 if (exp.X_op == O_constant)
1811 {
1812 if (exp.X_add_number
1813 != (exp.X_add_number & 0x0000ffff))
1814 {
1815 inst.error = _("invalid register mask");
1816 return FAIL;
1817 }
1818
1819 if ((range & exp.X_add_number) != 0)
1820 {
1821 int regno = range & exp.X_add_number;
1822
1823 regno &= -regno;
1824 regno = (1 << regno) - 1;
1825 as_tsktsk
1826 (_("Warning: duplicated register (r%d) in register list"),
1827 regno);
1828 }
1829
1830 range |= exp.X_add_number;
1831 }
1832 else
1833 {
1834 if (inst.relocs[0].type != 0)
1835 {
1836 inst.error = _("expression too complex");
1837 return FAIL;
1838 }
1839
1840 memcpy (&inst.relocs[0].exp, &exp, sizeof (expressionS));
1841 inst.relocs[0].type = BFD_RELOC_ARM_MULTI;
1842 inst.relocs[0].pc_rel = 0;
1843 }
1844 }
1845
1846 if (*str == '|' || *str == '+')
1847 {
1848 str++;
1849 another_range = 1;
1850 }
1851 }
1852 while (another_range);
1853
1854 *strp = str;
1855 return range;
1856 }
1857
1858 /* Parse a VFP register list. If the string is invalid return FAIL.
1859 Otherwise return the number of registers, and set PBASE to the first
1860 register. Parses registers of type ETYPE.
1861 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1862 - Q registers can be used to specify pairs of D registers
1863 - { } can be omitted from around a singleton register list
1864 FIXME: This is not implemented, as it would require backtracking in
1865 some cases, e.g.:
1866 vtbl.8 d3,d4,d5
1867 This could be done (the meaning isn't really ambiguous), but doesn't
1868 fit in well with the current parsing framework.
1869 - 32 D registers may be used (also true for VFPv3).
1870 FIXME: Types are ignored in these register lists, which is probably a
1871 bug. */
1872
static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype,
		    bfd_boolean *partial_match)
{
  char *str = *ccp;
  int base_reg;
  int new_base;
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
  int max_regs = 0;
  int count = 0;
  int warned = 0;
  unsigned long mask = 0;
  int i;
  bfd_boolean vpr_seen = FALSE;
  bfd_boolean expect_vpr =
    (etype == REGLIST_VFP_S_VPR) || (etype == REGLIST_VFP_D_VPR);

  if (skip_past_char (&str, '{') == FAIL)
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  /* Select register class and (for S regs) the register count limit.  */
  switch (etype)
    {
    case REGLIST_VFP_S:
    case REGLIST_VFP_S_VPR:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
    case REGLIST_VFP_D_VPR:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;

    default:
      gas_assert (0);
    }

  if (etype != REGLIST_VFP_S && etype != REGLIST_VFP_S_VPR)
    {
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  max_regs = 32;
	  /* Record that the D32 extension was actually used.  */
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	max_regs = 16;
    }

  base_reg = max_regs;
  *partial_match = FALSE;

  do
    {
      /* SETMASK/ADDREGS are widened to cover both D regs of a Q reg.  */
      int setmask = 1, addregs = 1;
      const char vpr_str[] = "vpr";
      int vpr_str_len = strlen (vpr_str);

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (expect_vpr)
	{
	  /* VPR must appear exactly once, as the final element.  */
	  if (new_base == FAIL
	      && !strncasecmp (str, vpr_str, vpr_str_len)
	      && !ISALPHA (*(str + vpr_str_len))
	      && !vpr_seen)
	    {
	      vpr_seen = TRUE;
	      str += vpr_str_len;
	      if (count == 0)
		base_reg = 0; /* Canonicalize VPR only on d0 with 0 regs. */
	    }
	  else if (vpr_seen)
	    {
	      first_error (_("VPR expected last"));
	      return FAIL;
	    }
	  else if (new_base == FAIL)
	    {
	      if (regtype == REG_TYPE_VFS)
		first_error (_("VFP single precision register or VPR "
			       "expected"));
	      else /* regtype == REG_TYPE_VFD. */
		first_error (_("VFP/Neon double precision register or VPR "
			       "expected"));
	      return FAIL;
	    }
	}
      else if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      *partial_match = TRUE;
      if (vpr_seen)
	continue;

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>. */
      if (regtype == REG_TYPE_NQ)
	{
	  setmask = 3;
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      if ((mask >> new_base) != 0 && ! warned && !vpr_seen)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = gettext (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  /* Q<n> upper bound covers D registers 2n and 2n+1.  */
	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  /* Accumulate every register in the range into the mask.  */
	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  str++;

  /* Sanity check -- should have raised a parse error above. */
  if ((!vpr_seen && count == 0) || count > max_regs)
    abort ();

  *pbase = base_reg;

  if (expect_vpr && !vpr_seen)
    {
      first_error (_("VPR expected last"));
      return FAIL;
    }

  /* Final test -- the registers must be consecutive. */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }

  *ccp = str;

  return count;
}
2086
2087 /* True if two alias types are the same. */
2088
2089 static bfd_boolean
2090 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
2091 {
2092 if (!a && !b)
2093 return TRUE;
2094
2095 if (!a || !b)
2096 return FALSE;
2097
2098 if (a->defined != b->defined)
2099 return FALSE;
2100
2101 if ((a->defined & NTA_HASTYPE) != 0
2102 && (a->eltype.type != b->eltype.type
2103 || a->eltype.size != b->eltype.size))
2104 return FALSE;
2105
2106 if ((a->defined & NTA_HASINDEX) != 0
2107 && (a->index != b->index))
2108 return FALSE;
2109
2110 return TRUE;
2111 }
2112
2113 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
2114 The base register is put in *PBASE.
2115 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
2116 the return value.
2117 The register stride (minus one) is put in bit 4 of the return value.
2118 Bits [6:5] encode the list length (minus one).
2119 The type of the list elements is put in *ELTYPE, if non-NULL. */
2120
/* Decode the packed value built by parse_neon_el_struct_list.  */
#define NEON_LANE(X) ((X) & 0xf)
#define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)	/* 1 or 2.  */
#define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)	/* 1 to 4.  */
2124
static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;	/* First register of the list; -1 = none seen yet.  */
  int reg_incr = -1;	/* Register stride (1 or 2); -1 = not yet fixed.  */
  int count = 0;	/* Number of (D-sized) registers accumulated.  */
  int lane = -1;	/* Lane index or a NEON_*_LANES value; -1 = unset.  */
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;
  firsttype.defined = 0;
  firsttype.eltype.type = NT_invtype;
  firsttype.eltype.size = -1;
  firsttype.index = -1;

  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First register: remember it and its type info so later
	     entries can be checked for consistency.  Q registers fix
	     the stride to 1 immediately.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second register: its distance from the first fixes the
	     stride for the rest of the list.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  /* Later registers must continue the established arithmetic
	     progression base, base+incr, base+2*incr, ...  */
	  first_error (_(incr_error));
	  return FAIL;
	}

      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax.  Can only be used with non-indexed list
	 modes.  */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  /* A Q register spans two D registers.  */
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* The range contributes (hireg - getreg + dregs) D registers.  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* Every indexed register in the list must name the same lane.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Pack lane [3:0], stride-1 [4] and length-1 [6:5] as documented in
     the comment above NEON_LANE.  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2281
2282 /* Parse an explicit relocation suffix on an expression. This is
2283 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2284 arm_reloc_hsh contains no entries, so this function can only
2285 succeed if there is no () after the word. Returns -1 on error,
2286 BFD_RELOC_UNUSED if there wasn't any suffix. */
2287
2288 static int
2289 parse_reloc (char **str)
2290 {
2291 struct reloc_entry *r;
2292 char *p, *q;
2293
2294 if (**str != '(')
2295 return BFD_RELOC_UNUSED;
2296
2297 p = *str + 1;
2298 q = p;
2299
2300 while (*q && *q != ')' && *q != ',')
2301 q++;
2302 if (*q != ')')
2303 return -1;
2304
2305 if ((r = (struct reloc_entry *)
2306 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2307 return -1;
2308
2309 *str = q + 1;
2310 return r->reloc;
2311 }
2312
2313 /* Directives: register aliases. */
2314
2315 static struct reg_entry *
2316 insert_reg_alias (char *str, unsigned number, int type)
2317 {
2318 struct reg_entry *new_reg;
2319 const char *name;
2320
2321 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2322 {
2323 if (new_reg->builtin)
2324 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2325
2326 /* Only warn about a redefinition if it's not defined as the
2327 same register. */
2328 else if (new_reg->number != number || new_reg->type != type)
2329 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2330
2331 return NULL;
2332 }
2333
2334 name = xstrdup (str);
2335 new_reg = XNEW (struct reg_entry);
2336
2337 new_reg->name = name;
2338 new_reg->number = number;
2339 new_reg->type = type;
2340 new_reg->builtin = FALSE;
2341 new_reg->neon = NULL;
2342
2343 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2344 abort ();
2345
2346 return new_reg;
2347 }
2348
2349 static void
2350 insert_neon_reg_alias (char *str, int number, int type,
2351 struct neon_typed_alias *atype)
2352 {
2353 struct reg_entry *reg = insert_reg_alias (str, number, type);
2354
2355 if (!reg)
2356 {
2357 first_error (_("attempt to redefine typed alias"));
2358 return;
2359 }
2360
2361 if (atype)
2362 {
2363 reg->neon = XNEW (struct neon_typed_alias);
2364 *reg->neon = *atype;
2365 }
2366 }
2367
2368 /* Look for the .req directive. This is of the form:
2369
2370 new_register_name .req existing_register_name
2371
2372 If we find one, or if it looks sufficiently like one that we want to
2373 handle any error here, return TRUE. Otherwise return FALSE. */
2374
static bfd_boolean
create_register_alias (char * newname, char *p)
{
  struct reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
  if (!old)
    {
      /* Still return TRUE: the statement was recognizably a .req, so
	 the caller must not try to parse it as anything else.  */
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
2447
2448 /* Create a Neon typed/indexed register alias using directives, e.g.:
2449 X .dn d5.s32[1]
2450 Y .qn 6.s16
2451 Z .dn d7
2452 T .dn Z[0]
2453 These typed registers can be used instead of the types specified after the
2454 Neon mnemonic, so long as all operands given have types. Types can also be
2455 specified directly, e.g.:
2456 vadd d0.s32, d1.s32, d2.s32 */
2457
static bfd_boolean
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;	/* Scratch entry when the base is a number.  */
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend ATTRIBUTE_UNUSED;
  int namelen;

  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  nameend = p;

  /* Distinguish ".dn" (D-register alias) from ".qn" (Q-register alias);
     anything else is not ours to handle.  */
  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;
  else
    return FALSE;

  p += 5;

  if (*p == '\0')
    return FALSE;

  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return FALSE;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return FALSE;
	}
      basereg = &mybasereg;
      /* NOTE(review): the * 2 suggests Q-register numbers are kept in
	 D-register units here -- confirm against REG_TYPE_NQ handling
	 elsewhere in this file.  */
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* Start from any type/index info already attached to the base alias.  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return FALSE;
	}
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index.  */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return FALSE;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return FALSE;
	}
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  namelen = nameend - newname;
#else
  newname = original_case_string;
  namelen = strlen (newname);
#endif

  namebuf = xmemdup0 (newname, namelen);

  /* As with .req, insert the alias under the name as written, plus
     all-uppercase and all-lowercase variants when they differ.  */
  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  free (namebuf);
  return TRUE;
}
2596
2597 /* Should never be called, as .req goes between the alias and the
2598 register name, not at the beginning of the line. */
2599
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* Reaching here means ".req" appeared at the start of a statement
     instead of between the alias and the register name.  */
  as_bad (_("invalid syntax for .req directive"));
}
2605
static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  /* Like s_req: ".dn" must follow the alias name, never start a line.  */
  as_bad (_("invalid syntax for .dn directive"));
}
2611
static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  /* Like s_req: ".qn" must follow the alias name, never start a line.  */
  as_bad (_("invalid syntax for .qn directive"));
}
2617
2618 /* The .unreq directive deletes an alias which was previously defined
2619 by .req. For example:
2620
2621 my_alias .req r11
2622 .unreq my_alias */
2623
2624 static void
2625 s_unreq (int a ATTRIBUTE_UNUSED)
2626 {
2627 char * name;
2628 char saved_char;
2629
2630 name = input_line_pointer;
2631
2632 while (*input_line_pointer != 0
2633 && *input_line_pointer != ' '
2634 && *input_line_pointer != '\n')
2635 ++input_line_pointer;
2636
2637 saved_char = *input_line_pointer;
2638 *input_line_pointer = 0;
2639
2640 if (!*name)
2641 as_bad (_("invalid syntax for .unreq directive"));
2642 else
2643 {
2644 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2645 name);
2646
2647 if (!reg)
2648 as_bad (_("unknown register alias '%s'"), name);
2649 else if (reg->builtin)
2650 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2651 name);
2652 else
2653 {
2654 char * p;
2655 char * nbuf;
2656
2657 hash_delete (arm_reg_hsh, name, FALSE);
2658 free ((char *) reg->name);
2659 if (reg->neon)
2660 free (reg->neon);
2661 free (reg);
2662
2663 /* Also locate the all upper case and all lower case versions.
2664 Do not complain if we cannot find one or the other as it
2665 was probably deleted above. */
2666
2667 nbuf = strdup (name);
2668 for (p = nbuf; *p; p++)
2669 *p = TOUPPER (*p);
2670 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2671 if (reg)
2672 {
2673 hash_delete (arm_reg_hsh, nbuf, FALSE);
2674 free ((char *) reg->name);
2675 if (reg->neon)
2676 free (reg->neon);
2677 free (reg);
2678 }
2679
2680 for (p = nbuf; *p; p++)
2681 *p = TOLOWER (*p);
2682 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2683 if (reg)
2684 {
2685 hash_delete (arm_reg_hsh, nbuf, FALSE);
2686 free ((char *) reg->name);
2687 if (reg->neon)
2688 free (reg->neon);
2689 free (reg);
2690 }
2691
2692 free (nbuf);
2693 }
2694 }
2695
2696 *input_line_pointer = saved_char;
2697 demand_empty_rest_of_line ();
2698 }
2699
2700 /* Directives: Instruction set selection. */
2701
2702 #ifdef OBJ_ELF
2703 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2704 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2705 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2706 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2707
2708 /* Create a new mapping symbol for the transition to STATE. */
2709
static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  /* Pick the AAELF mapping-symbol name for the new state: $d for data,
     $a for ARM code, $t for Thumb code.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Mark the symbol's ARM/Thumb/interwork attributes to match the
     state it announces.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      /* Same address as the previous mapping symbol: the new one
	 supersedes it.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
2783
2784 /* We must sometimes convert a region marked as code to data during
2785 code alignment, if an odd number of bytes have to be padded. The
2786 code mapping symbol is pushed to an aligned address. */
2787
2788 static void
2789 insert_data_mapping_symbol (enum mstate state,
2790 valueT value, fragS *frag, offsetT bytes)
2791 {
2792 /* If there was already a mapping symbol, remove it. */
2793 if (frag->tc_frag_data.last_map != NULL
2794 && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
2795 {
2796 symbolS *symp = frag->tc_frag_data.last_map;
2797
2798 if (value == 0)
2799 {
2800 know (frag->tc_frag_data.first_map == symp);
2801 frag->tc_frag_data.first_map = NULL;
2802 }
2803 frag->tc_frag_data.last_map = NULL;
2804 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
2805 }
2806
2807 make_mapping_symbol (MAP_DATA, value, frag);
2808 make_mapping_symbol (state, value + bytes, frag);
2809 }
2810
2811 static void mapping_state_2 (enum mstate state, int max_chars);
2812
2813 /* Set the mapping state to STATE. Only call this when about to
2814 emit some STATE bytes to the file. */
2815
2816 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (state == MAP_ARM || state == MAP_THUMB)
    /* PR gas/12931
       All ARM instructions require 4-byte alignment.
       (Almost) all Thumb instructions require 2-byte alignment.

       When emitting instructions into any section, mark the section
       appropriately.

       Some Thumb instructions are alignment-sensitive modulo 4 bytes,
       but themselves require 2-byte alignment; this applies to some
       PC- relative forms.  However, these cases will involve implicit
       literal pool generation or an explicit .align >=2, both of
       which will cause the section to me marked with sufficient
       alignment.  Thus, we don't handle those cases here.  */
    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);

  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later.  */
    return;

  /* Emit the mapping symbol at the current output position.  */
  mapping_state_2 (state, 0);
}
2849
2850 /* Same as mapping_state, but MAX_CHARS bytes have already been
2851 allocated. Put the mapping symbol that far back. */
2852
static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Only normal sections carry mapping symbols.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
      || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
    {
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);

      /* Output already precedes this point, so mark the start of the
	 section as data before the first code mapping symbol.  */
      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  /* Place the symbol MAX_CHARS bytes back, at the start of the
     already-allocated output.  */
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
2879 #undef TRANSITION
2880 #else
2881 #define mapping_state(x) ((void)0)
2882 #define mapping_state_2(x, y) ((void)0)
2883 #endif
2884
2885 /* Find the real, Thumb encoded start of a Thumb function. */
2886
2887 #ifdef OBJ_COFF
2888 static symbolS *
2889 find_real_start (symbolS * symbolP)
2890 {
2891 char * real_start;
2892 const char * name = S_GET_NAME (symbolP);
2893 symbolS * new_target;
2894
2895 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2896 #define STUB_NAME ".real_start_of"
2897
2898 if (name == NULL)
2899 abort ();
2900
2901 /* The compiler may generate BL instructions to local labels because
2902 it needs to perform a branch to a far away location. These labels
2903 do not have a corresponding ".real_start_of" label. We check
2904 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2905 the ".real_start_of" convention for nonlocal branches. */
2906 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2907 return symbolP;
2908
2909 real_start = concat (STUB_NAME, name, NULL);
2910 new_target = symbol_find (real_start);
2911 free (real_start);
2912
2913 if (new_target == NULL)
2914 {
2915 as_warn (_("Failed to find real start of function: %s\n"), name);
2916 new_target = symbolP;
2917 }
2918
2919 return new_target;
2920 }
2921 #endif
2922
2923 static void
2924 opcode_select (int width)
2925 {
2926 switch (width)
2927 {
2928 case 16:
2929 if (! thumb_mode)
2930 {
2931 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2932 as_bad (_("selected processor does not support THUMB opcodes"));
2933
2934 thumb_mode = 1;
2935 /* No need to force the alignment, since we will have been
2936 coming from ARM mode, which is word-aligned. */
2937 record_alignment (now_seg, 1);
2938 }
2939 break;
2940
2941 case 32:
2942 if (thumb_mode)
2943 {
2944 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2945 as_bad (_("selected processor does not support ARM opcodes"));
2946
2947 thumb_mode = 0;
2948
2949 if (!need_pass_2)
2950 frag_align (2, 0, 0);
2951
2952 record_alignment (now_seg, 1);
2953 }
2954 break;
2955
2956 default:
2957 as_bad (_("invalid instruction size selected (%d)"), width);
2958 }
2959 }
2960
static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  /* Implement the ".arm" directive: select 32-bit ARM encoding.  */
  opcode_select (32);
  demand_empty_rest_of_line ();
}
2967
static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* Implement the ".thumb" directive: select 16-bit Thumb encoding.  */
  opcode_select (16);
  demand_empty_rest_of_line ();
}
2974
2975 static void
2976 s_code (int unused ATTRIBUTE_UNUSED)
2977 {
2978 int temp;
2979
2980 temp = get_absolute_expression ();
2981 switch (temp)
2982 {
2983 case 16:
2984 case 32:
2985 opcode_select (temp);
2986 break;
2987
2988 default:
2989 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2990 }
2991 }
2992
static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.  */
  if (! thumb_mode)
    {
      /* 2 marks "forced" Thumb mode, as opposed to 1 set by
	 opcode_select.  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
3009
static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  /* Implement ".thumb_func": switch to Thumb encoding...  */
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
}
3019
3020 /* Perform a .set directive, but also mark the alias as
3021 being a thumb function. */
3022
static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  delim = get_symbol_name (& name);
  end_name = input_line_pointer;
  (void) restore_line_pointer (delim);

  if (*input_line_pointer != ',')
    {
      /* Temporarily NUL-terminate the name so the diagnostic shows
	 just the symbol, then put the delimiter back.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  * end_name = delim;

  /* With EQUIV non-zero (".thumb_set" acting like ".equiv"), an
     already-defined symbol is an error.  */
  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
3108
3109 /* Directives: Mode selection. */
3110
3111 /* .syntax [unified|divided] - choose the new unified syntax
3112 (same for Arm and Thumb encoding, modulo slight differences in what
3113 can be represented) or the old divergent syntax for each mode. */
3114 static void
3115 s_syntax (int unused ATTRIBUTE_UNUSED)
3116 {
3117 char *name, delim;
3118
3119 delim = get_symbol_name (& name);
3120
3121 if (!strcasecmp (name, "unified"))
3122 unified_syntax = TRUE;
3123 else if (!strcasecmp (name, "divided"))
3124 unified_syntax = FALSE;
3125 else
3126 {
3127 as_bad (_("unrecognized syntax mode \"%s\""), name);
3128 return;
3129 }
3130 (void) restore_line_pointer (delim);
3131 demand_empty_rest_of_line ();
3132 }
3133
3134 /* Directives: sectioning and alignment. */
3135
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* Implement the ".bss" directive.
     We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  md_elf_section_change_hook ();
#endif
}
3148
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Implement ".even": pad to a 2-byte boundary.  */
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
3160
3161 /* Directives: CodeComposer Studio. */
3162
3163 /* .ref (for CodeComposer Studio syntax only). */
static void
s_ccs_ref (int unused ATTRIBUTE_UNUSED)
{
  /* The directive is accepted (and its operands ignored) only when
     CodeComposer syntax was enabled with -mccs.  */
  if (codecomposer_syntax)
    ignore_rest_of_line ();
  else
    as_bad (_(".ref pseudo-op only available with -mccs flag."));
}
3172
3173 /* If name is not NULL, then it is used for marking the beginning of a
3174 function, whereas if it is NULL then it means the function end. */
3175 static void
3176 asmfunc_debug (const char * name)
3177 {
3178 static const char * last_name = NULL;
3179
3180 if (name != NULL)
3181 {
3182 gas_assert (last_name == NULL);
3183 last_name = name;
3184
3185 if (debug_type == DEBUG_STABS)
3186 stabs_generate_asm_func (name, name);
3187 }
3188 else
3189 {
3190 gas_assert (last_name != NULL);
3191
3192 if (debug_type == DEBUG_STABS)
3193 stabs_generate_asm_endfunc (last_name, last_name);
3194
3195 last_name = NULL;
3196 }
3197 }
3198
3199 static void
3200 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
3201 {
3202 if (codecomposer_syntax)
3203 {
3204 switch (asmfunc_state)
3205 {
3206 case OUTSIDE_ASMFUNC:
3207 asmfunc_state = WAITING_ASMFUNC_NAME;
3208 break;
3209
3210 case WAITING_ASMFUNC_NAME:
3211 as_bad (_(".asmfunc repeated."));
3212 break;
3213
3214 case WAITING_ENDASMFUNC:
3215 as_bad (_(".asmfunc without function."));
3216 break;
3217 }
3218 demand_empty_rest_of_line ();
3219 }
3220 else
3221 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3222 }
3223
3224 static void
3225 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
3226 {
3227 if (codecomposer_syntax)
3228 {
3229 switch (asmfunc_state)
3230 {
3231 case OUTSIDE_ASMFUNC:
3232 as_bad (_(".endasmfunc without a .asmfunc."));
3233 break;
3234
3235 case WAITING_ASMFUNC_NAME:
3236 as_bad (_(".endasmfunc without function."));
3237 break;
3238
3239 case WAITING_ENDASMFUNC:
3240 asmfunc_state = OUTSIDE_ASMFUNC;
3241 asmfunc_debug (NULL);
3242 break;
3243 }
3244 demand_empty_rest_of_line ();
3245 }
3246 else
3247 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3248 }
3249
static void
s_ccs_def (int name)
{
  /* Under -mccs, ".def" is handled like ".global"; NAME is forwarded
     to s_globl unchanged.  */
  if (codecomposer_syntax)
    s_globl (name);
  else
    as_bad (_(".def pseudo-op only available with -mccs flag."));
}
3258
3259 /* Directives: Literal pools. */
3260
3261 static literal_pool *
3262 find_literal_pool (void)
3263 {
3264 literal_pool * pool;
3265
3266 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3267 {
3268 if (pool->section == now_seg
3269 && pool->sub_section == now_subseg)
3270 break;
3271 }
3272
3273 return pool;
3274 }
3275
3276 static literal_pool *
3277 find_or_make_literal_pool (void)
3278 {
3279 /* Next literal pool ID number. */
3280 static unsigned int latest_pool_num = 1;
3281 literal_pool * pool;
3282
3283 pool = find_literal_pool ();
3284
3285 if (pool == NULL)
3286 {
3287 /* Create a new pool. */
3288 pool = XNEW (literal_pool);
3289 if (! pool)
3290 return NULL;
3291
3292 pool->next_free_entry = 0;
3293 pool->section = now_seg;
3294 pool->sub_section = now_subseg;
3295 pool->next = list_of_pools;
3296 pool->symbol = NULL;
3297 pool->alignment = 2;
3298
3299 /* Add it to the list. */
3300 list_of_pools = pool;
3301 }
3302
3303 /* New pools, and emptied pools, will have a NULL symbol. */
3304 if (pool->symbol == NULL)
3305 {
3306 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3307 (valueT) 0, &zero_address_frag);
3308 pool->id = latest_pool_num ++;
3309 }
3310
3311 /* Done. */
3312 return pool;
3313 }
3314
/* Add the literal described by the global 'inst' structure to the
   relevant literal pool, reusing an identical existing entry when
   possible, and rewrite inst.relocs[0].exp as a reference to that
   entry (pool symbol + byte offset).  NBYTES is 4 or 8.  Returns
   SUCCESS, or FAIL with inst.error set on overflow/bad operand.  */

static int
add_to_lit_pool (unsigned int nbytes)
{
#define PADDING_SLOT 0x1
#define LIT_ENTRY_SIZE_MASK 0xFF
  literal_pool * pool;
  unsigned int entry, pool_size = 0;
  bfd_boolean padding_slot_p = FALSE;
  unsigned imm1 = 0;
  unsigned imm2 = 0;

  /* Split a 64-bit literal into two 32-bit halves, ordered to match
     the target endianness.  */
  if (nbytes == 8)
    {
      imm1 = inst.operands[1].imm;
      imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
	       : inst.relocs[0].exp.X_unsigned ? 0
	       : ((bfd_int64_t) inst.operands[1].imm) >> 32);
      if (target_big_endian)
	{
	  imm1 = imm2;
	  imm2 = inst.operands[1].imm;
	}
    }

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  Reusing an
     entry keeps the pool small and PC-relative offsets in range.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if (nbytes == 4)
	{
	  /* Constant entries must match in value, size and signedness.  */
	  if ((pool->literals[entry].X_op == inst.relocs[0].exp.X_op)
	      && (inst.relocs[0].exp.X_op == O_constant)
	      && (pool->literals[entry].X_add_number
		  == inst.relocs[0].exp.X_add_number)
	      && (pool->literals[entry].X_md == nbytes)
	      && (pool->literals[entry].X_unsigned
		  == inst.relocs[0].exp.X_unsigned))
	    break;

	  /* Symbolic entries must match symbol, addend and op-symbol.  */
	  if ((pool->literals[entry].X_op == inst.relocs[0].exp.X_op)
	      && (inst.relocs[0].exp.X_op == O_symbol)
	      && (pool->literals[entry].X_add_number
		  == inst.relocs[0].exp.X_add_number)
	      && (pool->literals[entry].X_add_symbol
		  == inst.relocs[0].exp.X_add_symbol)
	      && (pool->literals[entry].X_op_symbol
		  == inst.relocs[0].exp.X_op_symbol)
	      && (pool->literals[entry].X_md == nbytes))
	    break;
	}
      /* An 8-byte literal occupies two consecutive 4-byte slots on an
	 8-byte boundary; both halves must match.  */
      else if ((nbytes == 8)
	       && !(pool_size & 0x7)
	       && ((entry + 1) != pool->next_free_entry)
	       && (pool->literals[entry].X_op == O_constant)
	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
	       && (pool->literals[entry].X_unsigned
		   == inst.relocs[0].exp.X_unsigned)
	       && (pool->literals[entry + 1].X_op == O_constant)
	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
	       && (pool->literals[entry + 1].X_unsigned
		   == inst.relocs[0].exp.X_unsigned))
	break;

      /* A 4-byte request may reuse a padding slot inserted earlier for
	 8-byte alignment (the upper byte of X_md tags padding slots).  */
      padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
      if (padding_slot_p && (nbytes == 4))
	break;

      pool_size += 4;
    }

  /* Do we need to create a new entry?	*/
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      if (nbytes == 8)
	{
	  /* For 8-byte entries, we align to an 8-byte boundary,
	     and split it into two 4-byte entries, because on 32-bit
	     host, 8-byte constants are treated as big num, thus
	     saved in "generic_bignum" which will be overwritten
	     by later assignments.

	     We also need to make sure there is enough space for
	     the split.

	     We also check to make sure the literal operand is a
	     constant number.  */
	  if (!(inst.relocs[0].exp.X_op == O_constant
		|| inst.relocs[0].exp.X_op == O_big))
	    {
	      inst.error = _("invalid type for literal pool");
	      return FAIL;
	    }
	  else if (pool_size & 0x7)
	    {
	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
		{
		  inst.error = _("literal pool overflow");
		  return FAIL;
		}

	      /* Insert a 4-byte zero padding slot to reach 8-byte
		 alignment; tagged so a later 4-byte literal can claim it.  */
	      pool->literals[entry] = inst.relocs[0].exp;
	      pool->literals[entry].X_op = O_constant;
	      pool->literals[entry].X_add_number = 0;
	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
	      pool->next_free_entry += 1;
	      pool_size += 4;
	    }
	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
	    {
	      inst.error = _("literal pool overflow");
	      return FAIL;
	    }

	  /* Store the two 32-bit halves as separate constant entries.  */
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm1;
	  pool->literals[entry].X_unsigned = inst.relocs[0].exp.X_unsigned;
	  pool->literals[entry++].X_md = 4;
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm2;
	  pool->literals[entry].X_unsigned = inst.relocs[0].exp.X_unsigned;
	  pool->literals[entry].X_md = 4;
	  pool->alignment = 3;
	  pool->next_free_entry += 1;
	}
      else
	{
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_md = 4;
	}

#ifdef OBJ_ELF
      /* PR ld/12974: Record the location of the first source line to reference
	 this entry in the literal pool.  If it turns out during linking that the
	 symbol does not exist we will be able to give an accurate line number for
	 the (first use of the) missing reference.  */
      if (debug_type == DEBUG_DWARF2)
	dwarf2_where (pool->locs + entry);
#endif
      pool->next_free_entry += 1;
    }
  else if (padding_slot_p)
    {
      /* Claim the padding slot for this 4-byte literal.  */
      pool->literals[entry] = inst.relocs[0].exp;
      pool->literals[entry].X_md = nbytes;
    }

  /* Rewrite the instruction operand as pool-symbol + offset; the load
     will be fixed up against the pool emitted by s_ltorg.  */
  inst.relocs[0].exp.X_op = O_symbol;
  inst.relocs[0].exp.X_add_number = pool_size;
  inst.relocs[0].exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
3479
3480 bfd_boolean
3481 tc_start_label_without_colon (void)
3482 {
3483 bfd_boolean ret = TRUE;
3484
3485 if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
3486 {
3487 const char *label = input_line_pointer;
3488
3489 while (!is_end_of_line[(int) label[-1]])
3490 --label;
3491
3492 if (*label == '.')
3493 {
3494 as_bad (_("Invalid label '%s'"), label);
3495 ret = FALSE;
3496 }
3497
3498 asmfunc_debug (label);
3499
3500 asmfunc_state = WAITING_ENDASMFUNC;
3501 }
3502
3503 return ret;
3504 }
3505
3506 /* Can't use symbol_new here, so have to create a symbol and then at
3507 a later date assign it a value. That's what these functions do. */
3508
static void
symbol_locate (symbolS * symbolP,
	       const char * name,	/* It is copied, the caller can modify.	 */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.	 */
{
  size_t name_length;
  char * preserved_copy_of_name;

  /* Copy NAME onto the notes obstack so the symbol owns storage that
     outlives the caller's buffer.  */
  name_length = strlen (name) + 1;   /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = (char *) obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  Appending after the table has been
     frozen would corrupt it, so abort in that case.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
3556
/* Implement the .ltorg directive: dump the pending literal pool for the
   current section/subsection at the current location, resolving the
   pool's anchor symbol, then mark the pool empty so a new one can be
   started.  Does nothing if there is no pool or it has no entries.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (pool->alignment, 0, 0);

  record_alignment (now_seg, 2);

#ifdef OBJ_ELF
  /* Literal data follows code here; emit a $d mapping symbol so
     disassemblers know this is data.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
#endif
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  /* Resolve the pool's fake symbol to this spot; the LDR-literal
     relocations created by add_to_lit_pool reference it.  */
  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
#ifdef OBJ_ELF
      if (debug_type == DEBUG_DWARF2)
	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
#endif
      /* First output the expression in the instruction to the pool.
	 The low byte of X_md carries the entry's size in bytes.  */
      emit_expr (&(pool->literals[entry]),
		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
    }

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3608
3609 #ifdef OBJ_ELF
3610 /* Forward declarations for functions below, in the MD interface
3611 section. */
3612 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3613 static valueT create_unwind_entry (int);
3614 static void start_unwind_section (const segT, int);
3615 static void add_unwind_opcode (valueT, int);
3616 static void flush_pending_unwind (void);
3617
3618 /* Directives: Data. */
3619
/* Directive: emit data values of NBYTES each (ELF .word/.short etc).
   Like cons() but additionally accepts relocation-specifier suffixes
   such as "sym(got)", and records a transition to the MAP_DATA mapping
   state.  */

static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* A symbol may be followed by a "(reloc)" specifier.  */
	  char *before_reloc = input_line_pointer;
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = (reloc_howto_type *)
		  bfd_reloc_type_lookup (stdoutput,
					 (bfd_reloc_code_real_type) reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (ngettext ("%s relocations do not fit in %d byte",
				  "%s relocations do not fit in %d bytes",
				  nbytes),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;
		  int offset;
		  char *save_buf = XNEWVEC (char, input_line_pointer - base);

		  /* Save the raw expression text, splice the relocation
		     specifier out of the input buffer, re-parse the
		     remainder as one expression, then restore the buffer.  */
		  memcpy (save_buf, base, input_line_pointer - base);
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  memcpy (base, save_buf, p - base);

		  /* Emit NBYTES of zeroes and attach the SIZE-byte fixup
		     at OFFSET within the field.  */
		  offset = nbytes - size;
		  p = frag_more (nbytes);
		  memset (p, 0, nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
		  free (save_buf);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3714
3715 /* Emit an expression containing a 32-bit thumb instruction.
3716 Implementation based on put_thumb32_insn. */
3717
3718 static void
3719 emit_thumb32_expr (expressionS * exp)
3720 {
3721 expressionS exp_high = *exp;
3722
3723 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3724 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3725 exp->X_add_number &= 0xffff;
3726 emit_expr (exp, (unsigned int) THUMB_SIZE);
3727 }
3728
/* Guess the size in bytes of a Thumb instruction from its OPCODE
   value: 2 for a 16-bit encoding, 4 for a 32-bit encoding, 0 if the
   size cannot be determined.  */

static int
thumb_insn_size (int opcode)
{
  unsigned int op = (unsigned int) opcode;

  if (op < 0xe800u)
    return 2;
  return op >= 0xe8000000u ? 4 : 0;
}
3741
/* Emit the constant expression EXP as one instruction, for the .inst
   family of directives.  NBYTES is the explicit width (.inst.n = 2,
   .inst.w = 4) or 0 for plain .inst, in which case the width is
   guessed from the opcode value.  Returns TRUE if an instruction was
   emitted, FALSE after reporting an error.  */

static bfd_boolean
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      /* No explicit width: infer it from the Thumb opcode value.  */
      if (size == 0)
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "\
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      /* .inst participates in the IT-block state machine just
		 like a regular instruction.  */
	      if (now_it.state == AUTOMATIC_IT_BLOCK)
		set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
	      else
		set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      /* 32-bit Thumb instructions are stored as two halfwords,
		 high one first, on little-endian targets.  */
	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. " \
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
3786
/* Like s_arm_elf_cons but do not use md_cons_align and
   set the mapping state to MAP_ARM/MAP_THUMB.  Implements .inst,
   .inst.n and .inst.w; NBYTES is 0, 2 or 4 respectively.  */

static void
s_arm_elf_inst (int nbytes)
{
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Calling mapping_state () here will not change ARM/THUMB,
     but will ensure not to be in DATA state.  */

  if (thumb_mode)
    mapping_state (MAP_THUMB);
  else
    {
      /* Width suffixes (.n/.w) only make sense in Thumb mode.  */
      if (nbytes != 0)
	{
	  as_bad (_("width suffixes are invalid in ARM mode"));
	  ignore_rest_of_line ();
	  return;
	}

      /* ARM instructions are always 4 bytes.  */
      nbytes = 4;

      mapping_state (MAP_ARM);
    }

  /* Emit each comma-separated opcode in turn.  */
  do
    {
      expressionS exp;

      expression (& exp);

      if (! emit_insn (& exp, nbytes))
	{
	  ignore_rest_of_line ();
	  return;
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3836
/* Parse a .rel31 directive: a flag bit (0 or 1), a comma, and an
   expression.  Emits a 32-bit datum whose low 31 bits carry a
   self-relative R_ARM_PREL31 relocation and whose top bit is the
   flag.  Used for EHABI exception tables.  */

static void
s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  char *p;
  valueT highbit;

  /* First operand: the top bit, written literally as 0 or 1.  */
  highbit = 0;
  if (*input_line_pointer == '1')
    highbit = 0x80000000;
  else if (*input_line_pointer != '0')
    as_bad (_("expected 0 or 1"));

  input_line_pointer++;
  if (*input_line_pointer != ',')
    as_bad (_("missing comma"));
  input_line_pointer++;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  mapping_state (MAP_DATA);

  expression (&exp);

  /* Pre-fill the word with the flag bit; the PC-relative PREL31 fixup
     supplies the low 31 bits at link time.  */
  p = frag_more (4);
  md_number_to_chars (p, highbit, 4);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
	       BFD_RELOC_ARM_PREL31);

  demand_empty_rest_of_line ();
}
3876
3877 /* Directives: AEABI stack-unwind tables. */
3878
3879 /* Parse an unwind_fnstart directive. Simply records the current location. */
3880
3881 static void
3882 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3883 {
3884 demand_empty_rest_of_line ();
3885 if (unwind.proc_start)
3886 {
3887 as_bad (_("duplicate .fnstart directive"));
3888 return;
3889 }
3890
3891 /* Mark the start of the function. */
3892 unwind.proc_start = expr_build_dot ();
3893
3894 /* Reset the rest of the unwind info. */
3895 unwind.opcode_count = 0;
3896 unwind.table_entry = NULL;
3897 unwind.personality_routine = NULL;
3898 unwind.personality_index = -1;
3899 unwind.frame_size = 0;
3900 unwind.fp_offset = 0;
3901 unwind.fp_reg = REG_SP;
3902 unwind.fp_used = 0;
3903 unwind.sp_restored = 0;
3904 }
3905
3906
3907 /* Parse a handlerdata directive. Creates the exception handling table entry
3908 for the function. */
3909
3910 static void
3911 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3912 {
3913 demand_empty_rest_of_line ();
3914 if (!unwind.proc_start)
3915 as_bad (MISSING_FNSTART);
3916
3917 if (unwind.table_entry)
3918 as_bad (_("duplicate .handlerdata directive"));
3919
3920 create_unwind_entry (1);
3921 }
3922
/* Parse an unwind_fnend directive.  Generates the index table entry
   (two words in .ARM.exidx): a PREL31 reference to the function start,
   and either an inline unwind entry or a PREL31 reference to the
   table entry in .ARM.extab.  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  VAL is non-zero when the entry is short enough
     to be encoded inline in the second index word.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  memset (ptr, 0, 8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      /* Zero-size fixup: records the reference without patching bytes.  */
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  unwind.proc_start = NULL;
}
3992
3993
3994 /* Parse an unwind_cantunwind directive. */
3995
3996 static void
3997 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3998 {
3999 demand_empty_rest_of_line ();
4000 if (!unwind.proc_start)
4001 as_bad (MISSING_FNSTART);
4002
4003 if (unwind.personality_routine || unwind.personality_index != -1)
4004 as_bad (_("personality routine specified for cantunwind frame"));
4005
4006 unwind.personality_index = -2;
4007 }
4008
4009
4010 /* Parse a personalityindex directive. */
4011
4012 static void
4013 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
4014 {
4015 expressionS exp;
4016
4017 if (!unwind.proc_start)
4018 as_bad (MISSING_FNSTART);
4019
4020 if (unwind.personality_routine || unwind.personality_index != -1)
4021 as_bad (_("duplicate .personalityindex directive"));
4022
4023 expression (&exp);
4024
4025 if (exp.X_op != O_constant
4026 || exp.X_add_number < 0 || exp.X_add_number > 15)
4027 {
4028 as_bad (_("bad personality routine number"));
4029 ignore_rest_of_line ();
4030 return;
4031 }
4032
4033 unwind.personality_index = exp.X_add_number;
4034
4035 demand_empty_rest_of_line ();
4036 }
4037
4038
/* Parse a personality directive.  Records the named symbol as the
   custom personality routine for the current frame.  */

static void
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
{
  char *name, *p, c;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personality directive"));

  /* get_symbol_name NUL-terminates the name in place and returns the
     character it overwrote; restore it afterwards.  */
  c = get_symbol_name (& name);
  p = input_line_pointer;
  if (c == '"')
    ++ input_line_pointer;
  unwind.personality_routine = symbol_find_or_make (name);
  *p = c;	/* Put back the clobbered terminator.  */
  demand_empty_rest_of_line ();
}
4060
4061
/* Parse a directive saving core registers.  Emits EHABI pop opcodes
   for the register list and accounts for the frame size.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;
  int n;

  range = parse_reg_list (&input_line_pointer, REGLIST_RN);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.	 */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  The 0xbff0 mask ignores
	 bit 14 (r14), which the short form can encode separately.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.	*/
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.	 */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.	 */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
4137
4138
4139 /* Parse a directive saving FPA registers. */
4140
4141 static void
4142 s_arm_unwind_save_fpa (int reg)
4143 {
4144 expressionS exp;
4145 int num_regs;
4146 valueT op;
4147
4148 /* Get Number of registers to transfer. */
4149 if (skip_past_comma (&input_line_pointer) != FAIL)
4150 expression (&exp);
4151 else
4152 exp.X_op = O_illegal;
4153
4154 if (exp.X_op != O_constant)
4155 {
4156 as_bad (_("expected , <constant>"));
4157 ignore_rest_of_line ();
4158 return;
4159 }
4160
4161 num_regs = exp.X_add_number;
4162
4163 if (num_regs < 1 || num_regs > 4)
4164 {
4165 as_bad (_("number of registers must be in the range [1:4]"));
4166 ignore_rest_of_line ();
4167 return;
4168 }
4169
4170 demand_empty_rest_of_line ();
4171
4172 if (reg == 4)
4173 {
4174 /* Short form. */
4175 op = 0xb4 | (num_regs - 1);
4176 add_unwind_opcode (op, 1);
4177 }
4178 else
4179 {
4180 /* Long form. */
4181 op = 0xc800 | (reg << 4) | (num_regs - 1);
4182 add_unwind_opcode (op, 2);
4183 }
4184 unwind.frame_size += num_regs * 12;
4185 }
4186
4187
/* Parse a directive saving VFP registers for ARMv6 and above.  The
   list may span both d0-d15 and the VFPv3 d16-d31 range; each range
   needs its own opcode.  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;
  bfd_boolean partial_match;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D,
			      &partial_match);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* Each D register is 8 bytes.  */
  unwind.frame_size += count * 8;
}
4238
4239
4240 /* Parse a directive saving VFP registers for pre-ARMv6. */
4241
4242 static void
4243 s_arm_unwind_save_vfp (void)
4244 {
4245 int count;
4246 unsigned int reg;
4247 valueT op;
4248 bfd_boolean partial_match;
4249
4250 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D,
4251 &partial_match);
4252 if (count == FAIL)
4253 {
4254 as_bad (_("expected register list"));
4255 ignore_rest_of_line ();
4256 return;
4257 }
4258
4259 demand_empty_rest_of_line ();
4260
4261 if (reg == 8)
4262 {
4263 /* Short form. */
4264 op = 0xb8 | (count - 1);
4265 add_unwind_opcode (op, 1);
4266 }
4267 else
4268 {
4269 /* Long form. */
4270 op = 0xb300 | (reg << 4) | (count - 1);
4271 add_unwind_opcode (op, 2);
4272 }
4273 unwind.frame_size += count * 8 + 4;
4274 }
4275
4276
4277 /* Parse a directive saving iWMMXt data registers. */
4278
4279 static void
4280 s_arm_unwind_save_mmxwr (void)
4281 {
4282 int reg;
4283 int hi_reg;
4284 int i;
4285 unsigned mask = 0;
4286 valueT op;
4287
4288 if (*input_line_pointer == '{')
4289 input_line_pointer++;
4290
4291 do
4292 {
4293 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4294
4295 if (reg == FAIL)
4296 {
4297 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4298 goto error;
4299 }
4300
4301 if (mask >> reg)
4302 as_tsktsk (_("register list not in ascending order"));
4303 mask |= 1 << reg;
4304
4305 if (*input_line_pointer == '-')
4306 {
4307 input_line_pointer++;
4308 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4309 if (hi_reg == FAIL)
4310 {
4311 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4312 goto error;
4313 }
4314 else if (reg >= hi_reg)
4315 {
4316 as_bad (_("bad register range"));
4317 goto error;
4318 }
4319 for (; reg < hi_reg; reg++)
4320 mask |= 1 << reg;
4321 }
4322 }
4323 while (skip_past_comma (&input_line_pointer) != FAIL);
4324
4325 skip_past_char (&input_line_pointer, '}');
4326
4327 demand_empty_rest_of_line ();
4328
4329 /* Generate any deferred opcodes because we're going to be looking at
4330 the list. */
4331 flush_pending_unwind ();
4332
4333 for (i = 0; i < 16; i++)
4334 {
4335 if (mask & (1 << i))
4336 unwind.frame_size += 8;
4337 }
4338
4339 /* Attempt to combine with a previous opcode. We do this because gcc
4340 likes to output separate unwind directives for a single block of
4341 registers. */
4342 if (unwind.opcode_count > 0)
4343 {
4344 i = unwind.opcodes[unwind.opcode_count - 1];
4345 if ((i & 0xf8) == 0xc0)
4346 {
4347 i &= 7;
4348 /* Only merge if the blocks are contiguous. */
4349 if (i < 6)
4350 {
4351 if ((mask & 0xfe00) == (1 << 9))
4352 {
4353 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
4354 unwind.opcode_count--;
4355 }
4356 }
4357 else if (i == 6 && unwind.opcode_count >= 2)
4358 {
4359 i = unwind.opcodes[unwind.opcode_count - 2];
4360 reg = i >> 4;
4361 i &= 0xf;
4362
4363 op = 0xffff << (reg - 1);
4364 if (reg > 0
4365 && ((mask & op) == (1u << (reg - 1))))
4366 {
4367 op = (1 << (reg + i + 1)) - 1;
4368 op &= ~((1 << reg) - 1);
4369 mask |= op;
4370 unwind.opcode_count -= 2;
4371 }
4372 }
4373 }
4374 }
4375
4376 hi_reg = 15;
4377 /* We want to generate opcodes in the order the registers have been
4378 saved, ie. descending order. */
4379 for (reg = 15; reg >= -1; reg--)
4380 {
4381 /* Save registers in blocks. */
4382 if (reg < 0
4383 || !(mask & (1 << reg)))
4384 {
4385 /* We found an unsaved reg. Generate opcodes to save the
4386 preceding block. */
4387 if (reg != hi_reg)
4388 {
4389 if (reg == 9)
4390 {
4391 /* Short form. */
4392 op = 0xc0 | (hi_reg - 10);
4393 add_unwind_opcode (op, 1);
4394 }
4395 else
4396 {
4397 /* Long form. */
4398 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
4399 add_unwind_opcode (op, 2);
4400 }
4401 }
4402 hi_reg = reg - 1;
4403 }
4404 }
4405
4406 return;
4407 error:
4408 ignore_rest_of_line ();
4409 }
4410
4411 static void
4412 s_arm_unwind_save_mmxwcg (void)
4413 {
4414 int reg;
4415 int hi_reg;
4416 unsigned mask = 0;
4417 valueT op;
4418
4419 if (*input_line_pointer == '{')
4420 input_line_pointer++;
4421
4422 skip_whitespace (input_line_pointer);
4423
4424 do
4425 {
4426 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4427
4428 if (reg == FAIL)
4429 {
4430 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4431 goto error;
4432 }
4433
4434 reg -= 8;
4435 if (mask >> reg)
4436 as_tsktsk (_("register list not in ascending order"));
4437 mask |= 1 << reg;
4438
4439 if (*input_line_pointer == '-')
4440 {
4441 input_line_pointer++;
4442 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4443 if (hi_reg == FAIL)
4444 {
4445 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4446 goto error;
4447 }
4448 else if (reg >= hi_reg)
4449 {
4450 as_bad (_("bad register range"));
4451 goto error;
4452 }
4453 for (; reg < hi_reg; reg++)
4454 mask |= 1 << reg;
4455 }
4456 }
4457 while (skip_past_comma (&input_line_pointer) != FAIL);
4458
4459 skip_past_char (&input_line_pointer, '}');
4460
4461 demand_empty_rest_of_line ();
4462
4463 /* Generate any deferred opcodes because we're going to be looking at
4464 the list. */
4465 flush_pending_unwind ();
4466
4467 for (reg = 0; reg < 16; reg++)
4468 {
4469 if (mask & (1 << reg))
4470 unwind.frame_size += 4;
4471 }
4472 op = 0xc700 | mask;
4473 add_unwind_opcode (op, 2);
4474 return;
4475 error:
4476 ignore_rest_of_line ();
4477 }
4478
4479
4480 /* Parse an unwind_save directive.
4481 If the argument is non-zero, this is a .vsave directive. */
4482
4483 static void
4484 s_arm_unwind_save (int arch_v6)
4485 {
4486 char *peek;
4487 struct reg_entry *reg;
4488 bfd_boolean had_brace = FALSE;
4489
4490 if (!unwind.proc_start)
4491 as_bad (MISSING_FNSTART);
4492
4493 /* Figure out what sort of save we have. */
4494 peek = input_line_pointer;
4495
4496 if (*peek == '{')
4497 {
4498 had_brace = TRUE;
4499 peek++;
4500 }
4501
4502 reg = arm_reg_parse_multi (&peek);
4503
4504 if (!reg)
4505 {
4506 as_bad (_("register expected"));
4507 ignore_rest_of_line ();
4508 return;
4509 }
4510
4511 switch (reg->type)
4512 {
4513 case REG_TYPE_FN:
4514 if (had_brace)
4515 {
4516 as_bad (_("FPA .unwind_save does not take a register list"));
4517 ignore_rest_of_line ();
4518 return;
4519 }
4520 input_line_pointer = peek;
4521 s_arm_unwind_save_fpa (reg->number);
4522 return;
4523
4524 case REG_TYPE_RN:
4525 s_arm_unwind_save_core ();
4526 return;
4527
4528 case REG_TYPE_VFD:
4529 if (arch_v6)
4530 s_arm_unwind_save_vfp_armv6 ();
4531 else
4532 s_arm_unwind_save_vfp ();
4533 return;
4534
4535 case REG_TYPE_MMXWR:
4536 s_arm_unwind_save_mmxwr ();
4537 return;
4538
4539 case REG_TYPE_MMXWCG:
4540 s_arm_unwind_save_mmxwcg ();
4541 return;
4542
4543 default:
4544 as_bad (_(".unwind_save does not support this kind of register"));
4545 ignore_rest_of_line ();
4546 }
4547 }
4548
4549
4550 /* Parse an unwind_movsp directive. */
4551
4552 static void
4553 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4554 {
4555 int reg;
4556 valueT op;
4557 int offset;
4558
4559 if (!unwind.proc_start)
4560 as_bad (MISSING_FNSTART);
4561
4562 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4563 if (reg == FAIL)
4564 {
4565 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4566 ignore_rest_of_line ();
4567 return;
4568 }
4569
4570 /* Optional constant. */
4571 if (skip_past_comma (&input_line_pointer) != FAIL)
4572 {
4573 if (immediate_for_directive (&offset) == FAIL)
4574 return;
4575 }
4576 else
4577 offset = 0;
4578
4579 demand_empty_rest_of_line ();
4580
4581 if (reg == REG_SP || reg == REG_PC)
4582 {
4583 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4584 return;
4585 }
4586
4587 if (unwind.fp_reg != REG_SP)
4588 as_bad (_("unexpected .unwind_movsp directive"));
4589
4590 /* Generate opcode to restore the value. */
4591 op = 0x90 | reg;
4592 add_unwind_opcode (op, 1);
4593
4594 /* Record the information for later. */
4595 unwind.fp_reg = reg;
4596 unwind.fp_offset = unwind.frame_size - offset;
4597 unwind.sp_restored = 1;
4598 }
4599
4600 /* Parse an unwind_pad directive. */
4601
4602 static void
4603 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4604 {
4605 int offset;
4606
4607 if (!unwind.proc_start)
4608 as_bad (MISSING_FNSTART);
4609
4610 if (immediate_for_directive (&offset) == FAIL)
4611 return;
4612
4613 if (offset & 3)
4614 {
4615 as_bad (_("stack increment must be multiple of 4"));
4616 ignore_rest_of_line ();
4617 return;
4618 }
4619
4620 /* Don't generate any opcodes, just record the details for later. */
4621 unwind.frame_size += offset;
4622 unwind.pending_offset += offset;
4623
4624 demand_empty_rest_of_line ();
4625 }
4626
4627 /* Parse an unwind_setfp directive. */
4628
4629 static void
4630 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4631 {
4632 int sp_reg;
4633 int fp_reg;
4634 int offset;
4635
4636 if (!unwind.proc_start)
4637 as_bad (MISSING_FNSTART);
4638
4639 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4640 if (skip_past_comma (&input_line_pointer) == FAIL)
4641 sp_reg = FAIL;
4642 else
4643 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4644
4645 if (fp_reg == FAIL || sp_reg == FAIL)
4646 {
4647 as_bad (_("expected <reg>, <reg>"));
4648 ignore_rest_of_line ();
4649 return;
4650 }
4651
4652 /* Optional constant. */
4653 if (skip_past_comma (&input_line_pointer) != FAIL)
4654 {
4655 if (immediate_for_directive (&offset) == FAIL)
4656 return;
4657 }
4658 else
4659 offset = 0;
4660
4661 demand_empty_rest_of_line ();
4662
4663 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4664 {
4665 as_bad (_("register must be either sp or set by a previous"
4666 "unwind_movsp directive"));
4667 return;
4668 }
4669
4670 /* Don't generate any opcodes, just record the information for later. */
4671 unwind.fp_reg = fp_reg;
4672 unwind.fp_used = 1;
4673 if (sp_reg == REG_SP)
4674 unwind.fp_offset = unwind.frame_size - offset;
4675 else
4676 unwind.fp_offset -= offset;
4677 }
4678
4679 /* Parse an unwind_raw directive. */
4680
4681 static void
4682 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4683 {
4684 expressionS exp;
4685 /* This is an arbitrary limit. */
4686 unsigned char op[16];
4687 int count;
4688
4689 if (!unwind.proc_start)
4690 as_bad (MISSING_FNSTART);
4691
4692 expression (&exp);
4693 if (exp.X_op == O_constant
4694 && skip_past_comma (&input_line_pointer) != FAIL)
4695 {
4696 unwind.frame_size += exp.X_add_number;
4697 expression (&exp);
4698 }
4699 else
4700 exp.X_op = O_illegal;
4701
4702 if (exp.X_op != O_constant)
4703 {
4704 as_bad (_("expected <offset>, <opcode>"));
4705 ignore_rest_of_line ();
4706 return;
4707 }
4708
4709 count = 0;
4710
4711 /* Parse the opcode. */
4712 for (;;)
4713 {
4714 if (count >= 16)
4715 {
4716 as_bad (_("unwind opcode too long"));
4717 ignore_rest_of_line ();
4718 }
4719 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4720 {
4721 as_bad (_("invalid unwind opcode"));
4722 ignore_rest_of_line ();
4723 return;
4724 }
4725 op[count++] = exp.X_add_number;
4726
4727 /* Parse the next byte. */
4728 if (skip_past_comma (&input_line_pointer) == FAIL)
4729 break;
4730
4731 expression (&exp);
4732 }
4733
4734 /* Add the opcode bytes in reverse order. */
4735 while (count--)
4736 add_unwind_opcode (op[count], 1);
4737
4738 demand_empty_rest_of_line ();
4739 }
4740
4741
4742 /* Parse a .eabi_attribute directive. */
4743
4744 static void
4745 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4746 {
4747 int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4748
4749 if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4750 attributes_set_explicitly[tag] = 1;
4751 }
4752
/* Emit a tls fix for the symbol.  Parses a .tlsdescseq directive: reads
   a symbol expression and attaches a TLS descriptor-sequence relocation
   at the current output position, choosing the Thumb or ARM variant
   according to the current assembly mode.  */

static void
s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
{
  char *p;
  expressionS exp;
#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* P - fr_literal is the offset of the next free byte in the current
     frag; the fix is placed there without emitting any data.  */
  p = obstack_next_free (&frchain_now->frch_obstack);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
	       : BFD_RELOC_ARM_TLS_DESCSEQ);
}
4776 #endif /* OBJ_ELF */
4777
4778 static void s_arm_arch (int);
4779 static void s_arm_object_arch (int);
4780 static void s_arm_cpu (int);
4781 static void s_arm_fpu (int);
4782 static void s_arm_arch_extension (int);
4783
4784 #ifdef TE_PE
4785
4786 static void
4787 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4788 {
4789 expressionS exp;
4790
4791 do
4792 {
4793 expression (&exp);
4794 if (exp.X_op == O_symbol)
4795 exp.X_op = O_secrel;
4796
4797 emit_expr (&exp, 4);
4798 }
4799 while (*input_line_pointer++ == ',');
4800
4801 input_line_pointer--;
4802 demand_empty_rest_of_line ();
4803 }
4804 #endif /* TE_PE */
4805
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.  */
  { "req", s_req, 0 },
  /* Following two are likewise never called.  */
  { "dn", s_dn, 0 },
  { "qn", s_qn, 0 },
  { "unreq", s_unreq, 0 },
  { "bss", s_bss, 0 },
  { "align", s_align_ptwo, 2 },
  { "arm", s_arm, 0 },
  { "thumb", s_thumb, 0 },
  { "code", s_code, 0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func", s_thumb_func, 0 },
  { "thumb_set", s_thumb_set, 0 },
  { "even", s_even, 0 },
  { "ltorg", s_ltorg, 0 },
  { "pool", s_ltorg, 0 },
  { "syntax", s_syntax, 0 },
  { "cpu", s_arm_cpu, 0 },
  { "arch", s_arm_arch, 0 },
  { "object_arch", s_arm_object_arch, 0 },
  { "fpu", s_arm_fpu, 0 },
  { "arch_extension", s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  /* ELF-only directives: data with ARM-specific relocation handling,
     EHABI unwind-table annotations, EABI attributes and TLS.  */
  { "word", s_arm_elf_cons, 4 },
  { "long", s_arm_elf_cons, 4 },
  { "inst.n", s_arm_elf_inst, 2 },
  { "inst.w", s_arm_elf_inst, 4 },
  { "inst", s_arm_elf_inst, 0 },
  { "rel31", s_arm_rel31, 0 },
  { "fnstart", s_arm_unwind_fnstart, 0 },
  { "fnend", s_arm_unwind_fnend, 0 },
  { "cantunwind", s_arm_unwind_cantunwind, 0 },
  { "personality", s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata", s_arm_unwind_handlerdata, 0 },
  { "save", s_arm_unwind_save, 0 },
  { "vsave", s_arm_unwind_save, 1 },
  { "movsp", s_arm_unwind_movsp, 0 },
  { "pad", s_arm_unwind_pad, 0 },
  { "setfp", s_arm_unwind_setfp, 0 },
  { "unwind_raw", s_arm_unwind_raw, 0 },
  { "eabi_attribute", s_arm_eabi_attribute, 0 },
  { "tlsdescseq", s_arm_tls_descseq, 0 },
#else
  { "word", cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", dwarf2_directive_file, 0 },
  { "loc", dwarf2_directive_loc, 0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend", float_cons, 'x' },
  { "ldouble", float_cons, 'x' },
  { "packed", float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif

  /* These are for compatibility with CodeComposer Studio.  */
  {"ref", s_ccs_ref, 0},
  {"def", s_ccs_def, 0},
  {"asmfunc", s_ccs_asmfunc, 0},
  {"endasmfunc", s_ccs_endasmfunc, 0},

  { 0, 0, 0 }
};
4885 \f
4886 /* Parser functions used exclusively in instruction operands. */
4887
4888 /* Generic immediate-value read function for use in insn parsing.
4889 STR points to the beginning of the immediate (the leading #);
4890 VAL receives the value; if the value is outside [MIN, MAX]
4891 issue an error. PREFIX_OPT is true if the immediate prefix is
4892 optional. */
4893
4894 static int
4895 parse_immediate (char **str, int *val, int min, int max,
4896 bfd_boolean prefix_opt)
4897 {
4898 expressionS exp;
4899
4900 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4901 if (exp.X_op != O_constant)
4902 {
4903 inst.error = _("constant expression required");
4904 return FAIL;
4905 }
4906
4907 if (exp.X_add_number < min || exp.X_add_number > max)
4908 {
4909 inst.error = _("immediate value out of range");
4910 return FAIL;
4911 }
4912
4913 *val = exp.X_add_number;
4914 return SUCCESS;
4915 }
4916
/* Less-generic immediate-value read function with the possibility of loading a
   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
   instructions.  Puts the result directly in inst.operands[i]: the low 32
   bits in .imm and, for values wider than 32 bits, the high 32 bits in .reg
   with .regisimm set.  If IN_EXP is non-NULL the parsed expression is also
   stored there.  Returns SUCCESS or FAIL.  */

static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bfd_boolean allow_symbol_p)
{
  expressionS exp;
  expressionS *exp_p = in_exp ? in_exp : &exp;
  char *ptr = *str;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
    {
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
				  & 0xffffffff);
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
    {
      /* For O_big, X_add_number is the count of littlenums in use.  */
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg,  in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  /* Littlenum just above bit 64 must match the sign pattern...  */
	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  /* ...and all remaining littlenums must repeat it.  */
	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Assemble the low 32 bits into .imm...  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      /* ...and the next 32 bits into .reg.  */
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
4989
/* Returns the pseudo-register number of an FPA immediate constant,
   or FAIL if there isn't a valid constant here.  The FPA's well-known
   constants are numbered as pseudo-registers 8 onwards, hence the
   "i + 8" return values below.  */

static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char * save_in;
  expressionS exp;
  int i;
  int j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  /* Provisionally consume the constant; back out unless the
	     rest of the line is empty.  */
	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    return i + 8;
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Compare the parsed littlenums against each known FPA value.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
#define X_PRECISION 5
#define E_PRECISION 15L
      if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  /* No match: restore input_line_pointer and report failure.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
5082
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000.  */

static int
is_quarter_float (unsigned imm)
{
  /* Bits 0-18 must be clear, and the exponent field (bits 25-30) must
     be exactly 0b011111 when bit 29 is set, or 0b100000 when it is
     clear.  */
  unsigned expected = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;

  if ((imm & 0x7ffff) != 0)
    return 0;
  return (imm & 0x7e000000) == expected;
}
5092
5093
/* Detect the presence of a floating point or integer zero constant,
   i.e. #0.0 or #0.  Returns TRUE and consumes the text on a match;
   returns FALSE otherwise (input may have been partially consumed).  */

static bfd_boolean
parse_ifimm_zero (char **in)
{
  int error_code;

  if (!is_immediate_prefix (**in))
    {
      /* In unified syntax, all prefixes are optional.  */
      if (!unified_syntax)
	return FALSE;
    }
  else
    ++*in;

  /* Accept #0x0 as a synonym for #0.  */
  if (strncmp (*in, "0x", 2) == 0)
    {
      int val;
      /* Range [0, 0] means only the value zero is accepted.  */
      if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
	return FALSE;
      return TRUE;
    }

  /* Otherwise try to parse the text as a floating point literal.  */
  error_code = atof_generic (in, ".", EXP_CHARS,
			     &generic_floating_point_number);

  /* NOTE(review): LOW > LEADER appears to be how atof_generic encodes a
     zero significand in the generic flonum — confirm against the flonum
     internals before relying on this elsewhere.  */
  if (!error_code
      && generic_floating_point_number.sign == '+'
      && (generic_floating_point_number.low
	  > generic_floating_point_number.leader))
    return TRUE;

  return FALSE;
}
5131
/* Parse an 8-bit "quarter-precision" floating point number of the form:
   0baBbbbbbc defgh000 00000000 00000000.
   The zero and minus-zero cases need special handling, since they can't be
   encoded in the "quarter-precision" float format, but can nonetheless be
   loaded as integer constants.  On success the 32-bit single-precision
   bit pattern is stored in *IMMED and *CCP is advanced.  */

static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    return FAIL;
  else
    {
      /* Scan up to the end of the token for a '.' or exponent marker.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Accept encodable quarter-precision values, plus +0.0/-0.0
	 (sign bit ignored by the 0x7fffffff mask).  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}
5195
/* Shift operands.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

/* Maps a textual shift mnemonic onto its shift_kind.  */
struct asm_shift_name
{
  const char *name;
  enum shift_kind kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.  */
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.  */
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.  */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.  */
};
5217
5218 /* Parse a <shift> specifier on an ARM data processing instruction.
5219 This has three forms:
5220
5221 (LSL|LSR|ASL|ASR|ROR) Rs
5222 (LSL|LSR|ASL|ASR|ROR) #imm
5223 RRX
5224
5225 Note that ASL is assimilated to LSL in the instruction encoding, and
5226 RRX to ROR #0 (which cannot be written as such). */
5227
5228 static int
5229 parse_shift (char **str, int i, enum parse_shift_mode mode)
5230 {
5231 const struct asm_shift_name *shift_name;
5232 enum shift_kind shift;
5233 char *s = *str;
5234 char *p = s;
5235 int reg;
5236
5237 for (p = *str; ISALPHA (*p); p++)
5238 ;
5239
5240 if (p == *str)
5241 {
5242 inst.error = _("shift expression expected");
5243 return FAIL;
5244 }
5245
5246 shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
5247 p - *str);
5248
5249 if (shift_name == NULL)
5250 {
5251 inst.error = _("shift expression expected");
5252 return FAIL;
5253 }
5254
5255 shift = shift_name->kind;
5256
5257 switch (mode)
5258 {
5259 case NO_SHIFT_RESTRICT:
5260 case SHIFT_IMMEDIATE: break;
5261
5262 case SHIFT_LSL_OR_ASR_IMMEDIATE:
5263 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
5264 {
5265 inst.error = _("'LSL' or 'ASR' required");
5266 return FAIL;
5267 }
5268 break;
5269
5270 case SHIFT_LSL_IMMEDIATE:
5271 if (shift != SHIFT_LSL)
5272 {
5273 inst.error = _("'LSL' required");
5274 return FAIL;
5275 }
5276 break;
5277
5278 case SHIFT_ASR_IMMEDIATE:
5279 if (shift != SHIFT_ASR)
5280 {
5281 inst.error = _("'ASR' required");
5282 return FAIL;
5283 }
5284 break;
5285
5286 default: abort ();
5287 }
5288
5289 if (shift != SHIFT_RRX)
5290 {
5291 /* Whitespace can appear here if the next thing is a bare digit. */
5292 skip_whitespace (p);
5293
5294 if (mode == NO_SHIFT_RESTRICT
5295 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5296 {
5297 inst.operands[i].imm = reg;
5298 inst.operands[i].immisreg = 1;
5299 }
5300 else if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
5301 return FAIL;
5302 }
5303 inst.operands[i].shift_kind = shift;
5304 inst.operands[i].shifted = 1;
5305 *str = p;
5306 return SUCCESS;
5307 }
5308
/* Parse a <shifter_operand> for an ARM data processing instruction:

      #<immediate>
      #<immediate>, <rotate>
      <Rm>
      <Rm>, <shift>

   where <shift> is defined by parse_shift above, and <rotate> is a
   multiple of 2 between 0 and 30.  Validation of immediate operands
   is deferred to md_apply_fix.  */

static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS exp;

  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      /* Register form, with an optional shift.  */
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.relocs[0].exp.X_op = O_constant;
      inst.relocs[0].exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  if (my_get_expression (&inst.relocs[0].exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&exp, str, GE_NO_PREFIX))
	return FAIL;

      if (exp.X_op != O_constant || inst.relocs[0].exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      /* The rotation must be an even amount in [0, 30] and the base
	 constant an 8-bit value.  */
      value = exp.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      if (inst.relocs[0].exp.X_add_number < 0
	  || inst.relocs[0].exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Encode as specified.  */
      inst.operands[i].imm = inst.relocs[0].exp.X_add_number | value << 7;
      return SUCCESS;
    }

  /* Plain immediate: leave encoding (and validation) to md_apply_fix.  */
  inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
  inst.relocs[0].pc_rel = 0;
  return SUCCESS;
}
5379
/* Group relocation information.  Each entry in the table contains the
   textual name of the relocation as may appear in assembler source
   and must end with a colon.
   Along with this textual name are the relocation codes to be used if
   the corresponding instruction is an ALU instruction (ADD or SUB only),
   an LDR, an LDRS, or an LDC.  A code of 0 means the group relocation
   is not available for that instruction class.  */

struct group_reloc_table_entry
{
  const char *name;
  int alu_code;
  int ldr_code;
  int ldrs_code;
  int ldc_code;
};

typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC
} group_reloc_type;

static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 },	/* LDC */
    /* Absolute thumb alu relocations.  */
    { "lower0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "lower8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 } };				/* LDC.  */
5479
5480 /* Given the address of a pointer pointing to the textual name of a group
5481 relocation as may appear in assembler source, attempt to find its details
5482 in group_reloc_table. The pointer will be updated to the character after
5483 the trailing colon. On failure, FAIL will be returned; SUCCESS
5484 otherwise. On success, *entry will be updated to point at the relevant
5485 group_reloc_table entry. */
5486
5487 static int
5488 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5489 {
5490 unsigned int i;
5491 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5492 {
5493 int length = strlen (group_reloc_table[i].name);
5494
5495 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5496 && (*str)[length] == ':')
5497 {
5498 *out = &group_reloc_table[i];
5499 *str += (length + 1);
5500 return SUCCESS;
5501 }
5502 }
5503
5504 return FAIL;
5505 }
5506
/* Parse a <shifter_operand> for an ARM data processing instruction
   (as for parse_shifter_operand) where group relocations are allowed:

      #<immediate>
      #<immediate>, <rotate>
      #:<group_reloc>:<expression>
      <Rm>
      <Rm>, <shift>

   where <group_reloc> is one of the strings defined in group_reloc_table.
   The hashes are optional.

   Everything else is as for parse_shifter_operand.  */

static parse_operand_result
parse_shifter_operand_group_reloc (char **str, int i)
{
  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a group relocation.
     If we don't, punt the whole lot to parse_shifter_operand.  */

  if (((*str)[0] == '#' && (*str)[1] == ':')
      || (*str)[0] == ':')
    {
      struct group_reloc_table_entry *entry;

      /* Skip over the leading '#:' or ':'.  */
      if ((*str)[0] == '#')
	(*str) += 2;
      else
	(*str)++;

      /* Try to parse a group relocation.  Anything else is an error.  */
      if (find_group_reloc_table_entry (str, &entry) == FAIL)
	{
	  inst.error = _("unknown group relocation");
	  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
	}

      /* We now have the group relocation table entry corresponding to
	 the name in the assembler source.  Next, we parse the expression.  */
      if (my_get_expression (&inst.relocs[0].exp, str, GE_NO_PREFIX))
	return PARSE_OPERAND_FAIL_NO_BACKTRACK;

      /* Record the relocation type (always the ALU variant here).  */
      inst.relocs[0].type = (bfd_reloc_code_real_type) entry->alu_code;
      gas_assert (inst.relocs[0].type != 0);

      return PARSE_OPERAND_SUCCESS;
    }
  else
    return parse_shifter_operand (str, i) == SUCCESS
	   ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;

  /* Never reached.  */
}
5562
5563 /* Parse a Neon alignment expression. Information is written to
5564 inst.operands[i]. We assume the initial ':' has been skipped.
5565
5566 align .imm = align << 8, .immisalign=1, .preind=0 */
5567 static parse_operand_result
5568 parse_neon_alignment (char **str, int i)
5569 {
5570 char *p = *str;
5571 expressionS exp;
5572
5573 my_get_expression (&exp, &p, GE_NO_PREFIX);
5574
5575 if (exp.X_op != O_constant)
5576 {
5577 inst.error = _("alignment must be constant");
5578 return PARSE_OPERAND_FAIL;
5579 }
5580
5581 inst.operands[i].imm = exp.X_add_number << 8;
5582 inst.operands[i].immisalign = 1;
5583 /* Alignments are not pre-indexes. */
5584 inst.operands[i].preind = 0;
5585
5586 *str = p;
5587 return PARSE_OPERAND_SUCCESS;
5588 }
5589
5590 /* Parse all forms of an ARM address expression. Information is written
5591 to inst.operands[i] and/or inst.relocs[0].
5592
5593 Preindexed addressing (.preind=1):
5594
5595 [Rn, #offset] .reg=Rn .relocs[0].exp=offset
5596 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5597 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5598 .shift_kind=shift .relocs[0].exp=shift_imm
5599
5600 These three may have a trailing ! which causes .writeback to be set also.
5601
5602 Postindexed addressing (.postind=1, .writeback=1):
5603
5604 [Rn], #offset .reg=Rn .relocs[0].exp=offset
5605 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5606 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5607 .shift_kind=shift .relocs[0].exp=shift_imm
5608
5609 Unindexed addressing (.preind=0, .postind=0):
5610
5611 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5612
5613 Other:
5614
5615 [Rn]{!} shorthand for [Rn,#0]{!}
5616 =immediate .isreg=0 .relocs[0].exp=immediate
5617 label .reg=PC .relocs[0].pc_rel=1 .relocs[0].exp=label
5618
5619 It is the caller's responsibility to check for addressing modes not
5620 supported by the instruction, and to set inst.relocs[0].type. */
5621
5622 static parse_operand_result
5623 parse_address_main (char **str, int i, int group_relocations,
5624 group_reloc_type group_type)
5625 {
5626 char *p = *str;
5627 int reg;
5628
5629 if (skip_past_char (&p, '[') == FAIL)
5630 {
5631 if (skip_past_char (&p, '=') == FAIL)
5632 {
5633 /* Bare address - translate to PC-relative offset. */
5634 inst.relocs[0].pc_rel = 1;
5635 inst.operands[i].reg = REG_PC;
5636 inst.operands[i].isreg = 1;
5637 inst.operands[i].preind = 1;
5638
5639 if (my_get_expression (&inst.relocs[0].exp, &p, GE_OPT_PREFIX_BIG))
5640 return PARSE_OPERAND_FAIL;
5641 }
5642 else if (parse_big_immediate (&p, i, &inst.relocs[0].exp,
5643 /*allow_symbol_p=*/TRUE))
5644 return PARSE_OPERAND_FAIL;
5645
5646 *str = p;
5647 return PARSE_OPERAND_SUCCESS;
5648 }
5649
5650 /* PR gas/14887: Allow for whitespace after the opening bracket. */
5651 skip_whitespace (p);
5652
5653 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5654 {
5655 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5656 return PARSE_OPERAND_FAIL;
5657 }
5658 inst.operands[i].reg = reg;
5659 inst.operands[i].isreg = 1;
5660
5661 if (skip_past_comma (&p) == SUCCESS)
5662 {
5663 inst.operands[i].preind = 1;
5664
5665 if (*p == '+') p++;
5666 else if (*p == '-') p++, inst.operands[i].negative = 1;
5667
5668 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5669 {
5670 inst.operands[i].imm = reg;
5671 inst.operands[i].immisreg = 1;
5672
5673 if (skip_past_comma (&p) == SUCCESS)
5674 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
5675 return PARSE_OPERAND_FAIL;
5676 }
5677 else if (skip_past_char (&p, ':') == SUCCESS)
5678 {
5679 /* FIXME: '@' should be used here, but it's filtered out by generic
5680 code before we get to see it here. This may be subject to
5681 change. */
5682 parse_operand_result result = parse_neon_alignment (&p, i);
5683
5684 if (result != PARSE_OPERAND_SUCCESS)
5685 return result;
5686 }
5687 else
5688 {
5689 if (inst.operands[i].negative)
5690 {
5691 inst.operands[i].negative = 0;
5692 p--;
5693 }
5694
5695 if (group_relocations
5696 && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
5697 {
5698 struct group_reloc_table_entry *entry;
5699
5700 /* Skip over the #: or : sequence. */
5701 if (*p == '#')
5702 p += 2;
5703 else
5704 p++;
5705
5706 /* Try to parse a group relocation. Anything else is an
5707 error. */
5708 if (find_group_reloc_table_entry (&p, &entry) == FAIL)
5709 {
5710 inst.error = _("unknown group relocation");
5711 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5712 }
5713
5714 /* We now have the group relocation table entry corresponding to
5715 the name in the assembler source. Next, we parse the
5716 expression. */
5717 if (my_get_expression (&inst.relocs[0].exp, &p, GE_NO_PREFIX))
5718 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5719
5720 /* Record the relocation type. */
5721 switch (group_type)
5722 {
5723 case GROUP_LDR:
5724 inst.relocs[0].type
5725 = (bfd_reloc_code_real_type) entry->ldr_code;
5726 break;
5727
5728 case GROUP_LDRS:
5729 inst.relocs[0].type
5730 = (bfd_reloc_code_real_type) entry->ldrs_code;
5731 break;
5732
5733 case GROUP_LDC:
5734 inst.relocs[0].type
5735 = (bfd_reloc_code_real_type) entry->ldc_code;
5736 break;
5737
5738 default:
5739 gas_assert (0);
5740 }
5741
5742 if (inst.relocs[0].type == 0)
5743 {
5744 inst.error = _("this group relocation is not allowed on this instruction");
5745 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5746 }
5747 }
5748 else
5749 {
5750 char *q = p;
5751
5752 if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
5753 return PARSE_OPERAND_FAIL;
5754 /* If the offset is 0, find out if it's a +0 or -0. */
5755 if (inst.relocs[0].exp.X_op == O_constant
5756 && inst.relocs[0].exp.X_add_number == 0)
5757 {
5758 skip_whitespace (q);
5759 if (*q == '#')
5760 {
5761 q++;
5762 skip_whitespace (q);
5763 }
5764 if (*q == '-')
5765 inst.operands[i].negative = 1;
5766 }
5767 }
5768 }
5769 }
5770 else if (skip_past_char (&p, ':') == SUCCESS)
5771 {
5772 /* FIXME: '@' should be used here, but it's filtered out by generic code
5773 before we get to see it here. This may be subject to change. */
5774 parse_operand_result result = parse_neon_alignment (&p, i);
5775
5776 if (result != PARSE_OPERAND_SUCCESS)
5777 return result;
5778 }
5779
5780 if (skip_past_char (&p, ']') == FAIL)
5781 {
5782 inst.error = _("']' expected");
5783 return PARSE_OPERAND_FAIL;
5784 }
5785
5786 if (skip_past_char (&p, '!') == SUCCESS)
5787 inst.operands[i].writeback = 1;
5788
5789 else if (skip_past_comma (&p) == SUCCESS)
5790 {
5791 if (skip_past_char (&p, '{') == SUCCESS)
5792 {
5793 /* [Rn], {expr} - unindexed, with option */
5794 if (parse_immediate (&p, &inst.operands[i].imm,
5795 0, 255, TRUE) == FAIL)
5796 return PARSE_OPERAND_FAIL;
5797
5798 if (skip_past_char (&p, '}') == FAIL)
5799 {
5800 inst.error = _("'}' expected at end of 'option' field");
5801 return PARSE_OPERAND_FAIL;
5802 }
5803 if (inst.operands[i].preind)
5804 {
5805 inst.error = _("cannot combine index with option");
5806 return PARSE_OPERAND_FAIL;
5807 }
5808 *str = p;
5809 return PARSE_OPERAND_SUCCESS;
5810 }
5811 else
5812 {
5813 inst.operands[i].postind = 1;
5814 inst.operands[i].writeback = 1;
5815
5816 if (inst.operands[i].preind)
5817 {
5818 inst.error = _("cannot combine pre- and post-indexing");
5819 return PARSE_OPERAND_FAIL;
5820 }
5821
5822 if (*p == '+') p++;
5823 else if (*p == '-') p++, inst.operands[i].negative = 1;
5824
5825 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5826 {
5827 /* We might be using the immediate for alignment already. If we
5828 are, OR the register number into the low-order bits. */
5829 if (inst.operands[i].immisalign)
5830 inst.operands[i].imm |= reg;
5831 else
5832 inst.operands[i].imm = reg;
5833 inst.operands[i].immisreg = 1;
5834
5835 if (skip_past_comma (&p) == SUCCESS)
5836 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
5837 return PARSE_OPERAND_FAIL;
5838 }
5839 else
5840 {
5841 char *q = p;
5842
5843 if (inst.operands[i].negative)
5844 {
5845 inst.operands[i].negative = 0;
5846 p--;
5847 }
5848 if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
5849 return PARSE_OPERAND_FAIL;
5850 /* If the offset is 0, find out if it's a +0 or -0. */
5851 if (inst.relocs[0].exp.X_op == O_constant
5852 && inst.relocs[0].exp.X_add_number == 0)
5853 {
5854 skip_whitespace (q);
5855 if (*q == '#')
5856 {
5857 q++;
5858 skip_whitespace (q);
5859 }
5860 if (*q == '-')
5861 inst.operands[i].negative = 1;
5862 }
5863 }
5864 }
5865 }
5866
5867 /* If at this point neither .preind nor .postind is set, we have a
5868 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
5869 if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
5870 {
5871 inst.operands[i].preind = 1;
5872 inst.relocs[0].exp.X_op = O_constant;
5873 inst.relocs[0].exp.X_add_number = 0;
5874 }
5875 *str = p;
5876 return PARSE_OPERAND_SUCCESS;
5877 }
5878
5879 static int
5880 parse_address (char **str, int i)
5881 {
5882 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5883 ? SUCCESS : FAIL;
5884 }
5885
5886 static parse_operand_result
5887 parse_address_group_reloc (char **str, int i, group_reloc_type type)
5888 {
5889 return parse_address_main (str, i, 1, type);
5890 }
5891
/* Parse an operand for a MOVW or MOVT instruction.  Accepts an optional
   '#' prefix and an optional ':lower16:'/':upper16:' relocation marker
   before the expression.  On success advances *STR past the operand and
   returns SUCCESS; otherwise sets inst.error and returns FAIL.

   NOTE(review): relies on inst.relocs[0].type being BFD_RELOC_UNUSED on
   entry when no marker is present — presumably reset per instruction by
   the caller; confirm before reusing elsewhere.  */
static int
parse_half (char **str)
{
  char * p;

  p = *str;
  skip_past_char (&p, '#');
  if (strncasecmp (p, ":lower16:", 9) == 0)
    inst.relocs[0].type = BFD_RELOC_ARM_MOVW;
  else if (strncasecmp (p, ":upper16:", 9) == 0)
    inst.relocs[0].type = BFD_RELOC_ARM_MOVT;

  /* A marker was recognized above; step over its 9 characters.  */
  if (inst.relocs[0].type != BFD_RELOC_UNUSED)
    {
      p += 9;
      skip_whitespace (p);
    }

  if (my_get_expression (&inst.relocs[0].exp, &p, GE_NO_PREFIX))
    return FAIL;

  /* Without a relocation marker the operand must be a constant that
     fits in the 16-bit immediate field.  */
  if (inst.relocs[0].type == BFD_RELOC_UNUSED)
    {
      if (inst.relocs[0].exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}
      if (inst.relocs[0].exp.X_add_number < 0
	  || inst.relocs[0].exp.X_add_number > 0xffff)
	{
	  inst.error = _("immediate value out of range");
	  return FAIL;
	}
    }
  *str = p;
  return SUCCESS;
}
5931
5932 /* Miscellaneous. */
5933
/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
   or a bitmask suitable to be or-ed into the ARM msr instruction.
   LHS is TRUE when the operand is the destination of an MSR (a write).  */
static int
parse_psr (char **str, bfd_boolean lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bfd_boolean is_apsr = FALSE;
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698:  If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    m_profile = FALSE;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */
      is_apsr = TRUE;
      psr_field = 0;
    }
  else if (m_profile)
    {
      /* M-profile: scan the whole identifier and look it up in the
	 v7-M special-register table.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* These names end at their first 'r'/'R'; truncate the lookup key
	 there so any trailing suffix is excluded from the hash probe.  */
      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified.  Note that
	 APSR itself is handled above.  */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = TRUE;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  /* Step over the 4-character register name matched above.  */
  p += 4;
 check_suffix:
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  /* A repeated flag sets the 0x20 (or, for 'g', 0x2) sentinel,
	     which the validation below rejects.  */
	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  /* All five of n, z, c, v and q were given exactly once.  */
	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  if (g_bit == 0x1)
	    {
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject duplicates and partial nzcvq sets.  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  /* Ordinary [CS]PSR suffix: look the field name up directly.  */
	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						      p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;    /* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here).  */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
6130
6131 static int
6132 parse_sys_vldr_vstr (char **str)
6133 {
6134 unsigned i;
6135 int val = FAIL;
6136 struct {
6137 const char *name;
6138 int regl;
6139 int regh;
6140 } sysregs[] = {
6141 {"FPSCR", 0x1, 0x0},
6142 {"FPSCR_nzcvqc", 0x2, 0x0},
6143 {"VPR", 0x4, 0x1},
6144 {"P0", 0x5, 0x1},
6145 {"FPCXTNS", 0x6, 0x1},
6146 {"FPCXTS", 0x7, 0x1}
6147 };
6148 char *op_end = strchr (*str, ',');
6149 size_t op_strlen = op_end - *str;
6150
6151 for (i = 0; i < sizeof (sysregs) / sizeof (sysregs[0]); i++)
6152 {
6153 if (!strncmp (*str, sysregs[i].name, op_strlen))
6154 {
6155 val = sysregs[i].regl | (sysregs[i].regh << 3);
6156 *str = op_end;
6157 break;
6158 }
6159 }
6160
6161 return val;
6162 }
6163
6164 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
6165 value suitable for splatting into the AIF field of the instruction. */
6166
6167 static int
6168 parse_cps_flags (char **str)
6169 {
6170 int val = 0;
6171 int saw_a_flag = 0;
6172 char *s = *str;
6173
6174 for (;;)
6175 switch (*s++)
6176 {
6177 case '\0': case ',':
6178 goto done;
6179
6180 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
6181 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
6182 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
6183
6184 default:
6185 inst.error = _("unrecognized CPS flag");
6186 return FAIL;
6187 }
6188
6189 done:
6190 if (saw_a_flag == 0)
6191 {
6192 inst.error = _("missing CPS flags");
6193 return FAIL;
6194 }
6195
6196 *str = s - 1;
6197 return val;
6198 }
6199
6200 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6201 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6202
6203 static int
6204 parse_endian_specifier (char **str)
6205 {
6206 int little_endian;
6207 char *s = *str;
6208
6209 if (strncasecmp (s, "BE", 2))
6210 little_endian = 0;
6211 else if (strncasecmp (s, "LE", 2))
6212 little_endian = 1;
6213 else
6214 {
6215 inst.error = _("valid endian specifiers are be or le");
6216 return FAIL;
6217 }
6218
6219 if (ISALNUM (s[2]) || s[2] == '_')
6220 {
6221 inst.error = _("valid endian specifiers are be or le");
6222 return FAIL;
6223 }
6224
6225 *str = s + 2;
6226 return little_endian;
6227 }
6228
6229 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6230 value suitable for poking into the rotate field of an sxt or sxta
6231 instruction, or FAIL on error. */
6232
6233 static int
6234 parse_ror (char **str)
6235 {
6236 int rot;
6237 char *s = *str;
6238
6239 if (strncasecmp (s, "ROR", 3) == 0)
6240 s += 3;
6241 else
6242 {
6243 inst.error = _("missing rotation field after comma");
6244 return FAIL;
6245 }
6246
6247 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6248 return FAIL;
6249
6250 switch (rot)
6251 {
6252 case 0: *str = s; return 0x0;
6253 case 8: *str = s; return 0x1;
6254 case 16: *str = s; return 0x2;
6255 case 24: *str = s; return 0x3;
6256
6257 default:
6258 inst.error = _("rotation can only be 0, 8, 16, or 24");
6259 return FAIL;
6260 }
6261 }
6262
6263 /* Parse a conditional code (from conds[] below). The value returned is in the
6264 range 0 .. 14, or FAIL. */
6265 static int
6266 parse_cond (char **str)
6267 {
6268 char *q;
6269 const struct asm_cond *c;
6270 int n;
6271 /* Condition codes are always 2 characters, so matching up to
6272 3 characters is sufficient. */
6273 char cond[3];
6274
6275 q = *str;
6276 n = 0;
6277 while (ISALPHA (*q) && n < 3)
6278 {
6279 cond[n] = TOLOWER (*q);
6280 q++;
6281 n++;
6282 }
6283
6284 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6285 if (!c)
6286 {
6287 inst.error = _("condition required");
6288 return FAIL;
6289 }
6290
6291 *str = q;
6292 return c->value;
6293 }
6294
/* Record a use of the given feature.  */
static void
record_feature_use (const arm_feature_set *feature)
{
  /* Accumulate used features separately for Thumb and ARM state, so the
     two instruction sets can be tracked independently.  */
  if (thumb_mode)
    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
  else
    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
}
6304
6305 /* If the given feature is currently allowed, mark it as used and return TRUE.
6306 Return FALSE otherwise. */
6307 static bfd_boolean
6308 mark_feature_used (const arm_feature_set *feature)
6309 {
6310 /* Ensure the option is currently allowed. */
6311 if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
6312 return FALSE;
6313
6314 /* Add the appropriate architecture feature for the barrier option used. */
6315 record_feature_use (feature);
6316
6317 return TRUE;
6318 }
6319
6320 /* Parse an option for a barrier instruction. Returns the encoding for the
6321 option, or FAIL. */
6322 static int
6323 parse_barrier (char **str)
6324 {
6325 char *p, *q;
6326 const struct asm_barrier_opt *o;
6327
6328 p = q = *str;
6329 while (ISALPHA (*q))
6330 q++;
6331
6332 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6333 q - p);
6334 if (!o)
6335 return FAIL;
6336
6337 if (!mark_feature_used (&o->arch))
6338 return FAIL;
6339
6340 *str = q;
6341 return o->value;
6342 }
6343
6344 /* Parse the operands of a table branch instruction. Similar to a memory
6345 operand. */
6346 static int
6347 parse_tb (char **str)
6348 {
6349 char * p = *str;
6350 int reg;
6351
6352 if (skip_past_char (&p, '[') == FAIL)
6353 {
6354 inst.error = _("'[' expected");
6355 return FAIL;
6356 }
6357
6358 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6359 {
6360 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6361 return FAIL;
6362 }
6363 inst.operands[0].reg = reg;
6364
6365 if (skip_past_comma (&p) == FAIL)
6366 {
6367 inst.error = _("',' expected");
6368 return FAIL;
6369 }
6370
6371 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6372 {
6373 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6374 return FAIL;
6375 }
6376 inst.operands[0].imm = reg;
6377
6378 if (skip_past_comma (&p) == SUCCESS)
6379 {
6380 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6381 return FAIL;
6382 if (inst.relocs[0].exp.X_add_number != 1)
6383 {
6384 inst.error = _("invalid shift");
6385 return FAIL;
6386 }
6387 inst.operands[0].shifted = 1;
6388 }
6389
6390 if (skip_past_char (&p, ']') == FAIL)
6391 {
6392 inst.error = _("']' expected");
6393 return FAIL;
6394 }
6395 *str = p;
6396 return SUCCESS;
6397 }
6398
/* Parse the operands of a Neon VMOV instruction.  See do_neon_mov for more
   information on the types the operands can take and how they are encoded.
   Up to four operands may be read; this function handles setting the
   ".present" field for each read operand itself.
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   else returns FAIL.

   The leading register class of the first operand selects which of the
   numbered VMOV cases is being parsed; I indexes inst.operands and is
   advanced as each operand is recorded.  */

static int
parse_neon_mov (char **str, int *which_operand)
{
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  First operand is a vector register;
	 the second operand disambiguates between the cases.  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* Case 5: a second ARM core register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15: two ARM core registers follow.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
	       == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7: first operand is an ARM core register.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14: a second single-precision register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands.  Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
6621
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  The low 16 bits hold the ARM matcher
   code and the high 16 bits the Thumb one; parse_operands picks the
   half appropriate to the current instruction set.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))

/* Matcher codes for parse_operands.  Each code describes one operand
   that an instruction's parse pattern may request.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,       /* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNSD,      /* Neon single or double precision register */
  OP_RNDQ,      /* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,      /* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  /* New operands for Armv8.1-M Mainline.  */
  OP_LR,	/* ARM LR register */
  OP_RRnpcsp_I32, /* ARM register (no BadReg) or literal 1 .. 32 */

  OP_REGLST,	/* ARM register list */
  OP_CLRMLST,	/* CLRM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,   /* VFP single or double-precision register list (& quad) */
  OP_NRDLST,    /* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,   /* Neon element/structure list */
  OP_VRSDVLST,  /* VFP single or double-precision register list and VPR */

  OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0, /* VFP S or D reg, or floating point immediate zero.  */
  OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
  OP_RNSD_RNSC, /* Neon S or D reg, or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
  OP_VMOV,      /* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */
  OP_VLDR,	/* VLDR operand.  */

  OP_I0,        /* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,	/*		   0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_EXPs,	/* same, with optional non-first operand relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */
  OP_IROT1,	/* VCADD rotate immediate: 90, 270.  */
  OP_IROT2,	/* VCMLA rotate immediate: 0, 90, 180, 270.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,   /* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc stuff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  Everything from OP_FIRST_OPTIONAL onwards may
     be absent; parse_operands records a backtrack point for them.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,      /*                             1 .. 32 */
  OP_oI32z,      /*                             0 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oLR,	 /* ARM LR register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,       /* Optional Neon double precision register */
  OP_oRNQ,       /* Optional Neon quad precision register */
  OP_oRNDQ,      /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  /* Must be the first entry of the optional-operand group; used by
     parse_operands to decide when backtracking is permitted.  */
  OP_FIRST_OPTIONAL = OP_oI7b
};
6770
/* Generic instruction operand parser.  This does no encoding and no
   semantic validation; it merely squirrels values away in the inst
   structure.  Returns SUCCESS or FAIL depending on whether the
   specified grammar matched.  */
static int
parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
{
  unsigned const int *upat = pattern;
  char *backtrack_pos = 0;
  const char *backtrack_error = 0;
  int i, val = 0, backtrack_index = 0;
  enum arm_reg_type rtype;
  parse_operand_result result;
  unsigned int op_parse_code;
  bfd_boolean partial_match;

  /* Helper macros for the parsing loop below.  Each consumes input from
     STR on success and jumps to `failure' (or the supplied label) on
     mismatch.  They all run inside the for-loop over operands, so `i'
     and `str' are live in their expansions.  */

#define po_char_or_fail(chr)			\
  do						\
    {						\
      if (skip_past_char (&str, chr) == FAIL)	\
	goto bad_args;				\
    }						\
  while (0)

#define po_reg_or_fail(regtype)					\
  do								\
    {								\
      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
				 & inst.operands[i].vectype);	\
      if (val == FAIL)						\
	{							\
	  first_error (_(reg_expected_msgs[regtype]));		\
	  goto failure;						\
	}							\
      inst.operands[i].reg = val;				\
      inst.operands[i].isreg = 1;				\
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
				|| rtype == REG_TYPE_VFD	\
				|| rtype == REG_TYPE_NQ);	\
    }								\
  while (0)

#define po_reg_or_goto(regtype, label)				\
  do								\
    {								\
      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
				 & inst.operands[i].vectype);	\
      if (val == FAIL)						\
	goto label;						\
								\
      inst.operands[i].reg = val;				\
      inst.operands[i].isreg = 1;				\
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
				|| rtype == REG_TYPE_VFD	\
				|| rtype == REG_TYPE_NQ);	\
    }								\
  while (0)

#define po_imm_or_fail(min, max, popt)				\
  do								\
    {								\
      if (parse_immediate (&str, &val, min, max, popt) == FAIL)	\
	goto failure;						\
      inst.operands[i].imm = val;				\
    }								\
  while (0)

#define po_scalar_or_goto(elsz, label)					\
  do									\
    {									\
      val = parse_scalar (& str, elsz, & inst.operands[i].vectype);	\
      if (val == FAIL)							\
	goto label;							\
      inst.operands[i].reg = val;					\
      inst.operands[i].isscalar = 1;					\
    }									\
  while (0)

#define po_misc_or_fail(expr)			\
  do						\
    {						\
      if (expr)					\
	goto failure;				\
    }						\
  while (0)

#define po_misc_or_fail_no_backtrack(expr)		\
  do							\
    {							\
      result = expr;					\
      if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)	\
	backtrack_pos = 0;				\
      if (result != PARSE_OPERAND_SUCCESS)		\
	goto failure;					\
    }							\
  while (0)

#define po_barrier_or_imm(str)				\
  do							\
    {							\
      val = parse_barrier (&str);			\
      if (val == FAIL && ! ISALPHA (*str))		\
	goto immediate;					\
      if (val == FAIL					\
	  /* ISB can only take SY as an option.  */	\
	  || ((inst.instruction & 0xf0) == 0x60		\
	      && val != 0xf))				\
	{						\
	  inst.error = _("invalid barrier type");	\
	  backtrack_pos = 0;				\
	  goto failure;					\
	}						\
    }							\
  while (0)

  skip_whitespace (str);

  for (i = 0; upat[i] != OP_stop; i++)
    {
      op_parse_code = upat[i];
      /* A mixed ARM/Thumb operand code packs the Thumb variant in the
	 upper 16 bits; pick the half matching the current mode.  */
      if (op_parse_code >= 1<<16)
	op_parse_code = thumb ? (op_parse_code >> 16)
			      : (op_parse_code & ((1<<16)-1));

      if (op_parse_code >= OP_FIRST_OPTIONAL)
	{
	  /* Remember where we are in case we need to backtrack.  */
	  gas_assert (!backtrack_pos);
	  backtrack_pos = str;
	  backtrack_error = inst.error;
	  backtrack_index = i;
	}

      if (i > 0 && (i > 1 || inst.operands[0].present))
	po_char_or_fail (',');

      switch (op_parse_code)
	{
	  /* Registers */
	case OP_oRRnpc:
	case OP_oRRnpcsp:
	case OP_RRnpc:
	case OP_RRnpcsp:
	case OP_oRR:
	case OP_LR:
	case OP_oLR:
	case OP_RR:    po_reg_or_fail (REG_TYPE_RN);	  break;
	case OP_RCP:   po_reg_or_fail (REG_TYPE_CP);	  break;
	case OP_RCN:   po_reg_or_fail (REG_TYPE_CN);	  break;
	case OP_RF:    po_reg_or_fail (REG_TYPE_FN);	  break;
	case OP_RVS:   po_reg_or_fail (REG_TYPE_VFS);	  break;
	case OP_RVD:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_oRND:
	case OP_RND:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_RVC:
	  po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
	  break;
	  /* Also accept generic coprocessor regs for unknown registers.  */
	coproc_reg:
	  po_reg_or_fail (REG_TYPE_CN);
	  break;
	case OP_RMF:   po_reg_or_fail (REG_TYPE_MVF);	  break;
	case OP_RMD:   po_reg_or_fail (REG_TYPE_MVD);	  break;
	case OP_RMFX:  po_reg_or_fail (REG_TYPE_MVFX);	  break;
	case OP_RMDX:  po_reg_or_fail (REG_TYPE_MVDX);	  break;
	case OP_RMAX:  po_reg_or_fail (REG_TYPE_MVAX);	  break;
	case OP_RMDS:  po_reg_or_fail (REG_TYPE_DSPSC);	  break;
	case OP_RIWR:  po_reg_or_fail (REG_TYPE_MMXWR);	  break;
	case OP_RIWC:  po_reg_or_fail (REG_TYPE_MMXWC);	  break;
	case OP_RIWG:  po_reg_or_fail (REG_TYPE_MMXWCG);  break;
	case OP_RXA:   po_reg_or_fail (REG_TYPE_XSCALE);  break;
	case OP_oRNQ:
	case OP_RNQ:   po_reg_or_fail (REG_TYPE_NQ);	  break;
	case OP_RNSD:  po_reg_or_fail (REG_TYPE_NSD);	  break;
	case OP_oRNDQ:
	case OP_RNDQ:  po_reg_or_fail (REG_TYPE_NDQ);	  break;
	case OP_RVSD:  po_reg_or_fail (REG_TYPE_VFSD);	  break;
	case OP_oRNSDQ:
	case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ);	  break;

	  /* Neon scalar.  Using an element size of 8 means that some invalid
	     scalars are accepted here, so deal with those in later code.  */
	case OP_RNSC:  po_scalar_or_goto (8, failure);	  break;

	case OP_RNDQ_I0:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
	    break;
	    try_imm0:
	    po_imm_or_fail (0, 0, TRUE);
	  }
	  break;

	case OP_RVSD_I0:
	  po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
	  break;

	case OP_RSVD_FI0:
	  {
	    po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
	    break;
	    try_ifimm0:
	    if (parse_ifimm_zero (&str))
	      inst.operands[i].imm = 0;
	    else
	      {
		inst.error
		  = _("only floating point zero is allowed as immediate value");
		goto failure;
	      }
	  }
	  break;

	case OP_RR_RNSC:
	  {
	    po_scalar_or_goto (8, try_rr);
	    break;
	    try_rr:
	    po_reg_or_fail (REG_TYPE_RN);
	  }
	  break;

	case OP_RNSDQ_RNSC:
	  {
	    po_scalar_or_goto (8, try_nsdq);
	    break;
	    try_nsdq:
	    po_reg_or_fail (REG_TYPE_NSDQ);
	  }
	  break;

	case OP_RNSD_RNSC:
	  {
	    po_scalar_or_goto (8, try_s_scalar);
	    break;
	    try_s_scalar:
	    po_scalar_or_goto (4, try_nsd);
	    break;
	    try_nsd:
	    po_reg_or_fail (REG_TYPE_NSD);
	  }
	  break;

	case OP_RNDQ_RNSC:
	  {
	    po_scalar_or_goto (8, try_ndq);
	    break;
	    try_ndq:
	    po_reg_or_fail (REG_TYPE_NDQ);
	  }
	  break;

	case OP_RND_RNSC:
	  {
	    po_scalar_or_goto (8, try_vfd);
	    break;
	    try_vfd:
	    po_reg_or_fail (REG_TYPE_VFD);
	  }
	  break;

	case OP_VMOV:
	  /* WARNING: parse_neon_mov can move the operand counter, i.  If we're
	     not careful then bad things might happen.  */
	  po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
	  break;

	case OP_RNDQ_Ibig:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
	    break;
	    try_immbig:
	    /* There's a possibility of getting a 64-bit immediate here, so
	       we need special handling.  */
	    if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
		== FAIL)
	      {
		inst.error = _("immediate value is out of range");
		goto failure;
	      }
	  }
	  break;

	case OP_RNDQ_I63b:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
	    break;
	    try_shimm:
	    po_imm_or_fail (0, 63, TRUE);
	  }
	  break;

	case OP_RRnpcb:
	  po_char_or_fail ('[');
	  po_reg_or_fail (REG_TYPE_RN);
	  po_char_or_fail (']');
	  break;

	case OP_RRnpctw:
	case OP_RRw:
	case OP_oRRw:
	  po_reg_or_fail (REG_TYPE_RN);
	  /* A trailing '!' requests base-register writeback.  */
	  if (skip_past_char (&str, '!') == SUCCESS)
	    inst.operands[i].writeback = 1;
	  break;

	  /* Immediates */
	case OP_I7:	 po_imm_or_fail (  0,	   7, FALSE);	break;
	case OP_I15:	 po_imm_or_fail (  0,	  15, FALSE);	break;
	case OP_I16:	 po_imm_or_fail (  1,	  16, FALSE);	break;
	case OP_I16z:	 po_imm_or_fail (  0,	  16, FALSE);	break;
	case OP_I31:	 po_imm_or_fail (  0,	  31, FALSE);	break;
	case OP_I32:	 po_imm_or_fail (  1,	  32, FALSE);	break;
	case OP_I32z:	 po_imm_or_fail (  0,	  32, FALSE);	break;
	case OP_I63s:	 po_imm_or_fail (-64,	  63, FALSE);	break;
	case OP_I63:	 po_imm_or_fail (  0,	  63, FALSE);	break;
	case OP_I64:	 po_imm_or_fail (  1,	  64, FALSE);	break;
	case OP_I64z:	 po_imm_or_fail (  0,	  64, FALSE);	break;
	case OP_I255:	 po_imm_or_fail (  0,	 255, FALSE);	break;

	case OP_I4b:	 po_imm_or_fail (  1,	   4, TRUE);	break;
	case OP_oI7b:
	case OP_I7b:	 po_imm_or_fail (  0,	   7, TRUE);	break;
	case OP_I15b:	 po_imm_or_fail (  0,	  15, TRUE);	break;
	case OP_oI31b:
	case OP_I31b:	 po_imm_or_fail (  0,	  31, TRUE);	break;
	case OP_oI32b:	 po_imm_or_fail (  1,	  32, TRUE);	break;
	case OP_oI32z:	 po_imm_or_fail (  0,	  32, TRUE);	break;
	case OP_oIffffb: po_imm_or_fail (  0, 0xffff, TRUE);	break;

	  /* Immediate variants */
	case OP_oI255c:
	  po_char_or_fail ('{');
	  po_imm_or_fail (0, 255, TRUE);
	  po_char_or_fail ('}');
	  break;

	case OP_I31w:
	  /* The expression parser chokes on a trailing !, so we have
	     to find it first and zap it.  */
	  {
	    char *s = str;
	    while (*s && *s != ',')
	      s++;
	    if (s[-1] == '!')
	      {
		s[-1] = '\0';
		inst.operands[i].writeback = 1;
	      }
	    po_imm_or_fail (0, 31, TRUE);
	    if (str == s - 1)
	      str = s;
	  }
	  break;

	  /* Expressions */
	case OP_EXPi:	EXPi:
	  po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
					      GE_OPT_PREFIX));
	  break;

	case OP_EXP:
	  po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
					      GE_NO_PREFIX));
	  break;

	case OP_EXPr:	EXPr:
	  po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
					      GE_NO_PREFIX));
	  if (inst.relocs[0].exp.X_op == O_symbol)
	    {
	      val = parse_reloc (&str);
	      if (val == -1)
		{
		  inst.error = _("unrecognized relocation suffix");
		  goto failure;
		}
	      else if (val != BFD_RELOC_UNUSED)
		{
		  inst.operands[i].imm = val;
		  inst.operands[i].hasreloc = 1;
		}
	    }
	  break;

	case OP_EXPs:
	  po_misc_or_fail (my_get_expression (&inst.relocs[i].exp, &str,
					      GE_NO_PREFIX));
	  if (inst.relocs[i].exp.X_op == O_symbol)
	    {
	      inst.operands[i].hasreloc = 1;
	    }
	  else if (inst.relocs[i].exp.X_op == O_constant)
	    {
	      inst.operands[i].imm = inst.relocs[i].exp.X_add_number;
	      inst.operands[i].hasreloc = 0;
	    }
	  break;

	  /* Operand for MOVW or MOVT.  */
	case OP_HALF:
	  po_misc_or_fail (parse_half (&str));
	  break;

	  /* Register or expression.  */
	case OP_RR_EXr:	  po_reg_or_goto (REG_TYPE_RN, EXPr); break;
	case OP_RR_EXi:	  po_reg_or_goto (REG_TYPE_RN, EXPi); break;

	  /* Register or immediate.  */
	case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0);   break;
	I0:		  po_imm_or_fail (0, 0, FALSE);	      break;

	case OP_RF_IF:	  po_reg_or_goto (REG_TYPE_FN, IF);   break;
	IF:
	  if (!is_immediate_prefix (*str))
	    goto bad_args;
	  str++;
	  val = parse_fpa_immediate (&str);
	  if (val == FAIL)
	    goto failure;
	  /* FPA immediates are encoded as registers 8-15.
	     parse_fpa_immediate has already applied the offset.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  break;

	case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
	I32z:		   po_imm_or_fail (0, 32, FALSE);	  break;

	  /* Two kinds of register.  */
	case OP_RIWR_RIWC:
	  {
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
	    if (!rege
		|| (rege->type != REG_TYPE_MMXWR
		    && rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
	      {
		inst.error = _("iWMMXt data or control register expected");
		goto failure;
	      }
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
	  }
	  break;

	case OP_RIWC_RIWG:
	  {
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
	    if (!rege
		|| (rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
	      {
		inst.error = _("iWMMXt control register expected");
		goto failure;
	      }
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = 1;
	  }
	  break;

	  /* Misc */
	case OP_CPSF:	 val = parse_cps_flags (&str);		break;
	case OP_ENDI:	 val = parse_endian_specifier (&str);	break;
	case OP_oROR:	 val = parse_ror (&str);		break;
	case OP_COND:	 val = parse_cond (&str);		break;
	case OP_oBARRIER_I15:
	  po_barrier_or_imm (str); break;
	  immediate:
	  if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
	    goto failure;
	  break;

	case OP_wPSR:
	case OP_rPSR:
	  po_reg_or_goto (REG_TYPE_RNB, try_psr);
	  /* A banked register was parsed; those require the
	     virtualization extension.  */
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
	    {
	      inst.error = _("Banked registers are not available with this "
			     "architecture.");
	      goto failure;
	    }
	  break;
	  try_psr:
	  val = parse_psr (&str, op_parse_code == OP_wPSR);
	  break;

	case OP_VLDR:
	  po_reg_or_goto (REG_TYPE_VFSD, try_sysreg);
	  break;
	  try_sysreg:
	  /* VLDR/VSTR may also name a system register (e.g. FPSCR).  */
	  val = parse_sys_vldr_vstr (&str);
	  break;

	case OP_APSR_RR:
	  po_reg_or_goto (REG_TYPE_RN, try_apsr);
	  break;
	  try_apsr:
	  /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
	     instruction).  */
	  if (strncasecmp (str, "APSR_", 5) == 0)
	    {
	      /* FOUND accumulates one bit per flag letter; a repeated or
		 unknown letter forces it to 16 so the check below fails.  */
	      unsigned found = 0;
	      str += 5;
	      while (found < 15)
		switch (*str++)
		  {
		  case 'c': found = (found & 1) ? 16 : found | 1; break;
		  case 'n': found = (found & 2) ? 16 : found | 2; break;
		  case 'z': found = (found & 4) ? 16 : found | 4; break;
		  case 'v': found = (found & 8) ? 16 : found | 8; break;
		  default: found = 16;
		  }
	      if (found != 15)
		goto failure;
	      inst.operands[i].isvec = 1;
	      /* APSR_nzcv is encoded in instructions as if it were the REG_PC.  */
	      inst.operands[i].reg = REG_PC;
	    }
	  else
	    goto failure;
	  break;

	case OP_TB:
	  po_misc_or_fail (parse_tb (&str));
	  break;

	  /* Register lists.  */
	case OP_REGLST:
	  val = parse_reg_list (&str, REGLIST_RN);
	  if (*str == '^')
	    {
	      inst.operands[i].writeback = 1;
	      str++;
	    }
	  break;

	case OP_CLRMLST:
	  val = parse_reg_list (&str, REGLIST_CLRM);
	  break;

	case OP_VRSLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S,
				    &partial_match);
	  break;

	case OP_VRDLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D,
				    &partial_match);
	  break;

	case OP_VRSDLST:
	  /* Allow Q registers too.  */
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_NEON_D, &partial_match);
	  if (val == FAIL)
	    {
	      inst.error = NULL;
	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
					REGLIST_VFP_S, &partial_match);
	      inst.operands[i].issingle = 1;
	    }
	  break;

	case OP_VRSDVLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_VFP_D_VPR, &partial_match);
	  if (val == FAIL && !partial_match)
	    {
	      inst.error = NULL;
	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
					REGLIST_VFP_S_VPR, &partial_match);
	      inst.operands[i].issingle = 1;
	    }
	  break;

	case OP_NRDLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_NEON_D, &partial_match);
	  break;

	case OP_NSTRLST:
	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
					   &inst.operands[i].vectype);
	  break;

	  /* Addressing modes */
	case OP_ADDR:
	  po_misc_or_fail (parse_address (&str, i));
	  break;

	case OP_ADDRGLDR:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDR));
	  break;

	case OP_ADDRGLDRS:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDRS));
	  break;

	case OP_ADDRGLDC:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDC));
	  break;

	case OP_SH:
	  po_misc_or_fail (parse_shifter_operand (&str, i));
	  break;

	case OP_SHG:
	  po_misc_or_fail_no_backtrack (
	    parse_shifter_operand_group_reloc (&str, i));
	  break;

	case OP_oSHll:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
	  break;

	case OP_oSHar:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
	  break;

	case OP_oSHllar:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
	  break;

	default:
	  as_fatal (_("unhandled operand code %d"), op_parse_code);
	}

      /* Various value-based sanity checks and shared operations.  We
	 do not signal immediate failures for the register constraints;
	 this allows a syntax error to take precedence.  */
      switch (op_parse_code)
	{
	case OP_oRRnpc:
	case OP_RRnpc:
	case OP_RRnpcb:
	case OP_RRw:
	case OP_oRRw:
	case OP_RRnpc_I0:
	  if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
	    inst.error = BAD_PC;
	  break;

	case OP_oRRnpcsp:
	case OP_RRnpcsp:
	  if (inst.operands[i].isreg)
	    {
	      if (inst.operands[i].reg == REG_PC)
		inst.error = BAD_PC;
	      else if (inst.operands[i].reg == REG_SP
		       /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
			  relaxed since ARMv8-A.  */
		       && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
		{
		  gas_assert (thumb);
		  inst.error = BAD_SP;
		}
	    }
	  break;

	case OP_RRnpctw:
	  if (inst.operands[i].isreg
	      && inst.operands[i].reg == REG_PC
	      && (inst.operands[i].writeback || thumb))
	    inst.error = BAD_PC;
	  break;

	case OP_VLDR:
	  /* A register operand was parsed; only the system-register form
	     needs the VAL bookkeeping below.  */
	  if (inst.operands[i].isreg)
	    break;
	  /* fall through.  */
	case OP_CPSF:
	case OP_ENDI:
	case OP_oROR:
	case OP_wPSR:
	case OP_rPSR:
	case OP_COND:
	case OP_oBARRIER_I15:
	case OP_REGLST:
	case OP_CLRMLST:
	case OP_VRSLST:
	case OP_VRDLST:
	case OP_VRSDLST:
	case OP_VRSDVLST:
	case OP_NRDLST:
	case OP_NSTRLST:
	  if (val == FAIL)
	    goto failure;
	  inst.operands[i].imm = val;
	  break;

	case OP_LR:
	case OP_oLR:
	  if (inst.operands[i].reg != REG_LR)
	    inst.error = _("operand must be LR register");
	  break;

	default:
	  break;
	}

      /* If we get here, this operand was successfully parsed.  */
      inst.operands[i].present = 1;
      continue;

    bad_args:
      inst.error = BAD_ARGS;

    failure:
      if (!backtrack_pos)
	{
	  /* The parse routine should already have set inst.error, but set a
	     default here just in case.  */
	  if (!inst.error)
	    inst.error = _("syntax error");
	  return FAIL;
	}

      /* Do not backtrack over a trailing optional argument that
	 absorbed some text.  We will only fail again, with the
	 'garbage following instruction' error message, which is
	 probably less helpful than the current one.  */
      if (backtrack_index == i && backtrack_pos != str
	  && upat[i+1] == OP_stop)
	{
	  if (!inst.error)
	    inst.error = _("syntax error");
	  return FAIL;
	}

      /* Try again, skipping the optional argument at backtrack_pos.  */
      str = backtrack_pos;
      inst.error = backtrack_error;
      inst.operands[backtrack_index].present = 0;
      i = backtrack_index;
      backtrack_pos = 0;
    }

  /* Check that we have parsed all the arguments.  */
  if (*str != '\0' && !inst.error)
    inst.error = _("garbage following instruction");

  return inst.error ? FAIL : SUCCESS;
}
7522
/* Tear down the operand-parsing helper macros; they are local to
   parse_operands.  Fixed: undef the macros that were actually defined
   above (po_scalar_or_goto, not the nonexistent po_scalar_or_fail) and
   include the two po_misc_* helpers that were previously left
   defined.  */
#undef po_char_or_fail
#undef po_reg_or_fail
#undef po_reg_or_goto
#undef po_imm_or_fail
#undef po_scalar_or_goto
#undef po_misc_or_fail
#undef po_misc_or_fail_no_backtrack
#undef po_barrier_or_imm
7529
/* Shorthand macro for instruction encoding functions issuing errors.
   If EXPR is true, record ERR in inst.error and return from the
   calling function.  Note that the expansion contains a bare `return',
   so it may only be used inside functions returning void.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
7541
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.

   Before ARMv8-A, REG_PC and REG_SP were not allowed in quite a few
   places, while the restriction on REG_SP was relaxed since ARMv8-A.

   Like `constraint', this expands to a `return', so it is only usable
   in functions returning void.  */
#define reject_bad_reg(reg)					\
  do								\
   if (reg == REG_PC)						\
     {								\
       inst.error = BAD_PC;					\
       return;							\
     }								\
   else if (reg == REG_SP					\
	    && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))	\
     {								\
       inst.error = BAD_SP;					\
       return;							\
     }								\
  while (0)
7562
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  The warning is only emitted when the user asked for
   deprecation warnings (warn_on_deprecated).  */
#define warn_deprecated_sp(reg)			\
  do						\
    if (warn_on_deprecated && reg == REG_SP)	\
       as_tsktsk (_("use of r13 is deprecated"));	\
  while (0)
7570
/* Functions for operand encoding.  ARM, then Thumb.  */

/* Rotate the 32-bit value V left by N bits.  Both shift counts are
   masked to [0,31], so the expansion never shifts by 32 (which would
   be undefined behaviour) even when N is 0.  */
#define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7574
/* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.

   The only binary encoding difference is the Coprocessor number.  Coprocessor
   9 is used for half-precision calculations or conversions.  The format of the
   instruction is the same as the equivalent Coprocessor 10 instruction that
   exists for Single-Precision operation.  */

static void
do_scalar_fp16_v82_encode (void)
{
  /* Conditional execution of these instructions is left to the CPU as
     UNPREDICTABLE; warn rather than reject.  */
  if (inst.cond != COND_ALWAYS)
    as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
	       " the behaviour is UNPREDICTABLE"));
  /* Requires the FP16 extension; bail out (via constraint's return)
     otherwise.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
	      _(BAD_FP16));

  /* Rewrite the coprocessor-number field (bits 8-11) to 9.  */
  inst.instruction = (inst.instruction & 0xfffff0ff) | 0x900;
  mark_feature_used (&arm_ext_fp16);
}
7594
/* If VAL can be encoded in the immediate field of an ARM instruction,
   return the encoded form.  Otherwise, return FAIL.

   An ARM modified immediate is an 8-bit constant rotated right by an
   even amount; the encoding packs the rotation count (divided by 2)
   in bits 8-11 above the 8-bit constant.  */

static unsigned int
encode_arm_immediate (unsigned int val)
{
  unsigned int a, i;

  /* No rotation needed for small values.  */
  if (val <= 0xff)
    return val;

  /* Rotating VAL left by I is equivalent to rotating the 8-bit
     constant right by I; try every even rotation.  */
  for (i = 2; i < 32; i += 2)
    if ((a = rotate_left (val, i)) <= 0xff)
      return a | (i << 7); /* 12-bit pack: [shift-cnt,const].  */

  return FAIL;
}
7612
7613 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7614 return the encoded form. Otherwise, return FAIL. */
7615 static unsigned int
7616 encode_thumb32_immediate (unsigned int val)
7617 {
7618 unsigned int a, i;
7619
7620 if (val <= 0xff)
7621 return val;
7622
7623 for (i = 1; i <= 24; i++)
7624 {
7625 a = val >> i;
7626 if ((val & ~(0xff << i)) == 0)
7627 return ((val >> i) & 0x7f) | ((32 - i) << 7);
7628 }
7629
7630 a = val & 0xff;
7631 if (val == ((a << 16) | a))
7632 return 0x100 | a;
7633 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
7634 return 0x300 | a;
7635
7636 a = val & 0xff00;
7637 if (val == ((a << 16) | a))
7638 return 0x200 | (a >> 8);
7639
7640 return FAIL;
7641 }
/* Encode a VFP SP or DP register number into inst.instruction.

   POS selects which instruction field receives REG.  S registers are
   split as reg[4:1] in the main field plus reg[0] in a separate bit;
   D registers as reg[3:0] in the main field plus reg[4] in an
   extension bit.  */

static void
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
{
  /* D registers 16-31 need the D32 VFP extension; record the feature
     use for the current mode, or error out if unavailable.  */
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      && reg > 15)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	{
	  first_error (_("D register out of range for selected VFP version"));
	  return;
	}
    }

  switch (pos)
    {
    case VFP_REG_Sd:
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      break;

    case VFP_REG_Sn:
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      break;

    case VFP_REG_Sm:
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      break;

    case VFP_REG_Dd:
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      break;

    case VFP_REG_Dn:
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      break;

    case VFP_REG_Dm:
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
      break;

    default:
      abort ();
    }
}
7696
/* Encode a <shift> in an ARM-format instruction.  The immediate,
   if any, is handled by md_apply_fix.  */
static void
encode_arm_shift (int i)
{
  /* register-shifted register.  */
  if (inst.operands[i].immisreg)
    {
      int op_index;
      for (op_index = 0; op_index <= i; ++op_index)
	{
	  /* Check the operand only when it's presented.  In pre-UAL syntax,
	     if the destination register is the same as the first operand, two
	     register form of the instruction can be used.  */
	  if (inst.operands[op_index].present && inst.operands[op_index].isreg
	      && inst.operands[op_index].reg == REG_PC)
	    as_warn (UNPRED_REG ("r15"));
	}

      /* The register holding the shift amount may not be PC either.  */
      if (inst.operands[i].imm == REG_PC)
	as_warn (UNPRED_REG ("r15"));
    }

  if (inst.operands[i].shift_kind == SHIFT_RRX)
    /* RRX is encoded as ROR with an (implicit) zero shift amount.  */
    inst.instruction |= SHIFT_ROR << 5;
  else
    {
      inst.instruction |= inst.operands[i].shift_kind << 5;
      if (inst.operands[i].immisreg)
	{
	  inst.instruction |= SHIFT_BY_REG;
	  inst.instruction |= inst.operands[i].imm << 8;
	}
      else
	/* Immediate shift amount: leave it to the fixup machinery.  */
	inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
    }
}
7734
/* Encode inst.operands[i] -- either a (possibly shifted) register or
   an immediate -- into the shifter-operand field of an ARM
   data-processing instruction.  */
static void
encode_arm_shifter_operand (int i)
{
  if (inst.operands[i].isreg)
    {
      inst.instruction |= inst.operands[i].reg;
      encode_arm_shift (i);
    }
  else
    {
      inst.instruction |= INST_IMMEDIATE;
      /* When the immediate is carried by a reloc, md_apply_fix fills
	 it in later; otherwise encode it directly.  */
      if (inst.relocs[0].type != BFD_RELOC_ARM_IMMEDIATE)
	inst.instruction |= inst.operands[i].imm;
    }
}
7750
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encodes the base register and the pre/post-index and writeback bits
   of inst.operands[i]; IS_T means the caller is a T-variant
   (user-mode) load/store, which only permits post-indexing.  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* PR 14260:
     Generate an error if the operand is not a register.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      gas_assert (inst.operands[i].writeback);
      /* For T instructions the W bit marks the user-mode access.  */
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Warn when the transfer register (bits 12-15) equals a write-back
     base register (bits 16-19).  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
7793
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset form.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    /* RRX is ROR with a zero shift amount.  */
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.relocs[0] */
    {
      if (is_pc && !inst.relocs[0].pc_rel)
	{
	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_tsktsk (_("use of PC in this instruction is deprecated"));
	}

      if (inst.relocs[0].type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;
	  inst.relocs[0].type = BFD_RELOC_ARM_OFFSET_IMM;
	}
    }
}
7853
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no shifted-register offset form.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset form.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_t && inst.operands[i].reg == REG_PC)),
		  BAD_PC_ADDRESSING);
      constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
		  BAD_PC_WRITEBACK);
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.relocs[0] */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.relocs[0].pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      inst.instruction |= HWOFFSET_IMM;
      if (inst.relocs[0].type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;

	  inst.relocs[0].type = BFD_RELOC_ARM_OFFSET_IMM8;
	}
    }
}
7897
/* Write immediate bits [7:0] to the following locations:

  |28/24|23     19|18 16|15                    4|3     0|
  |  a  |x x x x x|b c d|x x x x x x x x x x x x|e f g h|

  This function is used by VMOV/VMVN/VORR/VBIC.

  Bit 'a' sits at position 28 in Thumb encodings and 24 in ARM
  encodings, hence the thumb_mode test.  */

static void
neon_write_immbits (unsigned immbits)
{
  inst.instruction |= immbits & 0xf;
  inst.instruction |= ((immbits >> 4) & 0x7) << 16;
  inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
}
7912
/* Invert low-order SIZE bits of XHI:XLO.  Either pointer may be NULL,
   in which case that half is left alone.  The high word is only
   affected by a 64-bit invert.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned lo_mask;

  /* Select the mask covering the significant bits of the low word.  */
  switch (size)
    {
    case 8:
      lo_mask = 0xff;
      break;

    case 16:
      lo_mask = 0xffff;
      break;

    case 32:
    case 64:
      lo_mask = 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = (~*xlo) & lo_mask;

  /* Only size 64 inverts the high word.  */
  if (xhi && size == 64)
    *xhi = (~*xhi) & 0xffffffff;
}
7949
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D; i.e. every byte of IMM is either 0x00 or 0xff.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  int byte_idx;

  for (byte_idx = 0; byte_idx < 4; byte_idx++)
    {
      unsigned byte = (imm >> (byte_idx * 8)) & 0xff;

      if (byte != 0 && byte != 0xff)
	return 0;
    }

  return 1;
}
7961
/* For immediate of above form, return 0bABCD: collapse each byte of
   IMM down to its low bit, packing the four results into a nibble.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned nibble = 0;
  int byte_idx;

  for (byte_idx = 0; byte_idx < 4; byte_idx++)
    nibble |= ((imm >> (byte_idx * 8)) & 0x01) << byte_idx;

  return nibble;
}
7970
/* Compress quarter-float representation to 0b...000 abcdefgh: the
   sign bit from bit 31 lands in bit 7, and bits 25-19 of IMM supply
   the low seven bits.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned low_seven = (imm >> 19) & 0x7f;
  unsigned sign_bit = (imm >> 24) & 0x80;

  return sign_bit | low_seven;
}
7978
7979 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
7980 the instruction. *OP is passed as the initial value of the op field, and
7981 may be set to a different value depending on the constant (i.e.
7982 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
7983 MVN). If the immediate looks like a repeated pattern then also
7984 try smaller element sizes. */
7985
static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* Quarter-precision float immediate: cmode 0xf.  Only valid for 32-bit
     elements, and not for the MVN form.  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* 64-bit pattern where every byte is 0x00 or 0xff: cmode 0xe with
	 OP forced to 1; each immediate bit selects one byte.  */
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* Otherwise both halves must match so the value can be treated as
	 a repeated 32-bit pattern.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* One non-zero byte within a 32-bit element: cmodes 0x0/0x2/0x4/0x6
	 select the byte position.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      /* One byte shifted in over ones below it: cmodes 0xc/0xd.  */
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* Fall back to a 16-bit element when the 32-bit value is the same
	 16-bit pattern repeated.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* One non-zero byte within a 16-bit element: cmode 0x8 or 0xa.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* Fall back to an 8-bit element when both bytes match.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
8088
8089 #if defined BFD_HOST_64_BIT
8090 /* Returns TRUE if double precision value V may be cast
8091 to single precision without loss of accuracy. */
8092
8093 static bfd_boolean
8094 is_double_a_single (bfd_int64_t v)
8095 {
8096 int exp = (int)((v >> 52) & 0x7FF);
8097 bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
8098
8099 return (exp == 0 || exp == 0x7FF
8100 || (exp >= 1023 - 126 && exp <= 1023 + 127))
8101 && (mantissa & 0x1FFFFFFFl) == 0;
8102 }
8103
8104 /* Returns a double precision value casted to single precision
8105 (ignoring the least significant bits in exponent and mantissa). */
8106
8107 static int
8108 double_to_single (bfd_int64_t v)
8109 {
8110 int sign = (int) ((v >> 63) & 1l);
8111 int exp = (int) ((v >> 52) & 0x7FF);
8112 bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
8113
8114 if (exp == 0x7FF)
8115 exp = 0xFF;
8116 else
8117 {
8118 exp = exp - 1023 + 127;
8119 if (exp >= 0xFF)
8120 {
8121 /* Infinity. */
8122 exp = 0x7F;
8123 mantissa = 0;
8124 }
8125 else if (exp < 0)
8126 {
8127 /* No denormalized numbers. */
8128 exp = 0;
8129 mantissa = 0;
8130 }
8131 }
8132 mantissa >>= 29;
8133 return (sign << 31) | (exp << 23) | mantissa;
8134 }
8135 #endif /* BFD_HOST_64_BIT */
8136
/* Kind of "=constant" literal being loaded; used by
   move_or_literal_pool to pick the replacement encoding.  */
enum lit_type
{
  CONST_THUMB,	/* Thumb LDR rX, =expr.  */
  CONST_ARM,	/* ARM LDR rX, =expr.  */
  CONST_VEC	/* VFP/Neon VLDR reg, =expr.  */
};
8143
8144 static void do_vfp_nsyn_opcode (const char *);
8145
8146 /* inst.relocs[0].exp describes an "=expr" load pseudo-operation.
8147 Determine whether it can be performed with a move instruction; if
8148 it can, convert inst.instruction to that move instruction and
8149 return TRUE; if it can't, convert inst.instruction to a literal-pool
8150 load and return FALSE. If this is not a valid thing to do in the
8151 current context, set inst.error and return TRUE.
8152
8153 inst.operands[i] describes the destination register. */
8154
static bfd_boolean
move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
{
  unsigned long tbit;
  bfd_boolean thumb_p = (t == CONST_THUMB);
  bfd_boolean arm_p = (t == CONST_ARM);

  /* Find the load bit for this encoding: 16-bit Thumb, 32-bit Thumb-2,
     or ARM.  */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  /* The "=expr" pseudo only makes sense on a load.  */
  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }

  if (inst.relocs[0].exp.X_op != O_constant
      && inst.relocs[0].exp.X_op != O_symbol
      && inst.relocs[0].exp.X_op != O_big)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }

  if (inst.relocs[0].exp.X_op == O_constant
      || inst.relocs[0].exp.X_op == O_big)
    {
#if defined BFD_HOST_64_BIT
      bfd_int64_t v;
#else
      offsetT v;
#endif
      if (inst.relocs[0].exp.X_op == O_big)
	{
	  LITTLENUM_TYPE w[X_PRECISION];
	  LITTLENUM_TYPE * l;

	  if (inst.relocs[0].exp.X_add_number == -1)
	    {
	      /* X_add_number of -1 presumably marks a floating-point
		 bignum; convert it to integer littlenums first.  */
	      gen_to_words (w, X_PRECISION, E_PRECISION);
	      l = w;
	      /* FIXME: Should we check words w[2..5] ?  */
	    }
	  else
	    l = generic_bignum;

	  /* Reassemble the value from littlenums, least significant
	     first (l[0] is the low word).  */
#if defined BFD_HOST_64_BIT
	  v =
	    ((((((((bfd_int64_t) l[3] & LITTLENUM_MASK)
		  << LITTLENUM_NUMBER_OF_BITS)
		 | ((bfd_int64_t) l[2] & LITTLENUM_MASK))
		<< LITTLENUM_NUMBER_OF_BITS)
	       | ((bfd_int64_t) l[1] & LITTLENUM_MASK))
	      << LITTLENUM_NUMBER_OF_BITS)
	     | ((bfd_int64_t) l[0] & LITTLENUM_MASK));
#else
	  v = ((l[1] & LITTLENUM_MASK) << LITTLENUM_NUMBER_OF_BITS)
	      | (l[0] & LITTLENUM_MASK);
#endif
	}
      else
	v = inst.relocs[0].exp.X_add_number;

      if (!inst.operands[i].issingle)
	{
	  if (thumb_p)
	    {
	      /* LDR should not use lead in a flag-setting instruction being
		 chosen so we do not check whether movs can be used.  */

	      if ((ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
		   || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		  && inst.operands[i].reg != 13
		  && inst.operands[i].reg != 15)
		{
		  /* Check if on thumb2 it can be done with a mov.w, mvn or
		     movw instruction.  */
		  unsigned int newimm;
		  bfd_boolean isNegated;

		  newimm = encode_thumb32_immediate (v);
		  if (newimm != (unsigned int) FAIL)
		    isNegated = FALSE;
		  else
		    {
		      /* Try the complement, to be loaded with MVN.  */
		      newimm = encode_thumb32_immediate (~v);
		      if (newimm != (unsigned int) FAIL)
			isNegated = TRUE;
		    }

		  /* The number can be loaded with a mov.w or mvn
		     instruction.  */
		  if (newimm != (unsigned int) FAIL
		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		    {
		      inst.instruction = (0xf04f0000  /* MOV.W.  */
					  | (inst.operands[i].reg << 8));
		      /* Change to MOVN.  */
		      inst.instruction |= (isNegated ? 0x200000 : 0);
		      inst.instruction |= (newimm & 0x800) << 15;
		      inst.instruction |= (newimm & 0x700) << 4;
		      inst.instruction |= (newimm & 0x0ff);
		      return TRUE;
		    }
		  /* The number can be loaded with a movw instruction.  */
		  else if ((v & ~0xFFFF) == 0
			   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		    {
		      int imm = v & 0xFFFF;

		      inst.instruction = 0xf2400000;  /* MOVW.  */
		      inst.instruction |= (inst.operands[i].reg << 8);
		      inst.instruction |= (imm & 0xf000) << 4;
		      inst.instruction |= (imm & 0x0800) << 15;
		      inst.instruction |= (imm & 0x0700) << 4;
		      inst.instruction |= (imm & 0x00ff);
		      return TRUE;
		    }
		}
	    }
	  else if (arm_p)
	    {
	      /* Try a plain rotated-immediate MOV, then MVN of the
		 complement.  */
	      int value = encode_arm_immediate (v);

	      if (value != FAIL)
		{
		  /* This can be done with a mov instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}

	      value = encode_arm_immediate (~ v);
	      if (value != FAIL)
		{
		  /* This can be done with a mvn instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}
	    }
	  else if (t == CONST_VEC && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	    {
	      /* Try a Neon VMOV/VMVN immediate encoding for the 64-bit
		 value.  */
	      int op = 0;
	      unsigned immbits = 0;
	      unsigned immlo = inst.operands[1].imm;
	      unsigned immhi = inst.operands[1].regisimm
		? inst.operands[1].reg
		: inst.relocs[0].exp.X_unsigned
		? 0
		: ((bfd_int64_t)((int) immlo)) >> 32;
	      int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);

	      if (cmode == FAIL)
		{
		  /* Retry with the inverted value and the complementary
		     VMOV/VMVN operation.  */
		  neon_invert_size (&immlo, &immhi, 64);
		  op = !op;
		  cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);
		}

	      if (cmode != FAIL)
		{
		  inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
		    | (1 << 23)
		    | (cmode << 8)
		    | (op << 5)
		    | (1 << 4);

		  /* Fill other bits in vmov encoding for both thumb and arm.  */
		  if (thumb_mode)
		    inst.instruction |= (0x7U << 29) | (0xF << 24);
		  else
		    inst.instruction |= (0xFU << 28) | (0x1 << 25);
		  neon_write_immbits (immbits);
		  return TRUE;
		}
	    }
	}

      if (t == CONST_VEC)
	{
	  /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant.  */
	  if (inst.operands[i].issingle
	      && is_quarter_float (inst.operands[1].imm)
	      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
	    {
	      inst.operands[1].imm =
		neon_qfloat_bits (v);
	      do_vfp_nsyn_opcode ("fconsts");
	      return TRUE;
	    }

	  /* If our host does not support a 64-bit type then we cannot perform
	     the following optimization.  This means that there will be a
	     discrepancy between the output produced by an assembler built for
	     a 32-bit-only host and the output produced from a 64-bit host, but
	     this cannot be helped.  */
#if defined BFD_HOST_64_BIT
	  else if (!inst.operands[1].issingle
		   && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	    {
	      if (is_double_a_single (v)
		  && is_quarter_float (double_to_single (v)))
		{
		  inst.operands[1].imm =
		    neon_qfloat_bits (double_to_single (v));
		  do_vfp_nsyn_opcode ("fconstd");
		  return TRUE;
		}
	    }
#endif
	}
    }

  /* No move encoding worked: stash the constant in the literal pool and
     rewrite the operand as a PC-relative load from it.  */
  if (add_to_lit_pool ((!inst.operands[i].isvec
			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
    return TRUE;

  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.relocs[0].pc_rel = 1;
  inst.relocs[0].type = (thumb_p
			 ? BFD_RELOC_ARM_THUMB_OFFSET
			 : (mode_3
			    ? BFD_RELOC_ARM_HWLITERAL
			    : BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
8390
8391 /* inst.operands[i] was set up by parse_address. Encode it into an
8392 ARM-format instruction. Reject all forms which cannot be encoded
8393 into a coprocessor load/store instruction. If wb_ok is false,
8394 reject use of writeback; if unind_ok is false, reject use of
8395 unindexed addressing. If reloc_override is not 0, use it instead
8396 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
8397 (in which case it is preserved). */
8398
static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  if (!inst.operands[i].isreg)
    {
      /* PR 18256: a bare "=constant" is only meaningful for VLDR.  */
      if (! inst.operands[0].isvec)
	{
	  inst.error = _("invalid co-processor operand");
	  return FAIL;
	}
      /* TRUE here means the operation was fully handled (or errored);
	 FALSE means operand I was rewritten as a PC-relative literal
	 load, so fall through and encode it below.  */
      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
	return SUCCESS;
    }

  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* Unindexed form: the immediate goes in bits 7:0 as-is.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  /* Pick the offset relocation; an existing group relocation
     (ALU_PC_G0_NC .. LDC_SB_G2) or LDR_PC_G0 is preserved.  */
  if (reloc_override)
    inst.relocs[0].type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.relocs[0].type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.relocs[0].type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.relocs[0].type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.relocs[0].type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.relocs[0].type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
8467
8468 /* Functions for instruction encoding, sorted by sub-architecture.
8469 First some generics; their names are taken from the conventional
8470 bit positions for register arguments in ARM format instructions. */
8471
/* Encoder for instructions whose base opcode needs no operand fields
   filled in.  */
static void
do_noargs (void)
{
}
8476
8477 static void
8478 do_rd (void)
8479 {
8480 inst.instruction |= inst.operands[0].reg << 12;
8481 }
8482
8483 static void
8484 do_rn (void)
8485 {
8486 inst.instruction |= inst.operands[0].reg << 16;
8487 }
8488
8489 static void
8490 do_rd_rm (void)
8491 {
8492 inst.instruction |= inst.operands[0].reg << 12;
8493 inst.instruction |= inst.operands[1].reg;
8494 }
8495
8496 static void
8497 do_rm_rn (void)
8498 {
8499 inst.instruction |= inst.operands[0].reg;
8500 inst.instruction |= inst.operands[1].reg << 16;
8501 }
8502
8503 static void
8504 do_rd_rn (void)
8505 {
8506 inst.instruction |= inst.operands[0].reg << 12;
8507 inst.instruction |= inst.operands[1].reg << 16;
8508 }
8509
8510 static void
8511 do_rn_rd (void)
8512 {
8513 inst.instruction |= inst.operands[0].reg << 16;
8514 inst.instruction |= inst.operands[1].reg << 12;
8515 }
8516
8517 static void
8518 do_tt (void)
8519 {
8520 inst.instruction |= inst.operands[0].reg << 8;
8521 inst.instruction |= inst.operands[1].reg << 16;
8522 }
8523
8524 static bfd_boolean
8525 check_obsolete (const arm_feature_set *feature, const char *msg)
8526 {
8527 if (ARM_CPU_IS_ANY (cpu_variant))
8528 {
8529 as_tsktsk ("%s", msg);
8530 return TRUE;
8531 }
8532 else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
8533 {
8534 as_bad ("%s", msg);
8535 return TRUE;
8536 }
8537
8538 return FALSE;
8539 }
8540
/* Encoder for Rd, Rm, Rn instructions; also enforces SWP/SWPB rules.  */
static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  The mask 0x0fbfffff
     clears the condition field and bit 22 (the byte/word selector),
     leaving the pattern common to SWP and SWPB.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    {
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		  _("Rn must not overlap other operands"));

      /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
       */
      if (!check_obsolete (&arm_ext_v8,
			   _("swp{b} use is obsoleted for ARMv8 and later"))
	  && warn_on_deprecated
	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
	as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
    }

  /* Rd -> bits 15:12, Rm -> bits 3:0, Rn -> bits 19:16.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;
}
8564
8565 static void
8566 do_rd_rn_rm (void)
8567 {
8568 inst.instruction |= inst.operands[0].reg << 12;
8569 inst.instruction |= inst.operands[1].reg << 16;
8570 inst.instruction |= inst.operands[2].reg;
8571 }
8572
8573 static void
8574 do_rm_rd_rn (void)
8575 {
8576 constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
8577 constraint (((inst.relocs[0].exp.X_op != O_constant
8578 && inst.relocs[0].exp.X_op != O_illegal)
8579 || inst.relocs[0].exp.X_add_number != 0),
8580 BAD_ADDR_MODE);
8581 inst.instruction |= inst.operands[0].reg;
8582 inst.instruction |= inst.operands[1].reg << 12;
8583 inst.instruction |= inst.operands[2].reg << 16;
8584 }
8585
8586 static void
8587 do_imm0 (void)
8588 {
8589 inst.instruction |= inst.operands[0].imm;
8590 }
8591
/* Encoder for coprocessor load/store: Rd in bits 15:12 plus a
   coprocessor addressing mode for operand 1 (writeback and unindexed
   forms both permitted).  */
static void
do_rd_cpaddr (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
8598
8599 /* ARM instructions, in alphabetical order by function name (except
8600 that wrapper functions appear immediately after the function they
8601 wrap). */
8602
8603 /* This is a pseudo-op of the form "adr rd, label" to be converted
8604 into a relative address of the form "add rd, pc, #label-.-8". */
8605
static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12); /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
  inst.relocs[0].pc_rel = 1;
  /* Compensate for the PC reading 8 bytes ahead in ARM state.  */
  inst.relocs[0].exp.X_add_number -= 8;

  /* When interworking, the address of a defined Thumb function must
     have bit 0 set.  */
  if (support_interwork
      && inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number |= 1;
}
8624
8625 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8626 into a relative address of the form:
8627 add rd, pc, #low(label-.-8)"
8628 add rd, rd, #high(label-.-8)" */
8629
static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12); /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.relocs[0].type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.relocs[0].pc_rel = 1;
  /* ADRL always expands to a two-instruction sequence.  */
  inst.size = INSN_SIZE * 2;
  /* Compensate for the PC reading 8 bytes ahead in ARM state.  */
  inst.relocs[0].exp.X_add_number -= 8;

  /* When interworking, the address of a defined Thumb function must
     have bit 0 set.  */
  if (support_interwork
      && inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number |= 1;
}
8649
/* Data-processing arithmetic: Rd, {Rn,} shifter_operand.  */
static void
do_arit (void)
{
  /* Thumb-1 group relocations cannot be used on an ARM instruction.  */
  constraint (inst.relocs[0].type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.relocs[0].type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,
	      THUMB1_RELOC_ONLY);
  /* Two-operand form: the destination doubles as the first source.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_shifter_operand (2);
}
8662
8663 static void
8664 do_barrier (void)
8665 {
8666 if (inst.operands[0].present)
8667 inst.instruction |= inst.operands[0].imm;
8668 else
8669 inst.instruction |= 0xf;
8670 }
8671
8672 static void
8673 do_bfc (void)
8674 {
8675 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8676 constraint (msb > 32, _("bit-field extends past end of register"));
8677 /* The instruction encoding stores the LSB and MSB,
8678 not the LSB and width. */
8679 inst.instruction |= inst.operands[0].reg << 12;
8680 inst.instruction |= inst.operands[1].imm << 7;
8681 inst.instruction |= (msb - 1) << 16;
8682 }
8683
8684 static void
8685 do_bfi (void)
8686 {
8687 unsigned int msb;
8688
8689 /* #0 in second position is alternative syntax for bfc, which is
8690 the same instruction but with REG_PC in the Rm field. */
8691 if (!inst.operands[1].isreg)
8692 inst.operands[1].reg = REG_PC;
8693
8694 msb = inst.operands[2].imm + inst.operands[3].imm;
8695 constraint (msb > 32, _("bit-field extends past end of register"));
8696 /* The instruction encoding stores the LSB and MSB,
8697 not the LSB and width. */
8698 inst.instruction |= inst.operands[0].reg << 12;
8699 inst.instruction |= inst.operands[1].reg;
8700 inst.instruction |= inst.operands[2].imm << 7;
8701 inst.instruction |= (msb - 1) << 16;
8702 }
8703
8704 static void
8705 do_bfx (void)
8706 {
8707 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8708 _("bit-field extends past end of register"));
8709 inst.instruction |= inst.operands[0].reg << 12;
8710 inst.instruction |= inst.operands[1].reg;
8711 inst.instruction |= inst.operands[2].imm << 7;
8712 inst.instruction |= (inst.operands[3].imm - 1) << 16;
8713 }
8714
8715 /* ARM V5 breakpoint instruction (argument parse)
8716 BKPT <16 bit unsigned immediate>
8717 Instruction is not conditional.
8718 The bit pattern given in insns[] has the COND_ALWAYS condition,
8719 and it is an error if the caller tried to override that. */
8720
8721 static void
8722 do_bkpt (void)
8723 {
8724 /* Top 12 of 16 bits to bits 19:8. */
8725 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
8726
8727 /* Bottom 4 of 16 bits to bits 3:0. */
8728 inst.instruction |= inst.operands[0].imm & 0xf;
8729 }
8730
/* Set up relocs[0] for a branch to operand 0.  A '(plt)' or
   '(tlscall)' suffix overrides DEFAULT_RELOC; the reloc is always
   PC-relative.  */
static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
      inst.relocs[0].type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
	? BFD_RELOC_ARM_PLT32
	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
    }
  else
    inst.relocs[0].type = (bfd_reloc_code_real_type) default_reloc;
  inst.relocs[0].pc_rel = 1;
}
8747
static void
do_branch (void)
{
#ifdef OBJ_ELF
  /* From EABI v4 on, use the jump reloc so the linker can insert
     veneers where needed.  */
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8758
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      /* Only an unconditional BL gets the call reloc; a conditional
	 one uses the plain jump reloc.  */
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8774
8775 /* ARM V5 branch-link-exchange instruction (argument parse)
8776 BLX <target_addr> ie BLX(1)
8777 BLX{<condition>} <Rm> ie BLX(2)
8778 Unfortunately, there are two different opcodes for this mnemonic.
8779 So, the insns[].value is not used, and the code here zaps values
8780 into inst.instruction.
8781 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8782
static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* BLX(2), register form.
	 Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* BLX(1), immediate form.
	 Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction = 0xfa000000;
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
8806
static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (!ARM_FEATURE_ZERO (selected_object_arch)
      && !ARM_CPU_HAS_FEATURE (selected_object_arch, arm_ext_v5))
    want_reloc = TRUE;

  /* For non-ELF targets the #if below disappears and want_reloc is
     unconditionally cleared.  */
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    want_reloc = FALSE;

  if (want_reloc)
    inst.relocs[0].type = BFD_RELOC_ARM_V4BX;
}
8831
8832
8833 /* ARM v5TEJ. Jump to Jazelle code. */
8834
8835 static void
8836 do_bxj (void)
8837 {
8838 if (inst.operands[0].reg == REG_PC)
8839 as_tsktsk (_("use of r15 in bxj is not really useful"));
8840
8841 inst.instruction |= inst.operands[0].reg;
8842 }
8843
8844 /* Co-processor data operation:
8845 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8846 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8847 static void
8848 do_cdp (void)
8849 {
8850 inst.instruction |= inst.operands[0].reg << 8;
8851 inst.instruction |= inst.operands[1].imm << 20;
8852 inst.instruction |= inst.operands[2].reg << 12;
8853 inst.instruction |= inst.operands[3].reg << 16;
8854 inst.instruction |= inst.operands[4].reg;
8855 inst.instruction |= inst.operands[5].imm << 5;
8856 }
8857
8858 static void
8859 do_cmp (void)
8860 {
8861 inst.instruction |= inst.operands[0].reg << 16;
8862 encode_arm_shifter_operand (1);
8863 }
8864
8865 /* Transfer between coprocessor and ARM registers.
8866 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8867 MRC2
8868 MCR{cond}
8869 MCR2
8870
8871 No special properties. */
8872
/* Description of one coprocessor register whose MRC/MCR access is
   deprecated or obsolete on certain architectures.  */
struct deprecated_coproc_regs_s
{
  unsigned cp;			/* Coprocessor number.  */
  int opc1;			/* opcode_1 field.  */
  unsigned crn;			/* CRn field.  */
  unsigned crm;			/* CRm field.  */
  int opc2;			/* opcode_2 field.  */
  arm_feature_set deprecated;	/* Architectures where access is deprecated.  */
  arm_feature_set obsoleted;	/* Architectures where access is obsolete.  */
  const char *dep_msg;		/* Diagnostic for deprecated access.  */
  const char *obs_msg;		/* Diagnostic for obsolete access.  */
};
8885
8886 #define DEPR_ACCESS_V8 \
8887 N_("This coprocessor register access is deprecated in ARMv8")
8888
8889 /* Table of all deprecated coprocessor registers. */
static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
{
  /* Each row is {cp, opc1, crn, crm, opc2, deprecated-from,
     obsoleted-from, deprecation msg, obsolescence msg}; matched
     field-for-field against MRC/MCR operands in do_co_reg.  */
  {15, 0, 7, 10, 5,					/* CP15DMB.  */
   ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
   DEPR_ACCESS_V8, NULL},
  {15, 0, 7, 10, 4,					/* CP15DSB.  */
   ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
   DEPR_ACCESS_V8, NULL},
  {15, 0, 7, 5, 4,					/* CP15ISB.  */
   ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
   DEPR_ACCESS_V8, NULL},
  {14, 6, 1, 0, 0,					/* TEEHBR.  */
   ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
   DEPR_ACCESS_V8, NULL},
  {14, 6, 0, 0, 0,					/* TEECR.  */
   ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
   DEPR_ACCESS_V8, NULL},
};
8908
8909 #undef DEPR_ACCESS_V8
8910
8911 static const size_t deprecated_coproc_reg_count =
8912 sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
8913
static void
do_co_reg (void)
{
  unsigned Rd;
  size_t i;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      /* The literal opcodes identify which mnemonic we are encoding.  */
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	/* MRC, MRC2  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

  /* Warn when this access targets a coprocessor register that is
     deprecated on the selected architecture.  */
  for (i = 0; i < deprecated_coproc_reg_count; ++i)
    {
      const struct deprecated_coproc_regs_s *r =
	deprecated_coproc_regs + i;

      if (inst.operands[0].reg == r->cp
	  && inst.operands[1].imm == r->opc1
	  && inst.operands[3].reg == r->crn
	  && inst.operands[4].reg == r->crm
	  && inst.operands[5].imm == r->opc2)
	{
	  if (! ARM_CPU_IS_ANY (cpu_variant)
	      && warn_on_deprecated
	      && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	    as_tsktsk ("%s", r->dep_msg);
	}
    }

  /* coproc -> 11:8, opcode_1 -> 23:21, Rd -> 15:12, CRn -> 19:16,
     CRm -> 3:0, opcode_2 -> 7:5.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
8963
8964 /* Transfer between coprocessor register and pair of ARM registers.
8965 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8966 MCRR2
8967 MRRC{cond}
8968 MRRC2
8969
8970 Two XScale instructions are special cases of these:
8971
8972 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8973 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8974
8975 Result unpredictable if Rd or Rn is R15. */
8976
static void
do_co_reg2c (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;

  if (thumb_mode)
    {
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
    }
  else
    {
      constraint (Rd == REG_PC, BAD_PC);
      constraint (Rn == REG_PC, BAD_PC);
    }

  /* Only check the MRRC{2} variants.  */
  if ((inst.instruction & 0x0FF00000) == 0x0C500000)
    {
      /* If Rd == Rn, error that the operation is
	 unpredictable (example MRRC p3,#1,r1,r1,c4).  */
      constraint (Rd == Rn, BAD_OVERLAP);
    }

  /* coproc -> 11:8, opcode -> 7:4, Rd -> 15:12, Rn -> 19:16,
     CRm -> 3:0.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 4;
  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;
  inst.instruction |= inst.operands[4].reg;
}
9010
9011 static void
9012 do_cpsi (void)
9013 {
9014 inst.instruction |= inst.operands[0].imm << 6;
9015 if (inst.operands[1].present)
9016 {
9017 inst.instruction |= CPSI_MMOD;
9018 inst.instruction |= inst.operands[1].imm;
9019 }
9020 }
9021
9022 static void
9023 do_dbg (void)
9024 {
9025 inst.instruction |= inst.operands[0].imm;
9026 }
9027
9028 static void
9029 do_div (void)
9030 {
9031 unsigned Rd, Rn, Rm;
9032
9033 Rd = inst.operands[0].reg;
9034 Rn = (inst.operands[1].present
9035 ? inst.operands[1].reg : Rd);
9036 Rm = inst.operands[2].reg;
9037
9038 constraint ((Rd == REG_PC), BAD_PC);
9039 constraint ((Rn == REG_PC), BAD_PC);
9040 constraint ((Rm == REG_PC), BAD_PC);
9041
9042 inst.instruction |= Rd << 16;
9043 inst.instruction |= Rn << 0;
9044 inst.instruction |= Rm << 8;
9045 }
9046
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  /* The pseudo-instruction emits no bytes here.  */
  inst.size = 0;
  if (unified_syntax)
    {
      set_it_insn_type (IT_INSN);
      /* Record the IT mask (low 4 bits of the opcode; 0x10 is or'ed in
	 above them — presumably a marker bit for the IT-block tracking
	 machinery) and the base condition for checking the following
	 instructions.  */
      now_it.mask = (inst.instruction & 0xf) | 0x10;
      now_it.cc = inst.operands[0].imm;
    }
}
9063
9064 /* If there is only one register in the register list,
9065 then return its register number. Otherwise return -1. */
/* If there is only one register in the register list RANGE, return its
   register number (0..15); otherwise return -1.  */
static int
only_one_reg_in_list (int range)
{
  int i = ffs (range) - 1;

  /* ffs returns 0 for an empty list, making I negative; bail out
     before `1 << i' would shift by a negative count (undefined
     behaviour).  */
  if (i < 0 || i > 15)
    return -1;
  return range == (1 << i) ? i : -1;
}
9072
/* Shared encoder for LDM/STM and PUSH/POP: base register in operand 0,
   register-list bitmask in operand 1.  When FROM_PUSH_POP_MNEM, a
   single-register list is rewritten to the A2 (LDR/STR) encoding.  */
static void
encode_ldmstm(int from_push_pop_mnem)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* Writeback on the list operand records the '^' qualifier, which
     selects the user-bank/exception-return (type 2/3) form.  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding.  */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      if (is_push && one_reg == 13 /* SP */)
	/* PR 22483: The A2 encoding cannot be used when
	   pushing the stack pointer as this is UNPREDICTABLE.  */
	return;

      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
9128
/* Handler for the plain LDM/STM mnemonics (not PUSH/POP).  */
static void
do_ldmstm (void)
{
  encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
}
9134
9135 /* ARMv5TE load-consecutive (argument parse)
9136 Mode is like LDRH.
9137
9138 LDRccD R, mode
9139 STRccD R, mode. */
9140
static void
do_ldrd (void)
{
  /* Operand 0 is Rt (must be even), operand 1 the optional Rt2
     (must be Rt+1), operand 2 the address.  */
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  /* Rt == r14 would make Rt2 the PC, which is not permitted.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  /* Rt2 defaults to Rt+1 when omitted.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	  && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
9176
9177 static void
9178 do_ldrex (void)
9179 {
9180 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
9181 || inst.operands[1].postind || inst.operands[1].writeback
9182 || inst.operands[1].immisreg || inst.operands[1].shifted
9183 || inst.operands[1].negative
9184 /* This can arise if the programmer has written
9185 strex rN, rM, foo
9186 or if they have mistakenly used a register name as the last
9187 operand, eg:
9188 strex rN, rM, rX
9189 It is very difficult to distinguish between these two cases
9190 because "rX" might actually be a label. ie the register
9191 name has been occluded by a symbol of the same name. So we
9192 just generate a general 'bad addressing mode' type error
9193 message and leave it up to the programmer to discover the
9194 true cause and fix their mistake. */
9195 || (inst.operands[1].reg == REG_PC),
9196 BAD_ADDR_MODE);
9197
9198 constraint (inst.relocs[0].exp.X_op != O_constant
9199 || inst.relocs[0].exp.X_add_number != 0,
9200 _("offset must be zero in ARM encoding"));
9201
9202 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
9203
9204 inst.instruction |= inst.operands[0].reg << 12;
9205 inst.instruction |= inst.operands[1].reg << 16;
9206 inst.relocs[0].type = BFD_RELOC_UNUSED;
9207 }
9208
/* ARM V6K LDREXD: operand 0 is Rt (even), operand 1 optional Rt2
   (must be Rt+1), operand 2 the base register.  */
static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  /* Rt == r14 would make Rt2 the PC, which is not permitted.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
9224
9225 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
9226 which is not a multiple of four is UNPREDICTABLE. */
static void
check_ldr_r15_aligned (void)
{
  /* Reject "ldr pc, [pc, #imm]" when the literal offset is not a
     multiple of four; only checked for immediate (non-register)
     offsets with both Rt and Rn being the PC.  */
  constraint (!(inst.operands[1].immisreg)
	      && (inst.operands[0].reg == REG_PC
		  && inst.operands[1].reg == REG_PC
		  && (inst.relocs[0].exp.X_add_number & 0x3)),
	      _("ldr to register 15 must be 4-byte aligned"));
}
9236
9237 static void
9238 do_ldst (void)
9239 {
9240 inst.instruction |= inst.operands[0].reg << 12;
9241 if (!inst.operands[1].isreg)
9242 if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
9243 return;
9244 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
9245 check_ldr_r15_aligned ();
9246 }
9247
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      /* Only a zero offset can be silently converted.  */
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
9266
9267 /* Halfword and signed-byte load/store operations. */
9268
9269 static void
9270 do_ldstv4 (void)
9271 {
9272 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
9273 inst.instruction |= inst.operands[0].reg << 12;
9274 if (!inst.operands[1].isreg)
9275 if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
9276 return;
9277 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
9278 }
9279
static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      /* Only a zero offset can be silently converted.  */
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
9298
9299 /* Co-processor register load/store.
9300 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
static void
do_lstc (void)
{
  /* Operand 0 is the coprocessor number, operand 1 CRd, operand 2 the
     address (which may be a literal-pool reference).  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
9308
/* MLA/MLS: Rd, Rm, Rs, Rn.  Bit 22 (0x00400000) distinguishes MLS.  */
static void
do_mlas (void)
{
  /* This restriction does not apply to mls (nor to mla in v6 or later).  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
      && !(inst.instruction & 0x00400000))
    as_tsktsk (_("Rd and Rm should be different in mla"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}
9323
static void
do_mov (void)
{
  /* The Thumb-1 group-relocation forms cannot be encoded in ARM state.  */
  constraint (inst.relocs[0].type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.relocs[0].type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_shifter_operand (1);
}
9333
9334 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
static void
do_mov16 (void)
{
  bfd_vma imm;
  bfd_boolean top;   /* TRUE for MOVT (bit 22 set), FALSE for MOVW.  */

  top = (inst.instruction & 0x00400000) != 0;
  constraint (top && inst.relocs[0].type == BFD_RELOC_ARM_MOVW,
	      _(":lower16: not allowed in this instruction"));
  constraint (!top && inst.relocs[0].type == BFD_RELOC_ARM_MOVT,
	      _(":upper16: not allowed in this instruction"));
  inst.instruction |= inst.operands[0].reg << 12;
  if (inst.relocs[0].type == BFD_RELOC_UNUSED)
    {
      /* No relocation needed: encode the immediate directly.  */
      imm = inst.relocs[0].exp.X_add_number;
      /* The value is in two pieces: 0:11, 16:19.  */
      inst.instruction |= (imm & 0x00000fff);
      inst.instruction |= (imm & 0x0000f000) << 4;
    }
}
9355
/* Handle an MRS written with VFP operands under the non-unified
   (legacy) syntax by re-dispatching to the FMSTAT/FMRX mnemonics.
   Returns SUCCESS when one of those encodings was used, FAIL when
   the operands are not VFP and the caller should encode a core MRS.  */
static int
do_vfp_nsyn_mrs (void)
{
  if (inst.operands[0].isvec)
    {
      /* "mrs APSR_nzcv, fpscr" form: operand 1 must be FPSCR (reg 1).  */
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
9374
9375 static int
9376 do_vfp_nsyn_msr (void)
9377 {
9378 if (inst.operands[0].isvec)
9379 do_vfp_nsyn_opcode ("fmxr");
9380 else
9381 return FAIL;
9382
9383 return SUCCESS;
9384 }
9385
/* VMRS Rt, <spec_reg>: move a VFP system register to a core register.  */
static void
do_vmrs (void)
{
  unsigned Rt = inst.operands[0].reg;

  /* SP is not a valid Rt in Thumb state.  */
  if (thumb_mode && Rt == REG_SP)
    {
      inst.error = BAD_SP;
      return;
    }

  /* MVFR2 is only valid at ARMv8-A.  */
  if (inst.operands[1].reg == 5)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* APSR_ sets isvec.  All other refs to PC are illegal.  */
  if (!inst.operands[0].isvec && Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[1].reg << 16);
  inst.instruction |= (Rt << 12);
}
9414
/* VMSR <spec_reg>, Rt: move a core register to a VFP system register.  */
static void
do_vmsr (void)
{
  unsigned Rt = inst.operands[1].reg;

  /* Thumb state rejects both SP and PC; ARM state only PC.  */
  if (thumb_mode)
    reject_bad_reg (Rt);
  else if (Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* MVFR2 is only valid for ARMv8-A.  */
  if (inst.operands[0].reg == 5)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[0].reg << 16);
  inst.instruction |= (Rt << 12);
}
9438
/* Core MRS: Rd, CPSR/SPSR/APSR or a banked register.  */
static void
do_mrs (void)
{
  unsigned br;

  /* Legacy VFP syntax may spell a VMRS as an MRS; handle that first.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;

  if (inst.operands[1].isreg)
    {
      /* Banked-register form.  NOTE(review): the bit tests below match
	 the encoding produced by the banked-register parser (R bit at
	 0x200, SYSm field at 0xf0000) — verify against that parser.  */
      br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf0000))
	as_bad (_("bad register for mrs"));
    }
  else
    {
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
		  != (PSR_c|PSR_f),
		  _("'APSR', 'CPSR' or 'SPSR' expected"));
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
    }

  inst.instruction |= br;
}
9467
9468 /* Two possible forms:
9469 "{C|S}PSR_<field>, Rm",
9470 "{C|S}PSR_f, #expression". */
9471
static void
do_msr (void)
{
  /* Legacy VFP syntax may spell a VMSR as an MSR; handle that first.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  /* Operand 0 carries the PSR field mask already positioned.  */
  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      /* Immediate form: leave encoding of the value to the reloc.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
      inst.relocs[0].pc_rel = 0;
    }
}
9488
/* MUL Rd, Rm {, Rs}; Rs defaults to Rd when omitted.  */
static void
do_mul (void)
{
  /* NOTE(review): this checks operands[2].reg even when operand 2 was
     omitted; presumably absent operands are zero-initialized so the
     test is harmless then — verify against the operand parser.  */
  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;

  /* Rd == Rm was UNPREDICTABLE before ARMv6.  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("Rd and Rm should be different in mul"));
}
9504
9505 /* Long Multiply Parser
9506 UMULL RdLo, RdHi, Rm, Rs
9507 SMULL RdLo, RdHi, Rm, Rs
9508 UMLAL RdLo, RdHi, Rm, Rs
9509 SMLAL RdLo, RdHi, Rm, Rs. */
9510
static void
do_mull (void)
{
  /* RdLo, RdHi, Rm, Rs.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  /* rdhi and rdlo must be different.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));

  /* rdhi, rdlo and rm must all be different before armv6.  */
  if ((inst.operands[0].reg == inst.operands[2].reg
       || inst.operands[1].reg == inst.operands[2].reg)
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
}
9529
/* NOP {#hint}.  Pre-v6K cores without an operand keep the opcode
   chosen by the opcode table; otherwise encode a CPSR-hint NOP.  */
static void
do_nop (void)
{
  if (inst.operands[0].present
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
      inst.instruction &= 0xf0000000;   /* Keep only the condition field.  */
      inst.instruction |= 0x0320f000;
      if (inst.operands[0].present)
	inst.instruction |= inst.operands[0].imm;
    }
}
9543
9544 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9545 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9546 Condition defaults to COND_ALWAYS.
9547 Error if Rd, Rn or Rm are R15. */
9548
static void
do_pkhbt (void)
{
  /* Rd, Rn, Rm {, LSL #imm}.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  if (inst.operands[3].present)
    encode_arm_shift (3);
}
9558
9559 /* ARM V6 PKHTB (Argument Parse). */
9560
static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn.  */
      inst.instruction &= 0xfff00010;
      inst.instruction |= inst.operands[0].reg << 12;
      /* Note the swapped Rn/Rm positions relative to the branch below.  */
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}
9581
9582 /* ARMv5TE: Preload-Cache
9583 MP Extensions: Preload for write
9584
9585 PLD(W) <addr_mode>
9586
9587 Syntactically, like LDR with B=1, W=0, L=1. */
9588
static void
do_pld (void)
{
  /* Only a plain pre-indexed address without writeback is legal.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
9602
9603 /* ARMv7: PLI <addr_mode> */
static void
do_pli (void)
{
  /* Same addressing restrictions as PLD.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  /* PLI encodes with the P bit clear, unlike PLD.  */
  inst.instruction &= ~PRE_INDEX;
}
9618
/* PUSH/POP {reglist}: rewrite the operands into the equivalent
   "LDM/STM sp!, {reglist}" form and encode via encode_ldmstm.  */
static void
do_push_pop (void)
{
  /* The '^' suffix parses as writeback on the register list.  */
  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
}
9631
9632 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9633 word at the specified address and the following word
9634 respectively.
9635 Unconditionally executed.
9636 Error if Rn is R15. */
9637
static void
do_rfe (void)
{
  /* Operand 0 is the base register, with optional writeback ('!').  */
  inst.instruction |= inst.operands[0].reg << 16;
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
}
9645
9646 /* ARM V6 ssat (argument parse). */
9647
static void
do_ssat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* The saturation bit position is encoded as the immediate minus one.  */
  inst.instruction |= (inst.operands[1].imm - 1) << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}
9658
9659 /* ARM V6 usat (argument parse). */
9660
static void
do_usat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* Unlike SSAT, the USAT immediate is encoded unmodified.  */
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}
9671
9672 /* ARM V6 ssat16 (argument parse). */
9673
static void
do_ssat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* As for SSAT, the bit position is encoded minus one.  */
  inst.instruction |= ((inst.operands[1].imm - 1) << 16);
  inst.instruction |= inst.operands[2].reg;
}
9681
/* ARM V6 usat16 (argument parse).  */
static void
do_usat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;
}
9689
9690 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9691 preserving the other bits.
9692
9693 setend <endian_specifier>, where <endian_specifier> is either
9694 BE or LE. */
9695
static void
do_setend (void)
{
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    as_tsktsk (_("setend use is deprecated for ARMv8"));

  /* Operand 0 is nonzero for BE; bit 9 is the E bit.  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}
9706
/* Shift mnemonics (LSL/LSR/ASR/ROR): Rd, {Rm,} Rs|#imm.
   When Rm is omitted it defaults to Rd.  */
static void
do_shift (void)
{
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
      /* PR 12854: Error on extraneous shifts.  */
      constraint (inst.operands[2].shifted,
		  _("extraneous shift as part of operand to shift insn"));
    }
  else
    /* Immediate shift amount: resolved via the shift-imm reloc.  */
    inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
}
9727
/* SMC #imm: the immediate is emitted through its dedicated reloc.  */
static void
do_smc (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_SMC;
  inst.relocs[0].pc_rel = 0;
}
9734
/* HVC #imm: the immediate is emitted through its dedicated reloc.  */
static void
do_hvc (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_HVC;
  inst.relocs[0].pc_rel = 0;
}
9741
/* SWI/SVC #imm: the immediate is emitted through its dedicated reloc.  */
static void
do_swi (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_SWI;
  inst.relocs[0].pc_rel = 0;
}
9748
/* ARMv8.1 SETPAN #imm (ARM encoding): imm bit goes into bit 9.  */
static void
do_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= ((inst.operands[0].imm & 1) << 9);
}
9757
/* ARMv8.1 SETPAN #imm (Thumb encoding): imm bit goes into bit 3.  */
static void
do_t_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= (inst.operands[0].imm << 3);
}
9766
9767 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9768 SMLAxy{cond} Rd,Rm,Rs,Rn
9769 SMLAWy{cond} Rd,Rm,Rs,Rn
9770 Error if any register is R15. */
9771
static void
do_smla (void)
{
  /* Rd, Rm, Rs, Rn.  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}
9780
9781 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9782 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9783 Error if any register is R15.
9784 Warning if Rdlo == Rdhi. */
9785
static void
do_smlal (void)
{
  /* RdLo, RdHi, Rm, Rs.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));
}
9797
9798 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9799 SMULxy{cond} Rd,Rm,Rs
9800 Error if any register is R15. */
9801
static void
do_smul (void)
{
  /* Rd, Rm, Rs.  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
}
9809
9810 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9811 the same for both ARM and Thumb-2. */
9812
9813 static void
9814 do_srs (void)
9815 {
9816 int reg;
9817
9818 if (inst.operands[0].present)
9819 {
9820 reg = inst.operands[0].reg;
9821 constraint (reg != REG_SP, _("SRS base register must be r13"));
9822 }
9823 else
9824 reg = REG_SP;
9825
9826 inst.instruction |= reg << 16;
9827 inst.instruction |= inst.operands[1].imm;
9828 if (inst.operands[0].writeback || inst.operands[1].writeback)
9829 inst.instruction |= WRITE_BACK;
9830 }
9831
9832 /* ARM V6 strex (argument parse). */
9833
static void
do_strex (void)
{
  /* Only [Rn] / [Rn, #0] addressing is legal, and Rn must not be PC.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  /* Rd must differ from both Rt and Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.relocs[0].exp.X_op != O_constant
	      || inst.relocs[0].exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.relocs[0].type = BFD_RELOC_UNUSED;
}
9857
/* Thumb STREXB/STREXH: validate the address and overlap rules, then
   encode via the common Rm/Rd/Rn helper.  */
static void
do_t_strexbh (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  /* Rd must differ from both Rt and Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9872
/* ARM V6K STREXD: Rd, Rt (even), {Rt2,} [Rn].  */
static void
do_strexd (void)
{
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  /* Rd must not overlap Rt, Rt2 or Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}
9894
9895 /* ARM V8 STRL. */
static void
do_stlex (void)
{
  /* Rd must differ from both Rt and Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rd_rm_rn ();
}
9904
/* Thumb encoding of STLEX: same overlap rules, different field order.  */
static void
do_t_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9913
9914 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9915 extends it to 32-bits, and adds the result to a value in another
9916 register. You can specify a rotation by 0, 8, 16, or 24 bits
9917 before extracting the 16-bit value.
9918 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9919 Condition defaults to COND_ALWAYS.
9920 Error if any register uses R15. */
9921
static void
do_sxtah (void)
{
  /* Rd, Rn, Rm {, rotation}; the rotation selector lands in bits 10-11.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 10;
}
9930
9931 /* ARM V6 SXTH.
9932
9933 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9934 Condition defaults to COND_ALWAYS.
9935 Error if any register uses R15. */
9936
static void
do_sxth (void)
{
  /* Rd, Rm {, rotation}; the rotation selector lands in bits 10-11.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 10;
}
9944 \f
9945 /* VFP instructions. In a logical order: SP variant first, monad
9946 before dyad, arithmetic then move then load/store. */
9947
/* Single-precision one-operand form: Sd, Sm.  */
static void
do_vfp_sp_monadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
9954
/* Single-precision two-operand form: Sd, Sn, Sm.  */
static void
do_vfp_sp_dyadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
9962
/* Single-precision compare against zero: only Sd is encoded.  */
static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}
9968
/* Conversion with double-precision destination: Dd, Sm.  */
static void
do_vfp_dp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
9975
/* Conversion with single-precision destination: Sd, Dm.  */
static void
do_vfp_sp_dp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
9982
/* Core register from single-precision register: Rd, Sn.  */
static void
do_vfp_reg_from_sp (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}
9989
/* Two core registers from an SP register pair: Rd, Rn, {Sm, Sm+1}.  */
static void
do_vfp_reg2_from_sp2 (void)
{
  /* Operand 2's imm is the register-pair count from the parser.  */
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
9999
/* Single-precision register from core register: Sn, Rd.  */
static void
do_vfp_sp_from_reg (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}
10006
/* SP register pair from two core registers: {Sm, Sm+1}, Rd, Rn.  */
static void
do_vfp_sp2_from_reg2 (void)
{
  /* Operand 0's imm is the register-pair count from the parser.  */
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
10016
/* Single-precision VLDR/VSTR: Sd, <address>.  */
static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
10023
/* Double-precision VLDR/VSTR: Dd, <address>.  */
static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
10030
10031
/* Common encoder for single-precision VLDM/VSTM variants.  Operand 0
   is the base register, operand 1 the S-register list (reg = first
   register, imm = count).  */
static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    /* Decrement-before without writeback cannot be encoded.  */
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;
}
10044
/* Common encoder for double-precision VLDM/VSTM and the FLDMX/FSTMX
   forms.  Operand 0 is the base register, operand 1 the D-register
   list (reg = first register, imm = count).  */
static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  int count;

  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  /* The offset field counts words: two per D register, plus one extra
     for the FLDMX/FSTMX formats.  */
  count = inst.operands[1].imm << 1;
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;

  inst.instruction |= count;
}
10065
/* Single-precision VLDM/VSTM, increment-after.  */
static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}
10071
/* Single-precision VLDM/VSTM, decrement-before.  */
static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}
10077
/* Double-precision VLDM/VSTM, increment-after.  */
static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}
10083
/* Double-precision VLDM/VSTM, decrement-before.  */
static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}
10089
/* FLDMX/FSTMX (unknown-precision) form, increment-after.  */
static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}
10095
/* FLDMX/FSTMX (unknown-precision) form, decrement-before.  */
static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
10101
/* Double-precision two-operand form: Dd, Dm.  */
static void
do_vfp_dp_rd_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
10108
/* Double-precision form with operands in Dn, Dd order.  */
static void
do_vfp_dp_rn_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}
10115
/* Double-precision form with operands in Dd, Dn order.  */
static void
do_vfp_dp_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}
10122
/* Double-precision three-operand form: Dd, Dn, Dm.  */
static void
do_vfp_dp_rd_rn_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}
10130
/* Double-precision single-operand form: only Dd is encoded.  */
static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}
10136
/* Double-precision form with operands in Dm, Dd, Dn order.  */
static void
do_vfp_dp_rm_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
10144
10145 /* VFPv3 instructions. */
/* VMOV.F32 Sd, #imm: split the 8-bit encoded constant into the
   instruction's high (bits 16-19) and low (bits 0-3) nibbles.  */
static void
do_vfp_sp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
10153
/* VMOV.F64 Dd, #imm: same immediate split as the SP variant.  */
static void
do_vfp_dp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
10161
/* Encode the fraction-bits immediate of a VFPv3 fixed-point
   conversion.  SRCSIZE is 16 or 32; the field stores
   SRCSIZE - fbits, split across bit 5 (lsb) and bits 0-3.  */
static void
vfp_conv (int srcsize)
{
  int immbits = srcsize - inst.operands[1].imm;

  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
    {
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
	 i.e. immbits must be in range 0 - 16.  */
      inst.error = _("immediate value out of range, expected range [0, 16]");
      return;
    }
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
    {
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
	 i.e. immbits must be in range 0 - 31.  */
      inst.error = _("immediate value out of range, expected range [1, 32]");
      return;
    }

  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}
10185
/* Single-precision VCVT with a 16-bit fixed-point operand: encode Sd,
   then the fraction-bits immediate (may set inst.error).  */
static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}
10192
/* Double-precision VCVT with a 16-bit fixed-point operand: encode Dd,
   then the fraction-bits immediate (may set inst.error).  */
static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}
10199
/* Single-precision VCVT with a 32-bit fixed-point operand: encode Sd,
   then the fraction-bits immediate (may set inst.error).  */
static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}
10206
/* Double-precision VCVT with a 32-bit fixed-point operand: encode Dd,
   then the fraction-bits immediate (may set inst.error).  */
static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
10213 \f
10214 /* FPA instructions. Also in a logical order. */
10215
10216 static void
10217 do_fpa_cmp (void)
10218 {
10219 inst.instruction |= inst.operands[0].reg << 16;
10220 inst.instruction |= inst.operands[1].reg;
10221 }
10222
/* Encode an FPA LFM/SFM multiple load/store.  operand 0 is the first
   FPA register, operand 1's imm the register count (1-4, encoded in the
   CP_T_X/CP_T_Y bits), operand 2 the address.  */
static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* Register count 1-4 maps onto the two CP_T bits; 4 encodes as 0.  */
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X; break;
    case 2: inst.instruction |= CP_T_Y; break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Each FPA register occupies 12 bytes in the transfer.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.relocs[0].exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.relocs[0].exp.X_add_number = -inst.relocs[0].exp.X_add_number;

      /* Descending-stack, writeback form becomes post-indexed.  */
      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
10261 \f
10262 /* iWMMXt instructions: strictly in alphabetical order. */
10263
/* TANDC/TORC deposit their result in the CPSR flags, so the only legal
   destination register spelling is r15.  */
static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}
10269
10270 static void
10271 do_iwmmxt_textrc (void)
10272 {
10273 inst.instruction |= inst.operands[0].reg << 12;
10274 inst.instruction |= inst.operands[1].imm;
10275 }
10276
10277 static void
10278 do_iwmmxt_textrm (void)
10279 {
10280 inst.instruction |= inst.operands[0].reg << 12;
10281 inst.instruction |= inst.operands[1].reg << 16;
10282 inst.instruction |= inst.operands[2].imm;
10283 }
10284
10285 static void
10286 do_iwmmxt_tinsr (void)
10287 {
10288 inst.instruction |= inst.operands[0].reg << 16;
10289 inst.instruction |= inst.operands[1].reg << 12;
10290 inst.instruction |= inst.operands[2].imm;
10291 }
10292
10293 static void
10294 do_iwmmxt_tmia (void)
10295 {
10296 inst.instruction |= inst.operands[0].reg << 5;
10297 inst.instruction |= inst.operands[1].reg;
10298 inst.instruction |= inst.operands[2].reg << 12;
10299 }
10300
10301 static void
10302 do_iwmmxt_waligni (void)
10303 {
10304 inst.instruction |= inst.operands[0].reg << 12;
10305 inst.instruction |= inst.operands[1].reg << 16;
10306 inst.instruction |= inst.operands[2].reg;
10307 inst.instruction |= inst.operands[3].imm << 20;
10308 }
10309
10310 static void
10311 do_iwmmxt_wmerge (void)
10312 {
10313 inst.instruction |= inst.operands[0].reg << 12;
10314 inst.instruction |= inst.operands[1].reg << 16;
10315 inst.instruction |= inst.operands[2].reg;
10316 inst.instruction |= inst.operands[3].imm << 21;
10317 }
10318
10319 static void
10320 do_iwmmxt_wmov (void)
10321 {
10322 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
10323 inst.instruction |= inst.operands[0].reg << 12;
10324 inst.instruction |= inst.operands[1].reg << 16;
10325 inst.instruction |= inst.operands[1].reg;
10326 }
10327
10328 static void
10329 do_iwmmxt_wldstbh (void)
10330 {
10331 int reloc;
10332 inst.instruction |= inst.operands[0].reg << 12;
10333 if (thumb_mode)
10334 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
10335 else
10336 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
10337 encode_arm_cp_address (1, TRUE, FALSE, reloc);
10338 }
10339
/* Encode WLDRW/WSTRW, which accept either an iWMMXt data register or a
   control register as operand 0.  */
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      /* The control-register form is unconditional: force the 0xf
	 condition field and reject any explicit condition.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
10353
/* Encode WLDRD/WSTRD.  On iWMMXt2 with a register offset, the
   instruction is re-encoded into the unconditional (0xf-prefixed)
   register-offset form; otherwise the normal coprocessor addressing
   encoding is used.  */
static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      /* Strip the coprocessor-offset fields and switch to the
	 unconditional iWMMXt2 register-offset encoding.  */
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xfU << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      /* Constant part of the offset goes in bits 4-7, the index
	 register in bits 0-3.  */
      inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
10376
10377 static void
10378 do_iwmmxt_wshufh (void)
10379 {
10380 inst.instruction |= inst.operands[0].reg << 12;
10381 inst.instruction |= inst.operands[1].reg << 16;
10382 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
10383 inst.instruction |= (inst.operands[2].imm & 0x0f);
10384 }
10385
10386 static void
10387 do_iwmmxt_wzero (void)
10388 {
10389 /* WZERO reg is an alias for WANDN reg, reg, reg. */
10390 inst.instruction |= inst.operands[0].reg;
10391 inst.instruction |= inst.operands[0].reg << 12;
10392 inst.instruction |= inst.operands[0].reg << 16;
10393 }
10394
/* Encode an iWMMXt shift that takes either a register (classic form)
   or a 5-bit immediate shift count (iWMMXt2 only).  A count of 0 is
   not directly encodable and is rewritten as an equivalent full-width
   rotate (or a plain WOR for doubleword ops).  */
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* Bits 20-23 select the operation/width; remap #0 shifts.  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  The count's bit 4 lands in insn bit 8, the
       low four bits in insn bits 0-3; the encoding is unconditional.  */
    inst.operands[2].imm &= 0x1f;
    inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
10444 \f
10445 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10446 operations first, then control, shift, and load/store. */
10447
10448 /* Insns like "foo X,Y,Z". */
10449
10450 static void
10451 do_mav_triple (void)
10452 {
10453 inst.instruction |= inst.operands[0].reg << 16;
10454 inst.instruction |= inst.operands[1].reg;
10455 inst.instruction |= inst.operands[2].reg << 12;
10456 }
10457
10458 /* Insns like "foo W,X,Y,Z".
10459 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10460
10461 static void
10462 do_mav_quad (void)
10463 {
10464 inst.instruction |= inst.operands[0].reg << 5;
10465 inst.instruction |= inst.operands[1].reg << 12;
10466 inst.instruction |= inst.operands[2].reg << 16;
10467 inst.instruction |= inst.operands[3].reg;
10468 }
10469
10470 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
/* cfmvsc32<cond> DSPSC,MVDX[15:0].  Only the source register is
   encoded (bits 12-15); DSPSC is implicit in the opcode.  */
static void
do_mav_dspsc (void)
{
  inst.instruction |= inst.operands[1].reg << 12;
}
10476
10477 /* Maverick shift immediate instructions.
10478 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10479 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
10480
10481 static void
10482 do_mav_shift (void)
10483 {
10484 int imm = inst.operands[2].imm;
10485
10486 inst.instruction |= inst.operands[0].reg << 12;
10487 inst.instruction |= inst.operands[1].reg << 16;
10488
10489 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
10490 Bits 5-7 of the insn should have bits 4-6 of the immediate.
10491 Bit 4 should be 0. */
10492 imm = (imm & 0xf) | ((imm & 0x70) << 1);
10493
10494 inst.instruction |= imm;
10495 }
10496 \f
10497 /* XScale instructions. Also sorted arithmetic before move. */
10498
10499 /* Xscale multiply-accumulate (argument parse)
10500 MIAcc acc0,Rm,Rs
10501 MIAPHcc acc0,Rm,Rs
10502 MIAxycc acc0,Rm,Rs. */
10503
10504 static void
10505 do_xsc_mia (void)
10506 {
10507 inst.instruction |= inst.operands[1].reg;
10508 inst.instruction |= inst.operands[2].reg << 12;
10509 }
10510
10511 /* Xscale move-accumulator-register (argument parse)
10512
10513 MARcc acc0,RdLo,RdHi. */
10514
10515 static void
10516 do_xsc_mar (void)
10517 {
10518 inst.instruction |= inst.operands[1].reg << 12;
10519 inst.instruction |= inst.operands[2].reg << 16;
10520 }
10521
10522 /* Xscale move-register-accumulator (argument parse)
10523
10524 MRAcc RdLo,RdHi,acc0. */
10525
10526 static void
10527 do_xsc_mra (void)
10528 {
10529 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
10530 inst.instruction |= inst.operands[0].reg << 12;
10531 inst.instruction |= inst.operands[1].reg << 16;
10532 }
10533 \f
10534 /* Encoding functions relevant only to Thumb. */
10535
10536 /* inst.operands[i] is a shifted-register operand; encode
10537 it into inst.instruction in the format used by Thumb32. */
10538
/* inst.operands[i] is a shifted-register operand; encode it into
   inst.instruction in the Thumb32 format: Rm in bits 0-3, shift type
   in bits 4-5, and the shift amount split across bits 12-14 (imm3)
   and 6-7 (imm2).  Shift-by-register forms are rejected.  */
static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.relocs[0].exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR with a zero shift amount.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.relocs[0].exp.X_op != O_constant,
		  _("expression too complex"));

      /* ASR/LSR allow a count of 32 (encoded as 0); LSL and ROR
	 do not.  */
      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      /* Canonicalise: a zero count is LSL #0; a count of 32 is
	 encoded as 0.  */
      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
10570
10571
10572 /* inst.operands[i] was set up by parse_address. Encode it into a
10573 Thumb32 format load or store instruction. Reject forms that cannot
10574 be used with such instructions. If is_t is true, reject forms that
10575 cannot be used with a T instruction; if is_d is true, reject forms
10576 that cannot be used with a D instruction. If it is a store insn,
10577 reject PC in Rn. */
10578
/* inst.operands[i] was set up by parse_address.  Encode it into a
   Thumb32 format load or store instruction.  Reject forms that cannot
   be used with such instructions.  If is_t is true, reject forms that
   cannot be used with a T instruction; if is_d is true, reject forms
   that cannot be used with a D instruction.  If it is a store insn,
   reject PC in Rn.  */
static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* Register-offset form: [Rn, Rm {, LSL #shift}].  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  constraint (inst.relocs[0].exp.X_op != O_constant,
		      _("expression too complex"));
	  /* LSL amount is limited to 0-3 (insn bits 4-5).  */
	  constraint (inst.relocs[0].exp.X_add_number < 0
		      || inst.relocs[0].exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
	}
      inst.relocs[0].type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* Immediate-offset or pre-indexed form: [Rn, #imm]{!}.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      /* PC-relative addressing is only valid for loads.  */
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      if (is_d)
	{
	  /* Doubleword form: P bit, and W for writeback.  */
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* Post-indexed form: [Rn], #imm (writeback is implied).  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
10650
10651 /* Table of Thumb instructions which exist in both 16- and 32-bit
10652 encodings (the latter only in post-V6T2 cores). The index is the
10653 value used in the insns table below. When there is more than one
10654 possible 16-bit encoding for the instruction, this table always
10655 holds variant (1).
10656 Also contains several pseudo-instructions used during relaxation. */
/* Each X(mnemonic, 16-bit opcode, 32-bit opcode) entry pairs the
   Thumb-1 and Thumb-2 encodings of one operation.  An opcode of 0000
   or ffffffff marks an encoding that does not exist for that width.  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bf,    0000, f040e001),			\
  X(_bfcsel,0000, f000e001),			\
  X(_bfx,   0000, f060e001),			\
  X(_bfl,   0000, f000c001),			\
  X(_bflx,  0000, f070e001),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_dls,   0000, f040e001),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_le,    0000, f00fc001),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),                    \
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */ \
  X(_push,  b400, e92d0000), /* stmdb sp!,... */ \
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_wls,   0000, f040c001),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* Expansion taking the 16-bit opcode column, indexed by T_MNEM code.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* Expansion taking the 32-bit opcode column, indexed by T_MNEM code.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
10764
10765 /* Thumb instruction encoders, in alphabetical order. */
10766
10767 /* ADDW or SUBW. */
10768
10769 static void
10770 do_t_add_sub_w (void)
10771 {
10772 int Rd, Rn;
10773
10774 Rd = inst.operands[0].reg;
10775 Rn = inst.operands[1].reg;
10776
10777 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10778 is the SP-{plus,minus}-immediate form of the instruction. */
10779 if (Rn == REG_SP)
10780 constraint (Rd == REG_PC, BAD_PC);
10781 else
10782 reject_bad_reg (Rd);
10783
10784 inst.instruction |= (Rn << 16) | (Rd << 8);
10785 inst.relocs[0].type = BFD_RELOC_ARM_T32_IMM12;
10786 }
10787
10788 /* Parse an add or subtract instruction. We get here with inst.instruction
10789 equaling any of THUMB_OPCODE_add, adds, sub, or subs. */
10790
/* Assemble a Thumb add or subtract (T_MNEM_add/adds/sub/subs),
   choosing between the many 16-bit forms, relaxable forms, and 32-bit
   forms depending on registers, immediate vs. register operand,
   requested size, IT-block state, and syntax mode.  */
static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  if (Rd == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      /* In an IT block the 16-bit encodings don't set flags; outside
	 one they do.  So the flag-setting mnemonic can only be narrow
	 outside an IT block, and vice versa.  */
      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      if (flags)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (!inst.operands[2].isreg)
	{
	  int add;

	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  /* Thumb-1 group-relocation immediates must stay as
		     they were parsed; anything else gets the normal
		     Thumb ADD reloc, or is left for relaxation.  */
		  if (inst.relocs[0].type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		      || (inst.relocs[0].type
			  > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC))
		    {
		      if (inst.size_req == 2)
			inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
		      else
			inst.relax = opcode;
		    }
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      /* 32-bit immediate form.  Thumb-1-only relocs cannot be
		 carried by a 32-bit encoding.  */
	      constraint ((inst.relocs[0].type
			   >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC)
			  && (inst.relocs[0].type
			      <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC) ,
			  THUMB1_RELOC_ONLY);
	      if (Rd == REG_PC)
		{
		  /* Only the exception-return idiom SUBS PC, LR, #imm8
		     may target PC.  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.relocs[0].exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.relocs[0].exp.X_add_number < 0
			      || inst.relocs[0].exp.X_add_number > 0xff,
			      _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.relocs[0].exp.X_add_number;
		  inst.relocs[0].type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.relocs[0].type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  unsigned int value = inst.relocs[0].exp.X_add_number;
	  unsigned int shift = inst.operands[2].shift_kind;

	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      /* Canonicalise so the non-destination source ends
			 up in Rn for the two-register encoding.  */
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
		      _("shift value over 3 not allowed in thumb mode"));
	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
		      _("only LSL shift allowed in thumb mode"));
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified syntax: only 16-bit, non-flag-setting mnemonics.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
11011
/* Assemble Thumb ADR, choosing a relaxable 16-bit form, a 32-bit
   ADDW-based form, or a fixed 16-bit form depending on syntax mode,
   requested size, and the destination register.  */
static void
do_t_adr (void)
{
  unsigned Rd;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  if (unified_syntax && inst.size_req == 0 && Rd <= 7)
    {
      /* Defer to section relaxation.  */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd << 4;
    }
  else if (unified_syntax && inst.size_req != 2)
    {
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.relocs[0].type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.relocs[0].pc_rel = 1;
    }
  else
    {
      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
      inst.relocs[0].exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.relocs[0].pc_rel = 1;
      inst.instruction |= Rd << 4;
    }

  /* Taking the address of a defined Thumb function needs the low bit
     set so the result is a valid interworking address.  */
  if (inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number += 1;
}
11051
11052 /* Arithmetic instructions for which there is just one 16-bit
11053 instruction encoding, and it allows only two low registers.
11054 For maximal compatibility with ARM syntax, we allow three register
11055 operands even when Thumb-32 instructions are not available, as long
11056 as the first two are identical. For instance, both "sbc r0,r1" and
11057 "sbc r0,r0,r1" are allowed. */
/* Assemble a three-operand Thumb arithmetic instruction that has a
   single 16-bit encoding restricted to two low registers with
   Rd == Rs.  Falls back to the 32-bit encoding (unified syntax only)
   otherwise.  */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  The
	     16-bit form sets flags iff outside an IT block, so the
	     mnemonic's S bit must agree with the IT state.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
11140
11141 /* Similarly, but for instructions where the arithmetic operation is
11142 commutative, so we can allow either of them to be different from
11143 the destination operand in a 16-bit instruction. For instance, all
11144 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
11145 accepted. */
/* Like do_t_arit3, but for commutative operations: in the 16-bit form
   the destination may match either source operand (Rd == Rs or
   Rd == Rn), since the operands can be swapped freely.  */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  The
	     16-bit form sets flags iff outside an IT block, so the
	     mnemonic's S bit must agree with the IT state.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow)
	    {
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      /* Commutative: swap the roles of the two sources.  */
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
11241
11242 static void
11243 do_t_bfc (void)
11244 {
11245 unsigned Rd;
11246 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
11247 constraint (msb > 32, _("bit-field extends past end of register"));
11248 /* The instruction encoding stores the LSB and MSB,
11249 not the LSB and width. */
11250 Rd = inst.operands[0].reg;
11251 reject_bad_reg (Rd);
11252 inst.instruction |= Rd << 8;
11253 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
11254 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
11255 inst.instruction |= msb - 1;
11256 }
11257
11258 static void
11259 do_t_bfi (void)
11260 {
11261 int Rd, Rn;
11262 unsigned int msb;
11263
11264 Rd = inst.operands[0].reg;
11265 reject_bad_reg (Rd);
11266
11267 /* #0 in second position is alternative syntax for bfc, which is
11268 the same instruction but with REG_PC in the Rm field. */
11269 if (!inst.operands[1].isreg)
11270 Rn = REG_PC;
11271 else
11272 {
11273 Rn = inst.operands[1].reg;
11274 reject_bad_reg (Rn);
11275 }
11276
11277 msb = inst.operands[2].imm + inst.operands[3].imm;
11278 constraint (msb > 32, _("bit-field extends past end of register"));
11279 /* The instruction encoding stores the LSB and MSB,
11280 not the LSB and width. */
11281 inst.instruction |= Rd << 8;
11282 inst.instruction |= Rn << 16;
11283 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
11284 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
11285 inst.instruction |= msb - 1;
11286 }
11287
11288 static void
11289 do_t_bfx (void)
11290 {
11291 unsigned Rd, Rn;
11292
11293 Rd = inst.operands[0].reg;
11294 Rn = inst.operands[1].reg;
11295
11296 reject_bad_reg (Rd);
11297 reject_bad_reg (Rn);
11298
11299 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
11300 _("bit-field extends past end of register"));
11301 inst.instruction |= Rd << 8;
11302 inst.instruction |= Rn << 16;
11303 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
11304 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
11305 inst.instruction |= inst.operands[3].imm - 1;
11306 }
11307
11308 /* ARM V5 Thumb BLX (argument parse)
11309 BLX <target_addr> which is BLX(1)
11310 BLX <Rm> which is BLX(2)
11311 Unfortunately, there are two different opcodes for this mnemonic.
11312 So, the insns[].value is not used, and the code here zaps values
11313 into inst.instruction.
11314
11315 ??? How to take advantage of the additional two bits of displacement
11316 available in Thumb32 mode? Need new relocation? */
11317
11318 static void
11319 do_t_blx (void)
11320 {
11321 set_it_insn_type_last ();
11322
11323 if (inst.operands[0].isreg)
11324 {
11325 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
11326 /* We have a register, so this is BLX(2). */
11327 inst.instruction |= inst.operands[0].reg << 3;
11328 }
11329 else
11330 {
11331 /* No register. This must be BLX(1). */
11332 inst.instruction = 0xf000e800;
11333 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
11334 }
11335 }
11336
11337 static void
11338 do_t_branch (void)
11339 {
11340 int opcode;
11341 int cond;
11342 bfd_reloc_code_real_type reloc;
11343
11344 cond = inst.cond;
11345 set_it_insn_type (IF_INSIDE_IT_LAST_INSN);
11346
11347 if (in_it_block ())
11348 {
11349 /* Conditional branches inside IT blocks are encoded as unconditional
11350 branches. */
11351 cond = COND_ALWAYS;
11352 }
11353 else
11354 cond = inst.cond;
11355
11356 if (cond != COND_ALWAYS)
11357 opcode = T_MNEM_bcond;
11358 else
11359 opcode = inst.instruction;
11360
11361 if (unified_syntax
11362 && (inst.size_req == 4
11363 || (inst.size_req != 2
11364 && (inst.operands[0].hasreloc
11365 || inst.relocs[0].exp.X_op == O_constant))))
11366 {
11367 inst.instruction = THUMB_OP32(opcode);
11368 if (cond == COND_ALWAYS)
11369 reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
11370 else
11371 {
11372 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
11373 _("selected architecture does not support "
11374 "wide conditional branch instruction"));
11375
11376 gas_assert (cond != 0xF);
11377 inst.instruction |= cond << 22;
11378 reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
11379 }
11380 }
11381 else
11382 {
11383 inst.instruction = THUMB_OP16(opcode);
11384 if (cond == COND_ALWAYS)
11385 reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
11386 else
11387 {
11388 inst.instruction |= cond << 8;
11389 reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
11390 }
11391 /* Allow section relaxation. */
11392 if (unified_syntax && inst.size_req != 2)
11393 inst.relax = opcode;
11394 }
11395 inst.relocs[0].type = reloc;
11396 inst.relocs[0].pc_rel = 1;
11397 }
11398
/* Actually do the work for Thumb state bkpt and hlt.  The only difference
   between the two is the maximum immediate allowed - which is passed in
   RANGE.  */
static void
do_t_bkpt_hlt1 (int range)
{
  /* These instructions execute regardless of any condition, so a
     condition suffix would be misleading.  */
  constraint (inst.cond != COND_ALWAYS,
	      _("instruction is always unconditional"));
  if (inst.operands[0].present)
    {
      /* The immediate operand is optional; when given it is ORed
	 straight into the low bits of the opcode.  */
      constraint (inst.operands[0].imm > range,
		  _("immediate value out of range"));
      inst.instruction |= inst.operands[0].imm;
    }

  set_it_insn_type (NEUTRAL_IT_INSN);
}
11416
/* Thumb HLT: like BKPT but with a 6-bit immediate (maximum 63).  */
static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (63);
}
11422
/* Thumb BKPT: an 8-bit immediate (maximum 255).  */
static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (255);
}
11428
/* Thumb BL/BLX to an out-of-line target (argument parse); encoded
   via the BRANCH23 PC-relative relocation.  */
static void
do_t_branch23 (void)
{
  set_it_insn_type_last ();
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);

  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
     this file.  We used to simply ignore the PLT reloc type here --
     the branch encoding is now needed to deal with TLSCALL relocs.
     So if we see a PLT reloc now, put it back to how it used to be to
     keep the preexisting behaviour.  */
  if (inst.relocs[0].type == BFD_RELOC_ARM_PLT32)
    inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH23;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_symbol
      = find_real_start (inst.relocs[0].exp.X_add_symbol);
#endif
}
11456
/* Thumb BX: branch and exchange; Rm goes in bits 3-6 of the 16-bit
   encoding.  */
static void
do_t_bx (void)
{
  set_it_insn_type_last ();
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
     should cause the alignment to be checked once it is known.  This is
     because BX PC only works if the instruction is word aligned.  */
}
11466
11467 static void
11468 do_t_bxj (void)
11469 {
11470 int Rm;
11471
11472 set_it_insn_type_last ();
11473 Rm = inst.operands[0].reg;
11474 reject_bad_reg (Rm);
11475 inst.instruction |= Rm << 16;
11476 }
11477
11478 static void
11479 do_t_clz (void)
11480 {
11481 unsigned Rd;
11482 unsigned Rm;
11483
11484 Rd = inst.operands[0].reg;
11485 Rm = inst.operands[1].reg;
11486
11487 reject_bad_reg (Rd);
11488 reject_bad_reg (Rm);
11489
11490 inst.instruction |= Rd << 8;
11491 inst.instruction |= Rm << 16;
11492 inst.instruction |= Rm;
11493 }
11494
/* Thumb CSDB: no operands; the table-supplied opcode is used as-is.
   Only the IT-block check is needed.  */
static void
do_t_csdb (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
}
11500
/* Thumb CPS: OR the immediate operand into the opcode; not allowed
   inside an IT block.  */
static void
do_t_cps (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction |= inst.operands[0].imm;
}
11507
/* Thumb CPSIE/CPSID (argument parse).  Chooses between the 32-bit
   Thumb-2 encoding (needed for the optional mode operand) and the
   16-bit encoding.  */
static void
do_t_cpsi (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  /* The two-operand form (with a mode number) only exists as a
     32-bit instruction, and needs v6 (non-M-profile).  */
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* Carry the IE/ID discriminator over from the 16-bit opcode the
	 table gave us (bits 4-5) into the 32-bit imod field.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
11535
11536 /* THUMB CPY instruction (argument parse). */
11537
11538 static void
11539 do_t_cpy (void)
11540 {
11541 if (inst.size_req == 4)
11542 {
11543 inst.instruction = THUMB_OP32 (T_MNEM_mov);
11544 inst.instruction |= inst.operands[0].reg << 8;
11545 inst.instruction |= inst.operands[1].reg;
11546 }
11547 else
11548 {
11549 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
11550 inst.instruction |= (inst.operands[0].reg & 0x7);
11551 inst.instruction |= inst.operands[1].reg << 3;
11552 }
11553 }
11554
11555 static void
11556 do_t_cbz (void)
11557 {
11558 set_it_insn_type (OUTSIDE_IT_INSN);
11559 constraint (inst.operands[0].reg > 7, BAD_HIREG);
11560 inst.instruction |= inst.operands[0].reg;
11561 inst.relocs[0].pc_rel = 1;
11562 inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH7;
11563 }
11564
/* Thumb DBG hint: OR the option immediate into the opcode.  */
static void
do_t_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
11570
11571 static void
11572 do_t_div (void)
11573 {
11574 unsigned Rd, Rn, Rm;
11575
11576 Rd = inst.operands[0].reg;
11577 Rn = (inst.operands[1].present
11578 ? inst.operands[1].reg : Rd);
11579 Rm = inst.operands[2].reg;
11580
11581 reject_bad_reg (Rd);
11582 reject_bad_reg (Rn);
11583 reject_bad_reg (Rm);
11584
11585 inst.instruction |= Rd << 8;
11586 inst.instruction |= Rn << 16;
11587 inst.instruction |= Rm;
11588 }
11589
11590 static void
11591 do_t_hint (void)
11592 {
11593 if (unified_syntax && inst.size_req == 4)
11594 inst.instruction = THUMB_OP32 (inst.instruction);
11595 else
11596 inst.instruction = THUMB_OP16 (inst.instruction);
11597 }
11598
/* Thumb IT instruction (argument parse).  Records the IT state in
   now_it and fixes up the mask field when the condition is of the
   negated (odd) sense.  */
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_it_insn_type (IT_INSN);
  /* Remember the mask with a sentinel bit at position 4, plus the
     condition, for the IT-state machine.  */
  now_it.mask = (inst.instruction & 0xf) | 0x10;
  now_it.cc = cond;
  now_it.warn_deprecated = FALSE;

  /* If the condition is a negative condition, invert the mask.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      /* The number of low zero bits in the mask determines the IT
	 block length; XOR flips the then/else sense of the used
	 slots above the terminating 1 bit.  */
      if ((mask & 0x7) == 0)
	{
	  /* No conversion needed.  */
	  now_it.block_length = 1;
	}
      else if ((mask & 0x3) == 0)
	{
	  mask ^= 0x8;
	  now_it.block_length = 2;
	}
      else if ((mask & 0x1) == 0)
	{
	  mask ^= 0xC;
	  now_it.block_length = 3;
	}
      else
	{
	  mask ^= 0xE;
	  now_it.block_length = 4;
	}

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
11641
/* Helper function used for both push/pop and ldm/stm.  Encodes a
   Thumb-2 multiple transfer, demoting a single-register list to a
   plain str/ldr encoding.

   DO_IO selects whether a memory transfer is performed; when it is
   false BASE may be -1 and the base/load-store fields are not
   touched.  BASE is the base register, MASK the register-list
   bitmask, WRITEBACK whether the base is written back.  */
static void
encode_thumb2_multi (bfd_boolean do_io, int base, unsigned mask,
		     bfd_boolean writeback)
{
  bfd_boolean load, store;

  gas_assert (base != -1 || !do_io);
  /* Bit 20 of the table-supplied opcode distinguishes load from
     store.  */
  load = do_io && ((inst.instruction & (1 << 20)) != 0);
  store = do_io && !load;

  if (mask & (1 << 13))
    inst.error = _("SP not allowed in register list");

  if (do_io && (mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
		   "using write back is UNPREDICTABLE");

  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    /* Loading PC ends any IT block.  */
	    set_it_insn_type_last ();
	}
    }
  else if (store)
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");
    }

  if (do_io && ((mask & (mask - 1)) == 0))
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* Move the lone register from the list field to the str/ldr Rt
	 field (bits 12-15).  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  if (do_io)
    inst.instruction |= base << 16;
}
11709
/* Thumb LDM/STM (and PUSH/POP, STR/LDR demotions) argument parse.
   In unified syntax, tries hard to use a 16-bit encoding; otherwise
   falls back to the 32-bit form via encode_thumb2_multi.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.relocs[0].type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction: must be ldmia/stmia,
	 no ".w", and only low registers in the list.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  /* Bit for the base register within the list mask.  */
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7)
	    {
	      /* 16-bit stmia always writes back; 16-bit ldmia writes
		 back exactly when the base is not in the list.  */
	      if (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask)))
		{
		  if (inst.instruction == T_MNEM_stmia
		      && (inst.operands[1].imm & mask)
		      && (inst.operands[1].imm & (mask - 1)))
		    as_warn (_("value stored for r%d is UNKNOWN"),
			     inst.operands[0].reg);

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= inst.operands[0].reg << 8;
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* This means 1 register in reg list one of 3 situations:
		     1. Instruction is stmia, but without writeback.
		     2. lmdia without writeback, but with Rn not in
			reglist.
		     3. ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     case 1 and 2 which can be converted into a 16-bit
		     str or ldr.  The SP cases are handled below.  */
		  unsigned long opcode;
		  /* First, record an error for Case 3.  */
		  if (inst.operands[1].imm & mask
		      && inst.operands[0].writeback)
		    inst.error =
			_("having the base register in the register list when "
			  "using write back is UNPREDICTABLE");

		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
			    : T_MNEM_ldr);
		  inst.instruction = THUMB_OP16 (opcode);
		  inst.instruction |= inst.operands[0].reg << 3;
		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
		  narrow = TRUE;
		}
	    }
	  else if (inst.operands[0].reg == REG_SP)
	    {
	      /* SP-based forms become PUSH/POP, or SP-relative
		 str/ldr for a single-register list.  */
	      if (inst.operands[0].writeback)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_push : T_MNEM_pop);
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
		  narrow = TRUE;
		}
	    }
	}

      if (!narrow)
	{
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_multi (TRUE /* do_io */, inst.operands[0].reg,
			       inst.operands[1].imm,
			       inst.operands[0].writeback);
	}
    }
  else
    {
      /* Non-unified syntax: only the classic 16-bit ldmia/stmia.  */
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNKNOWN"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
11838
11839 static void
11840 do_t_ldrex (void)
11841 {
11842 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
11843 || inst.operands[1].postind || inst.operands[1].writeback
11844 || inst.operands[1].immisreg || inst.operands[1].shifted
11845 || inst.operands[1].negative,
11846 BAD_ADDR_MODE);
11847
11848 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
11849
11850 inst.instruction |= inst.operands[0].reg << 12;
11851 inst.instruction |= inst.operands[1].reg << 16;
11852 inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_U8;
11853 }
11854
11855 static void
11856 do_t_ldrexd (void)
11857 {
11858 if (!inst.operands[1].present)
11859 {
11860 constraint (inst.operands[0].reg == REG_LR,
11861 _("r14 not allowed as first register "
11862 "when second register is omitted"));
11863 inst.operands[1].reg = inst.operands[0].reg + 1;
11864 }
11865 constraint (inst.operands[0].reg == inst.operands[1].reg,
11866 BAD_OVERLAP);
11867
11868 inst.instruction |= inst.operands[0].reg << 12;
11869 inst.instruction |= inst.operands[1].reg << 8;
11870 inst.instruction |= inst.operands[2].reg << 16;
11871 }
11872
/* Thumb single-register load/store (argument parse).  Picks between
   the many 16-bit forms, the relaxation machinery, and the 32-bit
   Thumb-2 encoding.  */
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  /* A load into PC is a branch, so must be the last insn of an IT
     block.  */
  if (inst.operands[0].isreg
      && !inst.operands[0].preind
      && inst.operands[0].reg == REG_PC)
    set_it_insn_type_last ();

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand: try to turn it into a mov or a
	     literal-pool reference.  */
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Rik] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
		reject_bad_reg (inst.operands[1].imm);
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* PC- and SP-relative accesses have dedicated
		     16-bit opcodes.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.relocs[0].pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      /* With an explicit ".n" emit the offset reloc now;
		 otherwise let relaxation widen the insn if needed.  */
	      if (inst.size_req == 2)
		inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */

      /* Warning for Erratum 752419.  */
      if (opcode == T_MNEM_ldr
	  && inst.operands[0].reg == REG_SP
	  && inst.operands[1].writeback == 1
	  && !inst.operands[1].immisreg)
	{
	  if (no_cpu_selected ()
	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
	    as_warn (_("This instruction may be unpredictable "
		       "if executed on M-profile cores "
		       "with interrupts enabled."));
	}

      /* Do some validations regarding addressing modes.  */
      if (inst.operands[1].immisreg)
	reject_bad_reg (inst.operands[1].imm);

      constraint (inst.operands[1].writeback == 1
		  && inst.operands[0].reg == inst.operands[1].reg,
		  BAD_OVERLAP);

      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      check_ldr_r15_aligned ();
      return;
    }

  /* Non-unified syntax from here on: only 16-bit encodings.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      /* PC- and SP-relative forms only support word transfers.  */
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

 op16:
  /* Map the immediate-offset opcodes onto their register-offset
     counterparts.  */
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
12059
/* Thumb-2 LDRD/STRD (argument parse).  The second transfer register
   defaults to the register after the first when omitted.  */
static void
do_t_ldstd (void)
{
  if (!inst.operands[1].present)
    {
      inst.operands[1].reg = inst.operands[0].reg + 1;
      /* The defaulted pair would otherwise spill into PC or LR.  */
      constraint (inst.operands[0].reg == REG_LR,
		  _("r14 not allowed here"));
      constraint (inst.operands[0].reg == REG_R12,
		  _("r12 not allowed here"));
    }

  /* Writing back a base that overlaps a transfer register is
     suspect; warn but continue encoding.  */
  if (inst.operands[2].writeback
      && (inst.operands[0].reg == inst.operands[2].reg
	  || inst.operands[1].reg == inst.operands[2].reg))
    as_warn (_("base register written back, and overlaps "
	       "one of transfer registers"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
}
12082
/* Thumb-2 LDRT/STRT family (argument parse): Rt in bits 12-15, then
   the address operand encoded with the is_t (translate) variant.  */
static void
do_t_ldstt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
}
12089
12090 static void
12091 do_t_mla (void)
12092 {
12093 unsigned Rd, Rn, Rm, Ra;
12094
12095 Rd = inst.operands[0].reg;
12096 Rn = inst.operands[1].reg;
12097 Rm = inst.operands[2].reg;
12098 Ra = inst.operands[3].reg;
12099
12100 reject_bad_reg (Rd);
12101 reject_bad_reg (Rn);
12102 reject_bad_reg (Rm);
12103 reject_bad_reg (Ra);
12104
12105 inst.instruction |= Rd << 8;
12106 inst.instruction |= Rn << 16;
12107 inst.instruction |= Rm;
12108 inst.instruction |= Ra << 12;
12109 }
12110
12111 static void
12112 do_t_mlal (void)
12113 {
12114 unsigned RdLo, RdHi, Rn, Rm;
12115
12116 RdLo = inst.operands[0].reg;
12117 RdHi = inst.operands[1].reg;
12118 Rn = inst.operands[2].reg;
12119 Rm = inst.operands[3].reg;
12120
12121 reject_bad_reg (RdLo);
12122 reject_bad_reg (RdHi);
12123 reject_bad_reg (Rn);
12124 reject_bad_reg (Rm);
12125
12126 inst.instruction |= RdLo << 12;
12127 inst.instruction |= RdHi << 8;
12128 inst.instruction |= Rn << 16;
12129 inst.instruction |= Rm;
12130 }
12131
12132 static void
12133 do_t_mov_cmp (void)
12134 {
12135 unsigned Rn, Rm;
12136
12137 Rn = inst.operands[0].reg;
12138 Rm = inst.operands[1].reg;
12139
12140 if (Rn == REG_PC)
12141 set_it_insn_type_last ();
12142
12143 if (unified_syntax)
12144 {
12145 int r0off = (inst.instruction == T_MNEM_mov
12146 || inst.instruction == T_MNEM_movs) ? 8 : 16;
12147 unsigned long opcode;
12148 bfd_boolean narrow;
12149 bfd_boolean low_regs;
12150
12151 low_regs = (Rn <= 7 && Rm <= 7);
12152 opcode = inst.instruction;
12153 if (in_it_block ())
12154 narrow = opcode != T_MNEM_movs;
12155 else
12156 narrow = opcode != T_MNEM_movs || low_regs;
12157 if (inst.size_req == 4
12158 || inst.operands[1].shifted)
12159 narrow = FALSE;
12160
12161 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
12162 if (opcode == T_MNEM_movs && inst.operands[1].isreg
12163 && !inst.operands[1].shifted
12164 && Rn == REG_PC
12165 && Rm == REG_LR)
12166 {
12167 inst.instruction = T2_SUBS_PC_LR;
12168 return;
12169 }
12170
12171 if (opcode == T_MNEM_cmp)
12172 {
12173 constraint (Rn == REG_PC, BAD_PC);
12174 if (narrow)
12175 {
12176 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
12177 but valid. */
12178 warn_deprecated_sp (Rm);
12179 /* R15 was documented as a valid choice for Rm in ARMv6,
12180 but as UNPREDICTABLE in ARMv7. ARM's proprietary
12181 tools reject R15, so we do too. */
12182 constraint (Rm == REG_PC, BAD_PC);
12183 }
12184 else
12185 reject_bad_reg (Rm);
12186 }
12187 else if (opcode == T_MNEM_mov
12188 || opcode == T_MNEM_movs)
12189 {
12190 if (inst.operands[1].isreg)
12191 {
12192 if (opcode == T_MNEM_movs)
12193 {
12194 reject_bad_reg (Rn);
12195 reject_bad_reg (Rm);
12196 }
12197 else if (narrow)
12198 {
12199 /* This is mov.n. */
12200 if ((Rn == REG_SP || Rn == REG_PC)
12201 && (Rm == REG_SP || Rm == REG_PC))
12202 {
12203 as_tsktsk (_("Use of r%u as a source register is "
12204 "deprecated when r%u is the destination "
12205 "register."), Rm, Rn);
12206 }
12207 }
12208 else
12209 {
12210 /* This is mov.w. */
12211 constraint (Rn == REG_PC, BAD_PC);
12212 constraint (Rm == REG_PC, BAD_PC);
12213 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
12214 constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
12215 }
12216 }
12217 else
12218 reject_bad_reg (Rn);
12219 }
12220
12221 if (!inst.operands[1].isreg)
12222 {
12223 /* Immediate operand. */
12224 if (!in_it_block () && opcode == T_MNEM_mov)
12225 narrow = 0;
12226 if (low_regs && narrow)
12227 {
12228 inst.instruction = THUMB_OP16 (opcode);
12229 inst.instruction |= Rn << 8;
12230 if (inst.relocs[0].type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
12231 || inst.relocs[0].type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
12232 {
12233 if (inst.size_req == 2)
12234 inst.relocs[0].type = BFD_RELOC_ARM_THUMB_IMM;
12235 else
12236 inst.relax = opcode;
12237 }
12238 }
12239 else
12240 {
12241 constraint ((inst.relocs[0].type
12242 >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC)
12243 && (inst.relocs[0].type
12244 <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC) ,
12245 THUMB1_RELOC_ONLY);
12246
12247 inst.instruction = THUMB_OP32 (inst.instruction);
12248 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
12249 inst.instruction |= Rn << r0off;
12250 inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
12251 }
12252 }
12253 else if (inst.operands[1].shifted && inst.operands[1].immisreg
12254 && (inst.instruction == T_MNEM_mov
12255 || inst.instruction == T_MNEM_movs))
12256 {
12257 /* Register shifts are encoded as separate shift instructions. */
12258 bfd_boolean flags = (inst.instruction == T_MNEM_movs);
12259
12260 if (in_it_block ())
12261 narrow = !flags;
12262 else
12263 narrow = flags;
12264
12265 if (inst.size_req == 4)
12266 narrow = FALSE;
12267
12268 if (!low_regs || inst.operands[1].imm > 7)
12269 narrow = FALSE;
12270
12271 if (Rn != Rm)
12272 narrow = FALSE;
12273
12274 switch (inst.operands[1].shift_kind)
12275 {
12276 case SHIFT_LSL:
12277 opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
12278 break;
12279 case SHIFT_ASR:
12280 opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
12281 break;
12282 case SHIFT_LSR:
12283 opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
12284 break;
12285 case SHIFT_ROR:
12286 opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
12287 break;
12288 default:
12289 abort ();
12290 }
12291
12292 inst.instruction = opcode;
12293 if (narrow)
12294 {
12295 inst.instruction |= Rn;
12296 inst.instruction |= inst.operands[1].imm << 3;
12297 }
12298 else
12299 {
12300 if (flags)
12301 inst.instruction |= CONDS_BIT;
12302
12303 inst.instruction |= Rn << 8;
12304 inst.instruction |= Rm << 16;
12305 inst.instruction |= inst.operands[1].imm;
12306 }
12307 }
12308 else if (!narrow)
12309 {
12310 /* Some mov with immediate shift have narrow variants.
12311 Register shifts are handled above. */
12312 if (low_regs && inst.operands[1].shifted
12313 && (inst.instruction == T_MNEM_mov
12314 || inst.instruction == T_MNEM_movs))
12315 {
12316 if (in_it_block ())
12317 narrow = (inst.instruction == T_MNEM_mov);
12318 else
12319 narrow = (inst.instruction == T_MNEM_movs);
12320 }
12321
12322 if (narrow)
12323 {
12324 switch (inst.operands[1].shift_kind)
12325 {
12326 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
12327 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
12328 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
12329 default: narrow = FALSE; break;
12330 }
12331 }
12332
12333 if (narrow)
12334 {
12335 inst.instruction |= Rn;
12336 inst.instruction |= Rm << 3;
12337 inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
12338 }
12339 else
12340 {
12341 inst.instruction = THUMB_OP32 (inst.instruction);
12342 inst.instruction |= Rn << r0off;
12343 encode_thumb32_shifted_operand (1);
12344 }
12345 }
12346 else
12347 switch (inst.instruction)
12348 {
12349 case T_MNEM_mov:
12350 /* In v4t or v5t a move of two lowregs produces unpredictable
12351 results. Don't allow this. */
12352 if (low_regs)
12353 {
12354 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
12355 "MOV Rd, Rs with two low registers is not "
12356 "permitted on this architecture");
12357 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
12358 arm_ext_v6);
12359 }
12360
12361 inst.instruction = T_OPCODE_MOV_HR;
12362 inst.instruction |= (Rn & 0x8) << 4;
12363 inst.instruction |= (Rn & 0x7);
12364 inst.instruction |= Rm << 3;
12365 break;
12366
12367 case T_MNEM_movs:
12368 /* We know we have low registers at this point.
12369 Generate LSLS Rd, Rs, #0. */
12370 inst.instruction = T_OPCODE_LSL_I;
12371 inst.instruction |= Rn;
12372 inst.instruction |= Rm << 3;
12373 break;
12374
12375 case T_MNEM_cmp:
12376 if (low_regs)
12377 {
12378 inst.instruction = T_OPCODE_CMP_LR;
12379 inst.instruction |= Rn;
12380 inst.instruction |= Rm << 3;
12381 }
12382 else
12383 {
12384 inst.instruction = T_OPCODE_CMP_HR;
12385 inst.instruction |= (Rn & 0x8) << 4;
12386 inst.instruction |= (Rn & 0x7);
12387 inst.instruction |= Rm << 3;
12388 }
12389 break;
12390 }
12391 return;
12392 }
12393
12394 inst.instruction = THUMB_OP16 (inst.instruction);
12395
12396 /* PR 10443: Do not silently ignore shifted operands. */
12397 constraint (inst.operands[1].shifted,
12398 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
12399
12400 if (inst.operands[1].isreg)
12401 {
12402 if (Rn < 8 && Rm < 8)
12403 {
12404 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
12405 since a MOV instruction produces unpredictable results. */
12406 if (inst.instruction == T_OPCODE_MOV_I8)
12407 inst.instruction = T_OPCODE_ADD_I3;
12408 else
12409 inst.instruction = T_OPCODE_CMP_LR;
12410
12411 inst.instruction |= Rn;
12412 inst.instruction |= Rm << 3;
12413 }
12414 else
12415 {
12416 if (inst.instruction == T_OPCODE_MOV_I8)
12417 inst.instruction = T_OPCODE_MOV_HR;
12418 else
12419 inst.instruction = T_OPCODE_CMP_HR;
12420 do_t_cpy ();
12421 }
12422 }
12423 else
12424 {
12425 constraint (Rn > 7,
12426 _("only lo regs allowed with immediate"));
12427 inst.instruction |= Rn << 8;
12428 inst.relocs[0].type = BFD_RELOC_ARM_THUMB_IMM;
12429 }
12430 }
12431
static void
do_t_mov16 (void)
{
  /* Encode Thumb-2 MOVW/MOVT: load a 16-bit immediate into the bottom
     (MOVW) or top (MOVT) half of Rd.  Bit 23 of the base opcode
     distinguishes the two forms.  */
  unsigned Rd;
  bfd_vma imm;
  bfd_boolean top;

  top = (inst.instruction & 0x00800000) != 0;
  /* Map the generic :lower16:/:upper16: relocations parsed earlier onto
     their Thumb-specific counterparts, and reject a mismatch between the
     relocation operator and the mnemonic.  */
  if (inst.relocs[0].type == BFD_RELOC_ARM_MOVW)
    {
      constraint (top, _(":lower16: not allowed in this instruction"));
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_MOVW;
    }
  else if (inst.relocs[0].type == BFD_RELOC_ARM_MOVT)
    {
      constraint (!top, _(":upper16: not allowed in this instruction"));
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_MOVT;
    }

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  inst.instruction |= Rd << 8;
  if (inst.relocs[0].type == BFD_RELOC_UNUSED)
    {
      /* Constant immediate with no relocation: scatter the 16-bit value
	 into the T32 imm4:i:imm3:imm8 fields now.  */
      imm = inst.relocs[0].exp.X_add_number;
      inst.instruction |= (imm & 0xf000) << 4;
      inst.instruction |= (imm & 0x0800) << 15;
      inst.instruction |= (imm & 0x0700) << 4;
      inst.instruction |= (imm & 0x00ff);
    }
}
12464
static void
do_t_mvn_tst (void)
{
  /* Encode Thumb MVN/MVNS/TST/TEQ/CMP/CMN, choosing between the 16-bit
     and 32-bit encodings where both exist.  */
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* CMP/CMN only forbid PC; the others also forbid SP (reject_bad_reg).  */
  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      /* MVN/MVNS put the destination at bit 8; the compare/test forms
	 put their first operand in the Rn field at bit 16.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      /* Decide whether a 16-bit encoding is possible: it needs low
	 registers, no shift, no explicit .w, and flag-setting behaviour
	 consistent with the current IT-block context.  */
      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn
	       || inst.instruction == T_MNEM_tst)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      /* Register-shifted register is not available for these
		 mnemonics in Thumb-2; only constant shifts encode.  */
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Pre-unified syntax: only the 16-bit low-register register form
	 is accepted.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
12544
static void
do_t_mrs (void)
{
  /* Encode Thumb MRS (read a special/banked register into Rd).  The VFP
     pseudo-form (e.g. FPSCR via the VFP syntax) is handled first.  */
  unsigned Rd;

  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      /* Banked register operand: the parsed value packs the SYSm-style
	 fields; bit 9 / the 0xf000 pattern mark it as a valid banked
	 encoding (NOTE(review): exact field layout comes from the banked
	 register table elsewhere in this file — confirm there).  */
      unsigned br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698: The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bfd_boolean m_profile =
	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
	  constraint ((flags != 0) && m_profile, _("selected processor does "
		      "not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      inst.instruction |= 0xf0000;
    }
}
12592
static void
do_t_msr (void)
{
  /* Encode Thumb MSR (write Rn to a special/banked register).  The VFP
     pseudo-form is handled first; Thumb MSR has no immediate form.  */
  int flags;
  unsigned Rn;

  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  /* Operand 0 is either a banked register (isreg) or a PSR-field mask.  */
  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698: The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bfd_boolean m_profile =
	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
      /* M-profile with DSP allows the _s/_f fields; without DSP only
	 the _f field (APSR flags) may be written.  */
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		    && (bits & ~(PSR_s | PSR_f)) != 0)
		   || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		       && bits != PSR_f)) && m_profile,
		  _("selected processor does not support requested special "
		    "purpose register"));
    }
  else
    constraint ((flags & 0xff) != 0, _("selected processor does not support "
		"requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  /* Scatter the mask/banked-register fields into the T32 encoding.  */
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
12639
static void
do_t_mul (void)
{
  /* Encode Thumb MUL/MULS, choosing the 16-bit encoding when the
     destination overlaps a source and all registers are low.  */
  bfd_boolean narrow;
  unsigned Rd, Rn, Rm;

  /* Two-operand form: MUL Rd, Rm == MUL Rd, Rd, Rm.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  if (unified_syntax)
    {
      /* Narrow needs Rd to overlap a source, low registers, no .w, and
	 flag-setting consistent with the IT-block context.  */
      if (inst.size_req == 4
	  || (Rd != Rn
	      && Rd != Rm)
	  || Rn > 7
	  || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_muls)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
    }
  else
    {
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);
      narrow = TRUE;
    }

  if (narrow)
    {
      /* 16-bit MULS/Conditional MUL.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      /* The 16-bit form encodes only one source; Rd must be the other.  */
      if (Rd == Rn)
	inst.instruction |= Rm << 3;
      else if (Rd == Rm)
	inst.instruction |= Rn << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
  else
    {
      constraint (inst.instruction != T_MNEM_mul,
		  _("Thumb-2 MUL must not set flags"));
      /* 32-bit MUL.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rn << 16;
      inst.instruction |= Rm << 0;

      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
      reject_bad_reg (Rm);
    }
}
12702
12703 static void
12704 do_t_mull (void)
12705 {
12706 unsigned RdLo, RdHi, Rn, Rm;
12707
12708 RdLo = inst.operands[0].reg;
12709 RdHi = inst.operands[1].reg;
12710 Rn = inst.operands[2].reg;
12711 Rm = inst.operands[3].reg;
12712
12713 reject_bad_reg (RdLo);
12714 reject_bad_reg (RdHi);
12715 reject_bad_reg (Rn);
12716 reject_bad_reg (Rm);
12717
12718 inst.instruction |= RdLo << 12;
12719 inst.instruction |= RdHi << 8;
12720 inst.instruction |= Rn << 16;
12721 inst.instruction |= Rm;
12722
12723 if (RdLo == RdHi)
12724 as_tsktsk (_("rdhi and rdlo must be different"));
12725 }
12726
static void
do_t_nop (void)
{
  /* Encode Thumb NOP and the hint instructions (YIELD/WFE/WFI/SEV...),
     selecting 16- or 32-bit encodings as required.  */
  set_it_insn_type (NEUTRAL_IT_INSN);

  if (unified_syntax)
    {
      /* Hints above 15 only exist in the 32-bit encoding.  */
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].imm;
	}
      else
	{
	  /* PR9722: Check for Thumb2 availability before
	     generating a thumb2 nop instruction.  */
	  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= inst.operands[0].imm << 4;
	    }
	  else
	    /* Fall back to MOV r8, r8 — the traditional Thumb-1 NOP.  */
	    inst.instruction = 0x46c0;
	}
    }
  else
    {
      constraint (inst.operands[0].present,
		  _("Thumb does not support NOP with hints"));
      inst.instruction = 0x46c0;
    }
}
12759
static void
do_t_neg (void)
{
  /* Encode Thumb NEG/NEGS (RSB #0), choosing the 16-bit form when both
     registers are low and the IT-block context permits it.  */
  if (unified_syntax)
    {
      bfd_boolean narrow;

      /* Inside an IT block the non-flag-setting form narrows; outside,
	 the flag-setting form does.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      if (!narrow)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg << 8;
	  inst.instruction |= inst.operands[1].reg << 16;
	}
      else
	{
	  inst.instruction = THUMB_OP16 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
  else
    {
      /* Pre-unified syntax only has the 16-bit low-register form.  */
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
		  BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
}
12800
static void
do_t_orn (void)
{
  /* Encode Thumb-2 ORN/ORNS (32-bit only): Rd = Rn | ~operand2.  */
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  /* Two-operand form defaults Rn to Rd.  */
  Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;

  reject_bad_reg (Rd);
  /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN.  */
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;

  if (!inst.operands[2].isreg)
    {
      /* Immediate form: switch on the T32 modified-immediate bit and
	 leave encoding of the value to the fixup.  */
      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
      inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
    }
  else
    {
      unsigned Rm;

      Rm = inst.operands[2].reg;
      reject_bad_reg (Rm);

      /* Only constant shifts are encodable on the register form.  */
      constraint (inst.operands[2].shifted
		  && inst.operands[2].immisreg,
		  _("shift must be constant"));
      encode_thumb32_shifted_operand (2);
    }
}
12834
12835 static void
12836 do_t_pkhbt (void)
12837 {
12838 unsigned Rd, Rn, Rm;
12839
12840 Rd = inst.operands[0].reg;
12841 Rn = inst.operands[1].reg;
12842 Rm = inst.operands[2].reg;
12843
12844 reject_bad_reg (Rd);
12845 reject_bad_reg (Rn);
12846 reject_bad_reg (Rm);
12847
12848 inst.instruction |= Rd << 8;
12849 inst.instruction |= Rn << 16;
12850 inst.instruction |= Rm;
12851 if (inst.operands[3].present)
12852 {
12853 unsigned int val = inst.relocs[0].exp.X_add_number;
12854 constraint (inst.relocs[0].exp.X_op != O_constant,
12855 _("expression too complex"));
12856 inst.instruction |= (val & 0x1c) << 10;
12857 inst.instruction |= (val & 0x03) << 6;
12858 }
12859 }
12860
12861 static void
12862 do_t_pkhtb (void)
12863 {
12864 if (!inst.operands[3].present)
12865 {
12866 unsigned Rtmp;
12867
12868 inst.instruction &= ~0x00000020;
12869
12870 /* PR 10168. Swap the Rm and Rn registers. */
12871 Rtmp = inst.operands[1].reg;
12872 inst.operands[1].reg = inst.operands[2].reg;
12873 inst.operands[2].reg = Rtmp;
12874 }
12875 do_t_pkhbt ();
12876 }
12877
static void
do_t_pld (void)
{
  /* Encode Thumb-2 PLD/PLDW/PLI: validate a register-offset index and
     defer the address encoding to the common helper.  */
  if (inst.operands[0].immisreg)
    reject_bad_reg (inst.operands[0].imm);

  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
12886
static void
do_t_push_pop (void)
{
  /* Encode Thumb PUSH/POP, preferring the 16-bit encodings: plain
     low-register mask, then low registers plus LR (push) / PC (pop),
     then the 32-bit LDM/STM form.  */
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.relocs[0].type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  if (inst.size_req != 4 && (mask & ~0xff) == 0)
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if (inst.size_req != 4
	   && (mask & ~0xff) == (1U << (inst.instruction == T_MNEM_push
				       ? REG_LR : REG_PC)))
    {
      /* Low registers plus exactly LR (push) or PC (pop): still 16-bit,
	 with the extra register flagged by the M/P bit.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      /* Fall back to 32-bit LDMIA/STMDB on SP (register 13).  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_multi (TRUE /* do_io */, 13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
12919
12920 static void
12921 do_t_clrm (void)
12922 {
12923 if (unified_syntax)
12924 encode_thumb2_multi (FALSE /* do_io */, -1, inst.operands[0].imm, FALSE);
12925 else
12926 {
12927 inst.error = _("invalid register list to push/pop instruction");
12928 return;
12929 }
12930 }
12931
static void
do_t_vscclrm (void)
{
  /* Encode VSCCLRM (clear a list of floating-point registers).  The
     single- and double-precision forms place the base register number
     and list length in different fields.  */
  if (inst.operands[0].issingle)
    {
      /* S registers: low bit of the register number goes in D (bit 22),
	 the rest in Vd; imm is the register count.  */
      inst.instruction |= (inst.operands[0].reg & 0x1) << 22;
      inst.instruction |= (inst.operands[0].reg & 0x1e) << 11;
      inst.instruction |= inst.operands[0].imm;
    }
  else
    {
      /* D registers: top bit of the register number goes in D (bit 22);
	 bit 8 selects the double-precision form, and the count is in
	 units of words (hence the shift by 1).  */
      inst.instruction |= (inst.operands[0].reg & 0x10) << 18;
      inst.instruction |= (inst.operands[0].reg & 0xf) << 12;
      inst.instruction |= 1 << 8;
      inst.instruction |= inst.operands[0].imm << 1;
    }
}
12949
12950 static void
12951 do_t_rbit (void)
12952 {
12953 unsigned Rd, Rm;
12954
12955 Rd = inst.operands[0].reg;
12956 Rm = inst.operands[1].reg;
12957
12958 reject_bad_reg (Rd);
12959 reject_bad_reg (Rm);
12960
12961 inst.instruction |= Rd << 8;
12962 inst.instruction |= Rm << 16;
12963 inst.instruction |= Rm;
12964 }
12965
static void
do_t_rev (void)
{
  /* Encode Thumb REV/REV16/REVSH, using the 16-bit form for low
     registers when size allows.  */
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (Rd <= 7 && Rm <= 7
      && inst.size_req != 4)
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      /* 32-bit form: the source register appears in both Rn and Rm.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm << 16;
      inst.instruction |= Rm;
    }
  else
    inst.error = BAD_HIREG;
}
12994
12995 static void
12996 do_t_rrx (void)
12997 {
12998 unsigned Rd, Rm;
12999
13000 Rd = inst.operands[0].reg;
13001 Rm = inst.operands[1].reg;
13002
13003 reject_bad_reg (Rd);
13004 reject_bad_reg (Rm);
13005
13006 inst.instruction |= Rd << 8;
13007 inst.instruction |= Rm;
13008 }
13009
static void
do_t_rsb (void)
{
  /* Encode Thumb RSB/RSBS.  RSBS Rd, Rs, #0 outside an IT block can use
     the 16-bit NEG encoding; everything else is 32-bit.  */
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Bit 20 is the S bit: RSBS narrows outside an IT block, plain
	 RSB inside one.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      /* Only an immediate of exactly zero has a 16-bit equivalent.  */
      if (inst.relocs[0].exp.X_op != O_constant
	  || inst.relocs[0].exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.relocs[0].type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  /* 32-bit modified-immediate form; the fixup encodes the value.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
13064
13065 static void
13066 do_t_setend (void)
13067 {
13068 if (warn_on_deprecated
13069 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
13070 as_tsktsk (_("setend use is deprecated for ARMv8"));
13071
13072 set_it_insn_type (OUTSIDE_IT_INSN);
13073 if (inst.operands[0].imm)
13074 inst.instruction |= 0x8;
13075 }
13076
static void
do_t_shift (void)
{
  /* Encode the Thumb shift instructions (ASR/LSL/LSR/ROR, with or
     without S), selecting among the 16-bit immediate, 16-bit register,
     and 32-bit encodings.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* Narrow when the flag-setting behaviour matches the IT context,
	 all registers are low, and the form exists in 16 bits (there is
	 no 16-bit ROR-immediate, and the register form requires
	 Rd == Rn).  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* Immediate shifts encode as MOV/MOVS with a shifted
		 operand rather than as a distinct shift opcode.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.relocs[0].type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Pre-unified syntax: 16-bit encodings only, no flag-setting
	 mnemonics, low registers throughout.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;

	  /* PR 12854: Error on extraneous shifts.  */
	  constraint (inst.operands[2].shifted,
		      _("extraneous shift as part of operand to shift insn"));
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
13224
13225 static void
13226 do_t_simd (void)
13227 {
13228 unsigned Rd, Rn, Rm;
13229
13230 Rd = inst.operands[0].reg;
13231 Rn = inst.operands[1].reg;
13232 Rm = inst.operands[2].reg;
13233
13234 reject_bad_reg (Rd);
13235 reject_bad_reg (Rn);
13236 reject_bad_reg (Rm);
13237
13238 inst.instruction |= Rd << 8;
13239 inst.instruction |= Rn << 16;
13240 inst.instruction |= Rm;
13241 }
13242
13243 static void
13244 do_t_simd2 (void)
13245 {
13246 unsigned Rd, Rn, Rm;
13247
13248 Rd = inst.operands[0].reg;
13249 Rm = inst.operands[1].reg;
13250 Rn = inst.operands[2].reg;
13251
13252 reject_bad_reg (Rd);
13253 reject_bad_reg (Rn);
13254 reject_bad_reg (Rm);
13255
13256 inst.instruction |= Rd << 8;
13257 inst.instruction |= Rn << 16;
13258 inst.instruction |= Rm;
13259 }
13260
static void
do_t_smc (void)
{
  /* Encode Thumb SMC (secure monitor call) with a 16-bit immediate.  */
  unsigned int value = inst.relocs[0].exp.X_add_number;
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
	      _("SMC is not permitted on this architecture"));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));
  /* The immediate is fully encoded here, so no fixup is needed.  */
  inst.relocs[0].type = BFD_RELOC_UNUSED;
  inst.instruction |= (value & 0xf000) >> 12;
  inst.instruction |= (value & 0x0ff0);
  inst.instruction |= (value & 0x000f) << 16;
  /* PR gas/15623: SMC instructions must be last in an IT block.  */
  set_it_insn_type_last ();
}
13276
13277 static void
13278 do_t_hvc (void)
13279 {
13280 unsigned int value = inst.relocs[0].exp.X_add_number;
13281
13282 inst.relocs[0].type = BFD_RELOC_UNUSED;
13283 inst.instruction |= (value & 0x0fff);
13284 inst.instruction |= (value & 0xf000) << 4;
13285 }
13286
static void
do_t_ssat_usat (int bias)
{
  /* Shared encoder for Thumb-2 SSAT/USAT.  BIAS adjusts the saturation
     position operand: SSAT encodes width - 1, USAT encodes width.  */
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  if (inst.operands[3].present)
    {
      /* Optional shift on Rn: LSL or ASR by a constant 1-31.  */
      offsetT shift_amount = inst.relocs[0].exp.X_add_number;

      inst.relocs[0].type = BFD_RELOC_UNUSED;

      constraint (inst.relocs[0].exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit.  */

	  /* Split the 5-bit shift amount into imm3:imm2.  */
	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
13324
static void
do_t_ssat (void)
{
  /* SSAT encodes the saturation position as width - 1.  */
  do_t_ssat_usat (1);
}
13330
13331 static void
13332 do_t_ssat16 (void)
13333 {
13334 unsigned Rd, Rn;
13335
13336 Rd = inst.operands[0].reg;
13337 Rn = inst.operands[2].reg;
13338
13339 reject_bad_reg (Rd);
13340 reject_bad_reg (Rn);
13341
13342 inst.instruction |= Rd << 8;
13343 inst.instruction |= inst.operands[1].imm - 1;
13344 inst.instruction |= Rn << 16;
13345 }
13346
static void
do_t_strex (void)
{
  /* Encode Thumb-2 STREX: the address operand must be a plain
     base-plus-immediate-offset form with no writeback.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  /* The offset is an unsigned 8-bit quantity scaled by 4; leave it to
     the fixup machinery.  */
  inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
13363
static void
do_t_strexd (void)
{
  /* Encode Thumb-2 STREXD.  If Rt2 was omitted it defaults to Rt + 1.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[1].reg + 1;

  /* The status register must not overlap either data register or the
     base register.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 16;
}
13380
13381 static void
13382 do_t_sxtah (void)
13383 {
13384 unsigned Rd, Rn, Rm;
13385
13386 Rd = inst.operands[0].reg;
13387 Rn = inst.operands[1].reg;
13388 Rm = inst.operands[2].reg;
13389
13390 reject_bad_reg (Rd);
13391 reject_bad_reg (Rn);
13392 reject_bad_reg (Rm);
13393
13394 inst.instruction |= Rd << 8;
13395 inst.instruction |= Rn << 16;
13396 inst.instruction |= Rm;
13397 inst.instruction |= inst.operands[3].imm << 4;
13398 }
13399
static void
do_t_sxth (void)
{
  /* Encode Thumb SXTH/SXTB/UXTH/UXTB: 16-bit when both registers are
     low and there is no rotation, otherwise the 32-bit form.  */
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      /* Optional rotation goes in the rotate field at bit 4.  */
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
13435
/* Encode a Thumb SWI/SVC instruction.  The immediate is applied at
   fixup time through the SWI relocation.  */
static void
do_t_swi (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_SWI;
}
13441
/* Encode a Thumb-2 TBB/TBH (table branch) instruction.  */
static void
do_t_tb (void)
{
  unsigned Rn, Rm;
  int half;

  /* Bit 4 of the opcode template distinguishes TBH (halfword table)
     from TBB (byte table).  */
  half = (inst.instruction & 0x10) != 0;
  /* Table branches must be the last instruction in an IT block.  */
  set_it_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;	/* Table base.  */
  Rm = inst.operands[0].imm;	/* Index register.  */

  /* Before ARMv8, SP is not permitted as the table base.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  /* Only TBH takes a (LSL #1) shifted index; TBB forbids any shift.  */
  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
13464
/* Encode a Thumb UDF (permanently undefined) instruction.  The
   immediate defaults to zero; the 32-bit encoding is selected when the
   immediate exceeds 8 bits or a wide encoding was requested.  */
static void
do_t_udf (void)
{
  if (!inst.operands[0].present)
    inst.operands[0].imm = 0;

  if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
    {
      /* A narrow encoding cannot hold more than 8 bits.  */
      constraint (inst.size_req == 2,
		  _("immediate value out of range"));
      inst.instruction = THUMB_OP32 (inst.instruction);
      /* 16-bit immediate split across the imm4:imm12 fields.  */
      inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
      inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
    }
  else
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].imm;
    }

  /* UDF is valid both inside and outside an IT block.  */
  set_it_insn_type (NEUTRAL_IT_INSN);
}
13487
13488
/* Encode a Thumb-2 USAT instruction; shares the SSAT/USAT encoder,
   with the argument selecting the variant (see do_t_ssat_usat).  */
static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
13494
13495 static void
13496 do_t_usat16 (void)
13497 {
13498 unsigned Rd, Rn;
13499
13500 Rd = inst.operands[0].reg;
13501 Rn = inst.operands[2].reg;
13502
13503 reject_bad_reg (Rd);
13504 reject_bad_reg (Rn);
13505
13506 inst.instruction |= Rd << 8;
13507 inst.instruction |= inst.operands[1].imm;
13508 inst.instruction |= Rn << 16;
13509 }
13510
13511 /* Checking the range of the branch offset (VAL) with NBITS bits
13512 and IS_SIGNED signedness. Also checks the LSB to be 0. */
13513 static int
13514 v8_1_branch_value_check (int val, int nbits, int is_signed)
13515 {
13516 gas_assert (nbits > 0 && nbits <= 32);
13517 if (is_signed)
13518 {
13519 int cmp = (1 << (nbits - 1));
13520 if ((val < -cmp) || (val >= cmp) || (val & 0x01))
13521 return FAIL;
13522 }
13523 else
13524 {
13525 if ((val <= 0) || (val >= (1 << nbits)) || (val & 0x1))
13526 return FAIL;
13527 }
13528 return SUCCESS;
13529 }
13530
/* For branches in Armv8.1-M Mainline: encode the BF, BFL, BFCSEL, BFX
   and BFLX "branch future" instructions.  Operand 0 is the branch
   point label; operands that are not resolved constants are deferred
   to relocations instead of being encoded here.  */
static void
do_t_branch_future (void)
{
  unsigned long insn = inst.instruction;

  inst.instruction = THUMB_OP32 (inst.instruction);
  if (inst.operands[0].hasreloc == 0)
    {
      if (v8_1_branch_value_check (inst.operands[0].imm, 5, FALSE) == FAIL)
	as_bad (BAD_BRANCH_OFF);

      /* Encode bits [4:1] of the branch point offset.  */
      inst.instruction |= ((inst.operands[0].imm & 0x1f) >> 1) << 23;
    }
  else
    {
      inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH5;
      inst.relocs[0].pc_rel = 1;
    }

  switch (insn)
    {
    case T_MNEM_bf:
      if (inst.operands[1].hasreloc == 0)
	{
	  int val = inst.operands[1].imm;
	  if (v8_1_branch_value_check (inst.operands[1].imm, 17, TRUE) == FAIL)
	    as_bad (BAD_BRANCH_OFF);

	  /* Scatter the 17-bit offset across the immA:immB:immC fields.  */
	  int immA = (val & 0x0001f000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF17;
	  inst.relocs[1].pc_rel = 1;
	}
      break;

    case T_MNEM_bfl:
      if (inst.operands[1].hasreloc == 0)
	{
	  int val = inst.operands[1].imm;
	  if (v8_1_branch_value_check (inst.operands[1].imm, 19, TRUE) == FAIL)
	    as_bad (BAD_BRANCH_OFF);

	  /* Same field layout as BF but with a 19-bit offset.  */
	  int immA = (val & 0x0007f000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF19;
	  inst.relocs[1].pc_rel = 1;
	}
      break;

    case T_MNEM_bfcsel:
      /* Operand 1.  */
      if (inst.operands[1].hasreloc == 0)
	{
	  int val = inst.operands[1].imm;
	  int immA = (val & 0x00001000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF13;
	  inst.relocs[1].pc_rel = 1;
	}

      /* Operand 2.  The else-branch target must be either 2 or 4 bytes
	 past the branch point (the T bit selects which); both branch
	 operands must be constants or both relocations.  */
      if (inst.operands[2].hasreloc == 0)
	{
	  constraint ((inst.operands[0].hasreloc != 0), BAD_ARGS);
	  int val2 = inst.operands[2].imm;
	  int val0 = inst.operands[0].imm & 0x1f;
	  int diff = val2 - val0;
	  if (diff == 4)
	    inst.instruction |= 1 << 17; /* T bit.  */
	  else if (diff != 2)
	    as_bad (_("out of range label-relative fixup value"));
	}
      else
	{
	  constraint ((inst.operands[0].hasreloc == 0), BAD_ARGS);
	  inst.relocs[2].type = BFD_RELOC_THUMB_PCREL_BFCSEL;
	  inst.relocs[2].pc_rel = 1;
	}

      /* Operand 3.  The condition is encoded directly; the mnemonic
	 itself may not be conditionalised.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= (inst.operands[3].imm & 0xf) << 18;
      break;

    case T_MNEM_bfx:
    case T_MNEM_bflx:
      /* Register forms: only the branch target register is encoded.  */
      inst.instruction |= inst.operands[1].reg << 16;
      break;

    default: abort ();
    }
}
13639
/* Helper function for do_t_loloop to handle relocations.  A constant
   offset is encoded immediately (negated for LE, which always branches
   backwards); anything else is deferred to a LOOP12 relocation.  */
static void
v8_1_loop_reloc (int is_le)
{
  if (inst.relocs[0].exp.X_op == O_constant)
    {
      int value = inst.relocs[0].exp.X_add_number;
      /* LE encodes a backwards branch as a positive offset.  */
      value = (is_le) ? -value : value;

      if (v8_1_branch_value_check (value, 12, FALSE) == FAIL)
	as_bad (BAD_BRANCH_OFF);

      int imml, immh;

      /* Split the even 12-bit offset into the immh:imml fields.  */
      immh = (value & 0x00000ffc) >> 2;
      imml = (value & 0x00000002) >> 1;

      inst.instruction |= (imml << 11) | (immh << 1);
    }
  else
    {
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_LOOP12;
      inst.relocs[0].pc_rel = 1;
    }
}
13665
/* To handle the Scalar Low Overhead Loop instructions
   in Armv8.1-M Mainline: LE, WLS and DLS.  */
static void
do_t_loloop (void)
{
  unsigned long insn = inst.instruction;

  /* These instructions may not appear inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction = THUMB_OP32 (inst.instruction);

  switch (insn)
    {
    case T_MNEM_le:
      /* le <label>.  With the LR operand omitted, set bit 21 to select
	 the LR-less form.  */
      if (!inst.operands[0].present)
	inst.instruction |= 1 << 21;

      v8_1_loop_reloc (TRUE);
      break;

    case T_MNEM_wls:
      v8_1_loop_reloc (FALSE);
      /* Fall through.  */
    case T_MNEM_dls:
      /* WLS/DLS take the loop count in a core register (Rn).  */
      constraint (inst.operands[1].isreg != 1, BAD_ARGS);
      inst.instruction |= (inst.operands[1].reg << 16);
      break;

    default: abort();
    }
}
13697
13698 /* Neon instruction encoder helpers. */
13699
13700 /* Encodings for the different types for various Neon opcodes. */
13701
/* An "invalid" code for the following tables.  */
#define N_INV -1u

/* One row of NEON_ENC_TAB below: the up-to-three alternative encodings
   of a single overloaded Neon mnemonic (N_INV where a variant does not
   exist).  Which column applies is chosen via the NEON_ENC_* accessor
   macros further down.  */
struct neon_tab_entry
{
  unsigned integer;		/* Integer / ARM-register / interleave variant.  */
  unsigned float_or_poly;	/* Float or polynomial / lane variant.  */
  unsigned scalar_or_imm;	/* Scalar or immediate / dup variant.  */
};
13711
/* Map overloaded Neon opcodes to their respective encodings.  Each X()
   row lists the integer, float/poly and scalar/immediate encoding
   variants of one mnemonic; the table is expanded twice below, once to
   generate the N_MNEM_* enumerators and once to build neon_enc_tab.  */
#define NEON_ENC_TAB \
  X(vabd, 0x0000700, 0x1200d00, N_INV), \
  X(vmax, 0x0000600, 0x0000f00, N_INV), \
  X(vmin, 0x0000610, 0x0200f00, N_INV), \
  X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
  X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
  X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
  X(vadd, 0x0000800, 0x0000d00, N_INV), \
  X(vsub, 0x1000800, 0x0200d00, N_INV), \
  X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
  X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
  X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */  \
  X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
  X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
  X(vfma, N_INV, 0x0000c10, N_INV), \
  X(vfms, N_INV, 0x0200c10, N_INV), \
  X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
  X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
  X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
  X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal, 0x0800800, N_INV, 0x0800240), \
  X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
  X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
  X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
  X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
  X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
  X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
  X(vqrdmlah, 0x3000b10, N_INV, 0x0800e40), \
  X(vqrdmlsh, 0x3000c10, N_INV, 0x0800f40), \
  X(vshl, 0x0000400, N_INV, 0x0800510), \
  X(vqshl, 0x0000410, N_INV, 0x0800710), \
  X(vand, 0x0000110, N_INV, 0x0800030), \
  X(vbic, 0x0100110, N_INV, 0x0800030), \
  X(veor, 0x1000110, N_INV, N_INV), \
  X(vorn, 0x0300110, N_INV, 0x0800010), \
  X(vorr, 0x0200110, N_INV, 0x0800010), \
  X(vmvn, 0x1b00580, N_INV, 0x0800030), \
  X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate.  */ \
  X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point.  */ \
  X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar.  */ \
  X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1, 0x0000000, 0x0800000, N_INV), \
  X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
  X(vst2, 0x0000100, 0x0800100, N_INV), \
  X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
  X(vst3, 0x0000200, 0x0800200, N_INV), \
  X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
  X(vst4, 0x0000300, 0x0800300, N_INV), \
  X(vmovn, 0x1b20200, N_INV, N_INV), \
  X(vtrn, 0x1b20080, N_INV, N_INV), \
  X(vqmovn, 0x1b20200, N_INV, N_INV), \
  X(vqmovun, 0x1b20240, N_INV, N_INV), \
  X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
  X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
  X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
  X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
  X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
  X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
  X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
  X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
  X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV), \
  X(vseleq, 0xe000a00, N_INV, N_INV), \
  X(vselvs, 0xe100a00, N_INV, N_INV), \
  X(vselge, 0xe200a00, N_INV, N_INV), \
  X(vselgt, 0xe300a00, N_INV, N_INV), \
  X(vmaxnm, 0xe800a00, 0x3000f10, N_INV), \
  X(vminnm, 0xe800a40, 0x3200f10, N_INV), \
  X(vcvta, 0xebc0a40, 0x3bb0000, N_INV), \
  X(vrintr, 0xeb60a40, 0x3ba0400, N_INV), \
  X(vrinta, 0xeb80a40, 0x3ba0400, N_INV), \
  X(aes, 0x3b00300, N_INV, N_INV), \
  X(sha3op, 0x2000c00, N_INV, N_INV), \
  X(sha1h, 0x3b902c0, N_INV, N_INV), \
  X(sha2op, 0x3ba0380, N_INV, N_INV)

/* N_MNEM_xxx enumerators naming each row of the table above.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};

/* The encoding table itself, indexed by enum neon_opc.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};
13803
/* Do not use these macros; instead, use NEON_ENCODE defined below.  */

/* Accessors selecting one column of neon_enc_tab; the low 28 bits of
   the instruction template hold the N_MNEM_* table index.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
/* These variants additionally preserve the top bits of the original
   template (e.g. condition bits) alongside the table encoding.  */
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
#define NEON_ENC_FPV8_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))

/* Replace the current instruction template with the encoding variant
   selected by TYPE, and mark the instruction as Neon so that type
   suffixes are accepted.  */
#define NEON_ENCODE(type, inst) \
  do \
    { \
      inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
      inst.is_neon = 1; \
    } \
  while (0)

/* Diagnose a Neon type suffix given on an instruction that never went
   through NEON_ENCODE.  */
#define check_neon_suffixes \
  do \
    { \
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
	{ \
	  as_bad (_("invalid neon suffix for non neon instruction")); \
	  return; \
	} \
    } \
  while (0)
13839
/* Define shapes for instruction operands. The following mnemonic characters
   are used in this table:

     H - VFP S<n> register used as a half-precision (16-bit) operand
     F - VFP S<n> register
     D - Neon D<n> register
     Q - Neon Q<n> register
     I - Immediate
     S - Scalar
     R - ARM register
     L - D<n> register list

   This table is used to generate various data:
   - enumerations of the form NS_DDR to be used as arguments to
     neon_select_shape.
   - a table classifying shapes into single, double, quad, mixed.
   - a table used to drive neon_select_shape.  */

#define NEON_SHAPE_DEF \
  X(3, (D, D, D), DOUBLE), \
  X(3, (Q, Q, Q), QUAD), \
  X(3, (D, D, I), DOUBLE), \
  X(3, (Q, Q, I), QUAD), \
  X(3, (D, D, S), DOUBLE), \
  X(3, (Q, Q, S), QUAD), \
  X(2, (D, D), DOUBLE), \
  X(2, (Q, Q), QUAD), \
  X(2, (D, S), DOUBLE), \
  X(2, (Q, S), QUAD), \
  X(2, (D, R), DOUBLE), \
  X(2, (Q, R), QUAD), \
  X(2, (D, I), DOUBLE), \
  X(2, (Q, I), QUAD), \
  X(3, (D, L, D), DOUBLE), \
  X(2, (D, Q), MIXED), \
  X(2, (Q, D), MIXED), \
  X(3, (D, Q, I), MIXED), \
  X(3, (Q, D, I), MIXED), \
  X(3, (Q, D, D), MIXED), \
  X(3, (D, Q, Q), MIXED), \
  X(3, (Q, Q, D), MIXED), \
  X(3, (Q, D, S), MIXED), \
  X(3, (D, Q, S), MIXED), \
  X(4, (D, D, D, I), DOUBLE), \
  X(4, (Q, Q, Q, I), QUAD), \
  X(4, (D, D, S, I), DOUBLE), \
  X(4, (Q, Q, S, I), QUAD), \
  X(2, (F, F), SINGLE), \
  X(3, (F, F, F), SINGLE), \
  X(2, (F, I), SINGLE), \
  X(2, (F, D), MIXED), \
  X(2, (D, F), MIXED), \
  X(3, (F, F, I), MIXED), \
  X(4, (R, R, F, F), SINGLE), \
  X(4, (F, F, R, R), SINGLE), \
  X(3, (D, R, R), DOUBLE), \
  X(3, (R, R, D), DOUBLE), \
  X(2, (S, R), SINGLE), \
  X(2, (R, S), SINGLE), \
  X(2, (F, R), SINGLE), \
  X(2, (R, F), SINGLE), \
/* Half float shape supported so far.  */\
  X (2, (H, D), MIXED), \
  X (2, (D, H), MIXED), \
  X (2, (H, F), MIXED), \
  X (2, (F, H), MIXED), \
  X (2, (H, H), HALF), \
  X (2, (H, R), HALF), \
  X (2, (R, H), HALF), \
  X (2, (H, I), HALF), \
  X (3, (H, H, H), HALF), \
  X (3, (H, F, I), MIXED), \
  X (3, (F, H, I), MIXED), \
  X (3, (D, H, H), MIXED), \
  X (3, (D, H, S), MIXED)

/* Paste the element letters into NS_* enumerator names.  */
#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D

#define X(N, L, C) S##N L

/* One enumerator per shape, plus NS_NULL as the list terminator /
   failure value for neon_select_shape.  */
enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4

enum neon_shape_class
{
  SC_HALF,
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};

#define X(N, L, C) SC_##C

/* Classification of each shape, indexed by enum neon_shape.  */
static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X

/* One enumerator per operand-letter used in NEON_SHAPE_DEF.  */
enum neon_shape_el
{
  SE_H,
  SE_F,
  SE_D,
  SE_Q,
  SE_I,
  SE_S,
  SE_R,
  SE_L
};

/* Register widths of above.  Indexed by enum neon_shape_el; zero for
   the non-register elements (immediates and register lists).  */
static unsigned neon_shape_el_size[] =
{
  16,	/* SE_H.  */
  32,	/* SE_F.  */
  64,	/* SE_D.  */
  128,	/* SE_Q.  */
  0,	/* SE_I.  */
  32,	/* SE_S.  */
  32,	/* SE_R.  */
  0	/* SE_L.  */
};

/* Expanded form of one shape: its operand count and per-operand
   element kinds.  */
struct neon_shape_info
{
  unsigned els;
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
};

#define S2(A,B)		{ SE_##A, SE_##B }
#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

/* Shape descriptions, indexed by enum neon_shape; drives
   neon_select_shape.  */
static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
13996
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.  Note that the modifier bits (N_DBL onwards) reuse
   the same values as the low type bits: they are only interpreted when
   N_EQK is present.  */

enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_P64	 = 0x0200000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_P64
};

/* All the N_EQK modifier bits combined.  */
#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

/* Convenience unions of the type bits above.  */
#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_S_32     (N_S8 | N_S16 | N_S32)
#define N_F_16_32  (N_F16 | N_F32)
#define N_SUF_32   (N_SU_32 | N_F_16_32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
#define N_F_ALL    (N_F16 | N_F32 | N_F64)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
14057
/* Select a "shape" for the current instruction (describing register types or
   sizes) from a list of alternatives. Return NS_NULL if the current instruction
   doesn't fit. For non-polymorphic shapes, checking is usually done as a
   function of operand parsing, so this function doesn't need to be called.
   Shapes should be listed in order of decreasing length.  */

static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Try each candidate shape in turn until one matches all operands.  */
  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  switch (neon_shape_tab[shape].el[j])
	    {
	      /* If a  .f16,  .16,  .u16,  .s16 type specifier is given over
		 a VFP single precision register operand, it's essentially
		 means only half of the register is used.

		 If the type specifier is given after the mnemonics, the
		 information is stored in inst.vectype.  If the type specifier
		 is given after register operand, the information is stored
		 in inst.operands[].vectype.

		 When there is only one type specifier, and all the register
		 operands are the same type of hardware register, the type
		 specifier applies to all register operands.

		 If no type specifier is given, the shape is inferred from
		 operand information.

		 for example:
		 vadd.f16 s0, s1, s2:		NS_HHH
		 vabs.f16 s0, s1:		NS_HH
		 vmov.f16 s0, r1:		NS_HR
		 vmov.f16 r0, s1:		NS_RH
		 vcvt.f16 r0, s1:		NS_RH
		 vcvt.f16.s32	s2, s2, #29:	NS_HFI
		 vcvt.f16.s32	s2, s2:		NS_HF
	      */
	    case SE_H:
	      /* Half precision: a single-precision register with a 16-bit
		 type specifier from one of the three sources above.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1
			 && inst.vectype.el[0].size == 16)
			|| (inst.vectype.elems > 1
			    && inst.vectype.el[j].size == 16)
			|| (inst.vectype.elems == 0
			    && inst.operands[j].vectype.type != NT_invtype
			    && inst.operands[j].vectype.size == 16))))
		matches = 0;
	      break;

	    case SE_F:
	      /* Single precision: 32-bit (or absent) type specifier.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1 && inst.vectype.el[0].size == 32)
			|| (inst.vectype.elems > 1 && inst.vectype.el[j].size == 32)
			|| (inst.vectype.elems == 0
			    && (inst.operands[j].vectype.size == 32
				|| inst.operands[j].vectype.type == NT_invtype)))))
		matches = 0;
	      break;

	    case SE_D:
	      /* Neon D register.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      /* ARM core register.  */
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      /* Neon Q register.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      /* Immediate.  */
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      /* Scalar.  */
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_L:
	      /* Register list: accepted without further checks here.  */
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
	/* We've matched all the entries in the shape table, and we don't
	   have any left over operands which have not been matched.  */
	break;
    }

  va_end (ap);

  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
14200
/* True if SHAPE is predominantly a quadword operation (most of the time, this
   means the Q bit should be set).  */

static int
neon_quad (enum neon_shape shape)
{
  return neon_shape_class[shape] == SC_QUAD;
}
14209
14210 static void
14211 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
14212 unsigned *g_size)
14213 {
14214 /* Allow modification to be made to types which are constrained to be
14215 based on the key element, based on bits set alongside N_EQK. */
14216 if ((typebits & N_EQK) != 0)
14217 {
14218 if ((typebits & N_HLF) != 0)
14219 *g_size /= 2;
14220 else if ((typebits & N_DBL) != 0)
14221 *g_size *= 2;
14222 if ((typebits & N_SGN) != 0)
14223 *g_type = NT_signed;
14224 else if ((typebits & N_UNS) != 0)
14225 *g_type = NT_unsigned;
14226 else if ((typebits & N_INT) != 0)
14227 *g_type = NT_integer;
14228 else if ((typebits & N_FLT) != 0)
14229 *g_type = NT_float;
14230 else if ((typebits & N_SIZ) != 0)
14231 *g_type = NT_untyped;
14232 }
14233 }
14234
/* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
   operand type, i.e. the single type specified in a Neon instruction when it
   is the only one given.  */

static struct neon_type_el
neon_type_promote (struct neon_type_el *key, unsigned thisarg)
{
  struct neon_type_el dest = *key;

  /* Promotion only applies to operands tied to the key type.  */
  gas_assert ((thisarg & N_EQK) != 0);

  /* Apply any N_DBL/N_HLF/N_SGN/... modifiers carried with N_EQK.  */
  neon_modify_type_size (thisarg, &dest.type, &dest.size);

  return dest;
}
14250
/* Convert Neon type and size into compact bitmask representation.
   Returns N_UTYP for any (type, size) pair with no corresponding
   mask bit.  */

static enum neon_type_mask
type_chk_of_el_type (enum neon_el_type type, unsigned size)
{
  switch (type)
    {
    case NT_untyped:
      switch (size)
	{
	case 8:  return N_8;
	case 16: return N_16;
	case 32: return N_32;
	case 64: return N_64;
	default: ;
	}
      break;

    case NT_integer:
      switch (size)
	{
	case 8:  return N_I8;
	case 16: return N_I16;
	case 32: return N_I32;
	case 64: return N_I64;
	default: ;
	}
      break;

    case NT_float:
      switch (size)
	{
	case 16: return N_F16;
	case 32: return N_F32;
	case 64: return N_F64;
	default: ;
	}
      break;

    case NT_poly:
      switch (size)
	{
	case 8:  return N_P8;
	case 16: return N_P16;
	case 64: return N_P64;
	default: ;
	}
      break;

    case NT_signed:
      switch (size)
	{
	case 8:  return N_S8;
	case 16: return N_S16;
	case 32: return N_S32;
	case 64: return N_S64;
	default: ;
	}
      break;

    case NT_unsigned:
      switch (size)
	{
	case 8:  return N_U8;
	case 16: return N_U16;
	case 32: return N_U32;
	case 64: return N_U64;
	default: ;
	}
      break;

    default: ;
    }

  /* No single mask bit represents this combination.  */
  return N_UTYP;
}
14327
/* Convert compact Neon bitmask type representation to a type and size. Only
   handles the case where a single bit is set in the mask.  Returns FAIL
   for N_EQK masks or masks with no recognised size/type bit, SUCCESS
   otherwise with *TYPE and *SIZE filled in.  */

static int
el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
		     enum neon_type_mask mask)
{
  /* An N_EQK mask names no concrete type of its own.  */
  if ((mask & N_EQK) != 0)
    return FAIL;

  /* Derive the element size from whichever size group the bit is in.  */
  if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
    *size = 8;
  else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
    *size = 16;
  else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
    *size = 32;
  else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
    *size = 64;
  else
    return FAIL;

  /* Derive the element type from the bit's type group.  */
  if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
    *type = NT_signed;
  else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
    *type = NT_unsigned;
  else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
    *type = NT_integer;
  else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
    *type = NT_untyped;
  else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
    *type = NT_poly;
  else if ((mask & (N_F_ALL)) != 0)
    *type = NT_float;
  else
    return FAIL;

  return SUCCESS;
}
14366
14367 /* Modify a bitmask of allowed types. This is only needed for type
14368 relaxation. */
14369
14370 static unsigned
14371 modify_types_allowed (unsigned allowed, unsigned mods)
14372 {
14373 unsigned size;
14374 enum neon_el_type type;
14375 unsigned destmask;
14376 int i;
14377
14378 destmask = 0;
14379
14380 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
14381 {
14382 if (el_type_of_type_chk (&type, &size,
14383 (enum neon_type_mask) (allowed & i)) == SUCCESS)
14384 {
14385 neon_modify_type_size (mods, &type, &size);
14386 destmask |= type_chk_of_el_type (type, size);
14387 }
14388 }
14389
14390 return destmask;
14391 }
14392
14393 /* Check type and return type classification.
14394 The manual states (paraphrase): If one datatype is given, it indicates the
14395 type given in:
14396 - the second operand, if there is one
14397 - the operand, if there is no second operand
14398 - the result, if there are no operands.
14399 This isn't quite good enough though, so we use a concept of a "key" datatype
14400 which is set on a per-instruction basis, which is the one which matters when
14401 only one data type is written.
14402 Note: this function has side-effects (e.g. filling in missing operands). All
14403 Neon instructions should call it before performing bit encoding. */
14404
/* Check the operand types against the ELS varargs type masks, inferring
   any types that were omitted from the mnemonic or operands.  Returns the
   resolved type of the "key" operand on success, or an NT_invtype element
   (after reporting via first_error) on failure.  */
static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  The operand carrying N_KEY becomes the "key"
     operand the others are checked/inferred against.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  /* Types may come from the mnemonic suffix or the per-operand suffixes,
     but not both.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      /* One type given for several operands: treat it as the key operand's
	 type and promote the rest from it.  */
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Two passes: pass 0 records the key operand's resolved type and size;
     pass 1 validates every operand (including VFP register widths) against
     the key.  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  /* On pass 1, N_EQK operands are checked against the key operand's
	     allowed set, possibly modified (halved/doubled etc.).  */
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if (((types_allowed & N_UNT) == 0)
	      && ((g_size == 8 && (types_allowed & N_8) != 0)
		  || (g_size == 16 && (types_allowed & N_16) != 0)
		  || (g_size == 32 && (types_allowed & N_32) != 0)
		  || (g_size == 64 && (types_allowed & N_64) != 0)))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;

		  /* Check architecture constraint on FP16 extension.  */
		  if (k_size == 16
		      && k_type == NT_float
		      && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
		    {
		      inst.error = _(BAD_FP16);
		      return badtype;
		    }
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape;
		  unsigned regwidth, match;

		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
		  if (ns == NS_NULL)
		    {
		      first_error (_("invalid instruction shape"));
		      return badtype;
		    }
		  regshape = neon_shape_tab[ns].el[i];
		  regwidth = neon_shape_el_size[regshape];

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  /* FP16 will use a single precision register.  */
		  if (regwidth == 32 && match == 16)
		    {
		      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
			match = regwidth;
		      else
			{
			  inst.error = _(BAD_FP16);
			  return badtype;
			}
		    }

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (_("bad type in Neon instruction"));
		      return badtype;
		    }
		}
	      else
		{
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}
14602
14603 /* Neon-style VFP instruction forwarding. */
14604
14605 /* Thumb VFP instructions have 0xE in the condition field. */
14606
14607 static void
14608 do_vfp_cond_or_thumb (void)
14609 {
14610 inst.is_neon = 1;
14611
14612 if (thumb_mode)
14613 inst.instruction |= 0xe0000000;
14614 else
14615 inst.instruction |= inst.cond << 28;
14616 }
14617
14618 /* Look up and encode a simple mnemonic, for use as a helper function for the
14619 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
14620 etc. It is assumed that operand parsing has already been done, and that the
14621 operands are in the form expected by the given opcode (this isn't necessarily
14622 the same as the form in which they were parsed, hence some massaging must
14623 take place before this function is called).
14624 Checks current arch version against that in the looked-up opcode. */
14625
14626 static void
14627 do_vfp_nsyn_opcode (const char *opname)
14628 {
14629 const struct asm_opcode *opcode;
14630
14631 opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
14632
14633 if (!opcode)
14634 abort ();
14635
14636 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
14637 thumb_mode ? *opcode->tvariant : *opcode->avariant),
14638 _(BAD_FPU));
14639
14640 inst.is_neon = 1;
14641
14642 if (thumb_mode)
14643 {
14644 inst.instruction = opcode->tvalue;
14645 opcode->tencode ();
14646 }
14647 else
14648 {
14649 inst.instruction = (inst.cond << 28) | opcode->avalue;
14650 opcode->aencode ();
14651 }
14652 }
14653
14654 static void
14655 do_vfp_nsyn_add_sub (enum neon_shape rs)
14656 {
14657 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
14658
14659 if (rs == NS_FFF || rs == NS_HHH)
14660 {
14661 if (is_add)
14662 do_vfp_nsyn_opcode ("fadds");
14663 else
14664 do_vfp_nsyn_opcode ("fsubs");
14665
14666 /* ARMv8.2 fp16 instruction. */
14667 if (rs == NS_HHH)
14668 do_scalar_fp16_v82_encode ();
14669 }
14670 else
14671 {
14672 if (is_add)
14673 do_vfp_nsyn_opcode ("faddd");
14674 else
14675 do_vfp_nsyn_opcode ("fsubd");
14676 }
14677 }
14678
14679 /* Check operand types to see if this is a VFP instruction, and if so call
14680 PFN (). */
14681
14682 static int
14683 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
14684 {
14685 enum neon_shape rs;
14686 struct neon_type_el et;
14687
14688 switch (args)
14689 {
14690 case 2:
14691 rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14692 et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14693 break;
14694
14695 case 3:
14696 rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14697 et = neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14698 N_F_ALL | N_KEY | N_VFP);
14699 break;
14700
14701 default:
14702 abort ();
14703 }
14704
14705 if (et.type != NT_invtype)
14706 {
14707 pfn (rs);
14708 return SUCCESS;
14709 }
14710
14711 inst.error = NULL;
14712 return FAIL;
14713 }
14714
14715 static void
14716 do_vfp_nsyn_mla_mls (enum neon_shape rs)
14717 {
14718 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
14719
14720 if (rs == NS_FFF || rs == NS_HHH)
14721 {
14722 if (is_mla)
14723 do_vfp_nsyn_opcode ("fmacs");
14724 else
14725 do_vfp_nsyn_opcode ("fnmacs");
14726
14727 /* ARMv8.2 fp16 instruction. */
14728 if (rs == NS_HHH)
14729 do_scalar_fp16_v82_encode ();
14730 }
14731 else
14732 {
14733 if (is_mla)
14734 do_vfp_nsyn_opcode ("fmacd");
14735 else
14736 do_vfp_nsyn_opcode ("fnmacd");
14737 }
14738 }
14739
14740 static void
14741 do_vfp_nsyn_fma_fms (enum neon_shape rs)
14742 {
14743 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
14744
14745 if (rs == NS_FFF || rs == NS_HHH)
14746 {
14747 if (is_fma)
14748 do_vfp_nsyn_opcode ("ffmas");
14749 else
14750 do_vfp_nsyn_opcode ("ffnmas");
14751
14752 /* ARMv8.2 fp16 instruction. */
14753 if (rs == NS_HHH)
14754 do_scalar_fp16_v82_encode ();
14755 }
14756 else
14757 {
14758 if (is_fma)
14759 do_vfp_nsyn_opcode ("ffmad");
14760 else
14761 do_vfp_nsyn_opcode ("ffnmad");
14762 }
14763 }
14764
14765 static void
14766 do_vfp_nsyn_mul (enum neon_shape rs)
14767 {
14768 if (rs == NS_FFF || rs == NS_HHH)
14769 {
14770 do_vfp_nsyn_opcode ("fmuls");
14771
14772 /* ARMv8.2 fp16 instruction. */
14773 if (rs == NS_HHH)
14774 do_scalar_fp16_v82_encode ();
14775 }
14776 else
14777 do_vfp_nsyn_opcode ("fmuld");
14778 }
14779
14780 static void
14781 do_vfp_nsyn_abs_neg (enum neon_shape rs)
14782 {
14783 int is_neg = (inst.instruction & 0x80) != 0;
14784 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_VFP | N_KEY);
14785
14786 if (rs == NS_FF || rs == NS_HH)
14787 {
14788 if (is_neg)
14789 do_vfp_nsyn_opcode ("fnegs");
14790 else
14791 do_vfp_nsyn_opcode ("fabss");
14792
14793 /* ARMv8.2 fp16 instruction. */
14794 if (rs == NS_HH)
14795 do_scalar_fp16_v82_encode ();
14796 }
14797 else
14798 {
14799 if (is_neg)
14800 do_vfp_nsyn_opcode ("fnegd");
14801 else
14802 do_vfp_nsyn_opcode ("fabsd");
14803 }
14804 }
14805
14806 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14807 insns belong to Neon, and are handled elsewhere. */
14808
14809 static void
14810 do_vfp_nsyn_ldm_stm (int is_dbmode)
14811 {
14812 int is_ldm = (inst.instruction & (1 << 20)) != 0;
14813 if (is_ldm)
14814 {
14815 if (is_dbmode)
14816 do_vfp_nsyn_opcode ("fldmdbs");
14817 else
14818 do_vfp_nsyn_opcode ("fldmias");
14819 }
14820 else
14821 {
14822 if (is_dbmode)
14823 do_vfp_nsyn_opcode ("fstmdbs");
14824 else
14825 do_vfp_nsyn_opcode ("fstmias");
14826 }
14827 }
14828
14829 static void
14830 do_vfp_nsyn_sqrt (void)
14831 {
14832 enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14833 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14834
14835 if (rs == NS_FF || rs == NS_HH)
14836 {
14837 do_vfp_nsyn_opcode ("fsqrts");
14838
14839 /* ARMv8.2 fp16 instruction. */
14840 if (rs == NS_HH)
14841 do_scalar_fp16_v82_encode ();
14842 }
14843 else
14844 do_vfp_nsyn_opcode ("fsqrtd");
14845 }
14846
14847 static void
14848 do_vfp_nsyn_div (void)
14849 {
14850 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14851 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14852 N_F_ALL | N_KEY | N_VFP);
14853
14854 if (rs == NS_FFF || rs == NS_HHH)
14855 {
14856 do_vfp_nsyn_opcode ("fdivs");
14857
14858 /* ARMv8.2 fp16 instruction. */
14859 if (rs == NS_HHH)
14860 do_scalar_fp16_v82_encode ();
14861 }
14862 else
14863 do_vfp_nsyn_opcode ("fdivd");
14864 }
14865
14866 static void
14867 do_vfp_nsyn_nmul (void)
14868 {
14869 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14870 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14871 N_F_ALL | N_KEY | N_VFP);
14872
14873 if (rs == NS_FFF || rs == NS_HHH)
14874 {
14875 NEON_ENCODE (SINGLE, inst);
14876 do_vfp_sp_dyadic ();
14877
14878 /* ARMv8.2 fp16 instruction. */
14879 if (rs == NS_HHH)
14880 do_scalar_fp16_v82_encode ();
14881 }
14882 else
14883 {
14884 NEON_ENCODE (DOUBLE, inst);
14885 do_vfp_dp_rd_rn_rm ();
14886 }
14887 do_vfp_cond_or_thumb ();
14888
14889 }
14890
/* Encode a Neon-syntax VFP compare (VCMP/VCMPE), either register-register
   or register-against-zero.  */
static void
do_vfp_nsyn_cmp (void)
{
  enum neon_shape rs;
  if (inst.operands[1].isreg)
    {
      /* Register-register comparison.  */
      rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);

      if (rs == NS_FF || rs == NS_HH)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_monadic ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd_rm ();
	}
    }
  else
    {
      /* Comparison against an immediate: rewrite the mnemonic to the
	 compare-with-zero form before encoding.  */
      rs = neon_select_shape (NS_HI, NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F_ALL | N_KEY | N_VFP, N_EQK);

      switch (inst.instruction & 0x0fffffff)
	{
	case N_MNEM_vcmp:
	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
	  break;
	case N_MNEM_vcmpe:
	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
	  break;
	default:
	  abort ();
	}

      if (rs == NS_FI || rs == NS_HI)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_compare_z ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd ();
	}
    }
  do_vfp_cond_or_thumb ();

  /* ARMv8.2 fp16 instruction.  */
  if (rs == NS_HI || rs == NS_HH)
    do_scalar_fp16_v82_encode ();
}
14945
/* Rewrite a VFP push/pop-style instruction into fstm/fldm form: shift the
   parsed register list into operand 1 and synthesize "SP!" (SP with
   writeback) as operand 0.  */
static void
nsyn_insert_sp (void)
{
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
  /* Operand 0 becomes the stack pointer with writeback.  */
  inst.operands[0].reg = REG_SP;
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].present = 1;
}
14956
14957 static void
14958 do_vfp_nsyn_push (void)
14959 {
14960 nsyn_insert_sp ();
14961
14962 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
14963 _("register list must contain at least 1 and at most 16 "
14964 "registers"));
14965
14966 if (inst.operands[1].issingle)
14967 do_vfp_nsyn_opcode ("fstmdbs");
14968 else
14969 do_vfp_nsyn_opcode ("fstmdbd");
14970 }
14971
14972 static void
14973 do_vfp_nsyn_pop (void)
14974 {
14975 nsyn_insert_sp ();
14976
14977 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
14978 _("register list must contain at least 1 and at most 16 "
14979 "registers"));
14980
14981 if (inst.operands[1].issingle)
14982 do_vfp_nsyn_opcode ("fldmias");
14983 else
14984 do_vfp_nsyn_opcode ("fldmiad");
14985 }
14986
14987 /* Fix up Neon data-processing instructions, ORing in the correct bits for
14988 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
14989
14990 static void
14991 neon_dp_fixup (struct arm_it* insn)
14992 {
14993 unsigned int i = insn->instruction;
14994 insn->is_neon = 1;
14995
14996 if (thumb_mode)
14997 {
14998 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
14999 if (i & (1 << 24))
15000 i |= 1 << 28;
15001
15002 i &= ~(1 << 24);
15003
15004 i |= 0xef000000;
15005 }
15006 else
15007 i |= 0xf2000000;
15008
15009 insn->instruction = i;
15010 }
15011
15012 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
15013 (0, 1, 2, 3). */
15014
static unsigned
neon_logbits (unsigned x)
{
  /* ffs gives the 1-based index of the lowest set bit, so an element size
     of 8/16/32/64 maps to 0/1/2/3.  */
  int lowest = ffs (x);

  return (unsigned) (lowest - 4);
}
15020
15021 #define LOW4(R) ((R) & 0xf)
15022 #define HI1(R) (((R) >> 4) & 1)
15023
15024 /* Encode insns with bit pattern:
15025
15026 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
15027 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
15028
15029 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
15030 different meaning for some instruction. */
15031
15032 static void
15033 neon_three_same (int isquad, int ubit, int size)
15034 {
15035 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15036 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15037 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15038 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15039 inst.instruction |= LOW4 (inst.operands[2].reg);
15040 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15041 inst.instruction |= (isquad != 0) << 6;
15042 inst.instruction |= (ubit != 0) << 24;
15043 if (size != -1)
15044 inst.instruction |= neon_logbits (size) << 20;
15045
15046 neon_dp_fixup (&inst);
15047 }
15048
15049 /* Encode instructions of the form:
15050
15051 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
15052 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
15053
15054 Don't write size if SIZE == -1. */
15055
15056 static void
15057 neon_two_same (int qbit, int ubit, int size)
15058 {
15059 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15060 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15061 inst.instruction |= LOW4 (inst.operands[1].reg);
15062 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15063 inst.instruction |= (qbit != 0) << 6;
15064 inst.instruction |= (ubit != 0) << 24;
15065
15066 if (size != -1)
15067 inst.instruction |= neon_logbits (size) << 18;
15068
15069 neon_dp_fixup (&inst);
15070 }
15071
15072 /* Neon instruction encoders, in approximate order of appearance. */
15073
15074 static void
15075 do_neon_dyadic_i_su (void)
15076 {
15077 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15078 struct neon_type_el et = neon_check_type (3, rs,
15079 N_EQK, N_EQK, N_SU_32 | N_KEY);
15080 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
15081 }
15082
15083 static void
15084 do_neon_dyadic_i64_su (void)
15085 {
15086 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15087 struct neon_type_el et = neon_check_type (3, rs,
15088 N_EQK, N_EQK, N_SU_ALL | N_KEY);
15089 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
15090 }
15091
/* Encode an immediate-shift instruction: destination and source registers,
   the shift amount IMMBITS, and the size bits derived from ET.  If
   WRITE_UBIT, the U bit is set from UVAL; ISQUAD selects Q registers.  */
static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
		unsigned immbits)
{
  unsigned size = et.size >> 3;	/* Element size in bytes: 1, 2, 4 or 8.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  /* The shift amount is OR'd into the immediate field together with the
     size marker: bit 7 is set only for 64-bit elements (size >> 3 == 1),
     and bits 19-21 carry the size marker for narrower elements --
     presumably forming the Advanced SIMD imm6/L encoding; verify against
     the ARM ARM shift-immediate tables.  */
  inst.instruction |= immbits << 16;
  inst.instruction |= (size >> 3) << 7;
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  neon_dp_fixup (&inst);
}
15110
15111 static void
15112 do_neon_shl_imm (void)
15113 {
15114 if (!inst.operands[2].isreg)
15115 {
15116 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15117 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
15118 int imm = inst.operands[2].imm;
15119
15120 constraint (imm < 0 || (unsigned)imm >= et.size,
15121 _("immediate out of range for shift"));
15122 NEON_ENCODE (IMMED, inst);
15123 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
15124 }
15125 else
15126 {
15127 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15128 struct neon_type_el et = neon_check_type (3, rs,
15129 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
15130 unsigned int tmp;
15131
15132 /* VSHL/VQSHL 3-register variants have syntax such as:
15133 vshl.xx Dd, Dm, Dn
15134 whereas other 3-register operations encoded by neon_three_same have
15135 syntax like:
15136 vadd.xx Dd, Dn, Dm
15137 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
15138 here. */
15139 tmp = inst.operands[2].reg;
15140 inst.operands[2].reg = inst.operands[1].reg;
15141 inst.operands[1].reg = tmp;
15142 NEON_ENCODE (INTEGER, inst);
15143 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
15144 }
15145 }
15146
15147 static void
15148 do_neon_qshl_imm (void)
15149 {
15150 if (!inst.operands[2].isreg)
15151 {
15152 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15153 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
15154 int imm = inst.operands[2].imm;
15155
15156 constraint (imm < 0 || (unsigned)imm >= et.size,
15157 _("immediate out of range for shift"));
15158 NEON_ENCODE (IMMED, inst);
15159 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
15160 }
15161 else
15162 {
15163 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15164 struct neon_type_el et = neon_check_type (3, rs,
15165 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
15166 unsigned int tmp;
15167
15168 /* See note in do_neon_shl_imm. */
15169 tmp = inst.operands[2].reg;
15170 inst.operands[2].reg = inst.operands[1].reg;
15171 inst.operands[1].reg = tmp;
15172 NEON_ENCODE (INTEGER, inst);
15173 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
15174 }
15175 }
15176
15177 static void
15178 do_neon_rshl (void)
15179 {
15180 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15181 struct neon_type_el et = neon_check_type (3, rs,
15182 N_EQK, N_EQK, N_SU_ALL | N_KEY);
15183 unsigned int tmp;
15184
15185 tmp = inst.operands[2].reg;
15186 inst.operands[2].reg = inst.operands[1].reg;
15187 inst.operands[1].reg = tmp;
15188 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
15189 }
15190
/* Find the cmode encoding for a logic-immediate instruction (VBIC/VORR)
   operating on SIZE-bit elements.  On success the 8-bit payload is stored
   through IMMBITS and the cmode value returned; on failure an error is
   reported and FAIL returned.  */
static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Handle .I8 pseudo-instructions.  */
  if (size == 8)
    {
      /* Unfortunately, this will make everything apart from zero out-of-range.
	 FIXME is this the intended semantics? There doesn't seem much point in
	 accepting .I8 if so.  */
      immediate |= immediate << 8;
      size = 16;
    }

  if (size >= 32)
    {
      /* A 32-bit immediate with a single non-zero byte: the cmode value
	 selects which byte position holds it.  */
      if (immediate == (immediate & 0x000000ff))
	{
	  *immbits = immediate;
	  return 0x1;
	}
      else if (immediate == (immediate & 0x0000ff00))
	{
	  *immbits = immediate >> 8;
	  return 0x3;
	}
      else if (immediate == (immediate & 0x00ff0000))
	{
	  *immbits = immediate >> 16;
	  return 0x5;
	}
      else if (immediate == (immediate & 0xff000000))
	{
	  *immbits = immediate >> 24;
	  return 0x7;
	}
      /* Otherwise try to reduce to a repeating 16-bit pattern and fall
	 through to the 16-bit cases below.  */
      if ((immediate & 0xffff) != (immediate >> 16))
	goto bad_immediate;
      immediate &= 0xffff;
    }

  /* 16-bit elements: a single non-zero byte in either half.  */
  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return 0x9;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return 0xb;
    }

  bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}
15246
/* Encode the Neon bitwise-logic instructions (VAND/VBIC/VORR/VORN/VEOR),
   in either the three-register form or the register-plus-immediate form
   (where VAND and VORN are pseudo-instructions for VBIC/VORR with the
   immediate inverted).  */
static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      /* Immediate form.  A two-operand form implies operand 0 doubles as
	 the first source (checked below for the three-operand form).  */
      const int three_ops_form = (inst.operands[2].present
				  && !inst.operands[2].isreg);
      const int immoperand = (three_ops_form ? 2 : 1);
      enum neon_shape rs = (three_ops_form
			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
      struct neon_type_el et = neon_check_type (2, rs,
	N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
	return;

      if (three_ops_form)
	constraint (inst.operands[0].reg != inst.operands[1].reg,
		    _("first and second operands shall be the same register"));

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[immoperand].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern.  */
	  if (immbits != (inst.operands[immoperand].regisimm ?
			  inst.operands[immoperand].reg : 0))
	    {
	      /* Set immbits to an invalid constant.  */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
15332
15333 static void
15334 do_neon_bitfield (void)
15335 {
15336 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15337 neon_check_type (3, rs, N_IGNORE_TYPE);
15338 neon_three_same (neon_quad (rs), 0, -1);
15339 }
15340
15341 static void
15342 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
15343 unsigned destbits)
15344 {
15345 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15346 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
15347 types | N_KEY);
15348 if (et.type == NT_float)
15349 {
15350 NEON_ENCODE (FLOAT, inst);
15351 neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
15352 }
15353 else
15354 {
15355 NEON_ENCODE (INTEGER, inst);
15356 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
15357 }
15358 }
15359
/* Dyadic operation on the signed/unsigned/float 32-bit type set; the
   element type selects the U bit via neon_dyadic_misc.  */
static void
do_neon_dyadic_if_su (void)
{
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
15365
/* As do_neon_dyadic_if_su, for instructions restricted to D registers.  */
static void
do_neon_dyadic_if_su_d (void)
{
  /* This version only allow D registers, but that constraint is enforced during
     operand parsing so we don't need to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
15373
/* Dyadic operation on the integer/float 32-bit type set.  */
static void
do_neon_dyadic_if_i_d (void)
{
  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
15381
/* Flag bits for vfp_or_neon_is_neon, selecting which checks and fix-ups
   that function performs.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,	/* Reject/fix up the condition field (ARM mode).  */
  NEON_CHECK_ARCH = 2,	/* Require the base Neon feature (fpu_neon_ext_v1).  */
  NEON_CHECK_ARCH8 = 4	/* Require the ARMv8 Neon feature.  */
};
15388
15389 /* Call this function if an instruction which may have belonged to the VFP or
15390 Neon instruction sets, but turned out to be a Neon instruction (due to the
15391 operand types involved, etc.). We have to check and/or fix-up a couple of
15392 things:
15393
15394 - Make sure the user hasn't attempted to make a Neon instruction
15395 conditional.
15396 - Alter the value in the condition code field if necessary.
15397 - Make sure that the arch supports Neon instructions.
15398
15399 Which of these operations take place depends on bits from enum
15400 vfp_or_neon_is_neon_bits.
15401
15402 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
15403 current instruction's condition is COND_ALWAYS, the condition field is
15404 changed to inst.uncond_value. This is necessary because instructions shared
15405 between VFP and Neon may be conditional for the VFP variants only, and the
15406 unconditional Neon version must have, e.g., 0xF in the condition field. */
15407
15408 static int
15409 vfp_or_neon_is_neon (unsigned check)
15410 {
15411 /* Conditions are always legal in Thumb mode (IT blocks). */
15412 if (!thumb_mode && (check & NEON_CHECK_CC))
15413 {
15414 if (inst.cond != COND_ALWAYS)
15415 {
15416 first_error (_(BAD_COND));
15417 return FAIL;
15418 }
15419 if (inst.uncond_value != -1)
15420 inst.instruction |= inst.uncond_value << 28;
15421 }
15422
15423 if ((check & NEON_CHECK_ARCH)
15424 && !mark_feature_used (&fpu_neon_ext_v1))
15425 {
15426 first_error (_(BAD_FPU));
15427 return FAIL;
15428 }
15429
15430 if ((check & NEON_CHECK_ARCH8)
15431 && !mark_feature_used (&fpu_neon_ext_armv8))
15432 {
15433 first_error (_(BAD_FPU));
15434 return FAIL;
15435 }
15436
15437 return SUCCESS;
15438 }
15439
15440 static void
15441 do_neon_addsub_if_i (void)
15442 {
15443 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
15444 return;
15445
15446 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15447 return;
15448
15449 /* The "untyped" case can't happen. Do this to stop the "U" bit being
15450 affected if we specify unsigned args. */
15451 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
15452 }
15453
15454 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
15455 result to be:
15456 V<op> A,B (A is operand 0, B is operand 2)
15457 to mean:
15458 V<op> A,B,A
15459 not:
15460 V<op> A,B,B
15461 so handle that case specially. */
15462
static void
neon_exchange_operands (void)
{
  if (inst.operands[1].present)
    {
      /* Both sources present: swap operands[1] and operands[2].  The
	 scratch buffer is heap-allocated and copied with memcpy,
	 presumably because the operand array's element type cannot be
	 named here for a local temporary -- verify against the arm_it
	 declaration.  */
      void *scratch = xmalloc (sizeof (inst.operands[0]));

      /* Swap operands[1] and operands[2].  */
      memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
      inst.operands[1] = inst.operands[2];
      memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
      free (scratch);
    }
  else
    {
      /* Operand 1 was omitted: make V<op> A,B mean V<op> A,B,A (see the
	 comment above this function).  */
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[0];
    }
}
15482
/* Encode a Neon comparison.  REGTYPES is the accepted type set for the
   register-register form, IMMTYPES for the compare-against-zero form.
   If INVERT, the two source operands are exchanged first (see
   neon_exchange_operands).  */
static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
	neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      /* Comparison against immediate zero.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK | N_SIZ, immtypes | N_KEY);

      NEON_ENCODE (IMMED, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Bit 10 selects the floating-point variant.  */
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
}
15510
/* Comparison in the natural operand order (no operand exchange).  */
static void
do_neon_cmp (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, FALSE);
}
15516
/* Inverted comparison: the source operands are exchanged before encoding
   (see neon_compare).  */
static void
do_neon_cmp_inv (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, TRUE);
}
15522
/* Equality comparison; both register and zero forms use the N_IF_32
   type set.  */
static void
do_neon_ceq (void)
{
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
15528
15529 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
15530 scalars, which are encoded in 5 bits, M : Rm.
15531 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
15532 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
15533 index in M.
15534
15535 Dot Product instructions are similar to multiply instructions except elsize
15536 should always be 32.
15537
15538 This function translates SCALAR, which is GAS's internal encoding of indexed
15539 scalar register, to raw encoding. There is also register and index range
15540 check based on ELSIZE. */
15541
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  /* 16-bit scalars: register in Rm[2:0], index in M:Rm[3].  */
  if (elsize == 16 && regno <= 7 && elno <= 3)
    return regno | (elno << 3);

  /* 32-bit scalars: register in Rm[3:0], index in M.  */
  if (elsize == 32 && regno <= 15 && elno <= 1)
    return regno | (elno << 4);

  /* Unsupported element size, or register/index out of range.  */
  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
15567
/* Encode multiply / multiply-accumulate scalar instructions.  ET gives the
   checked operand type, UBIT the value for the U bit (bit 24).  Writes the
   Vd, Vn fields, the raw M:Vm scalar field, the F bit and the size field
   into inst.instruction.  */

static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  /* Translate GAS's internal scalar representation (operand 2) into the
     raw M:Rm encoding, range-checked against the element size.  */
  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  /* F bit: set for floating-point element types.  */
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  neon_dp_fixup (&inst);
}
15592
static void
do_neon_mac_maybe_scalar (void)
{
  /* VMLA/VMLS: prefer the VFP encoding when the operands are VFP
     registers.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    {
      /* By-scalar form: Vd, Vn, Dm[x].  */
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_I16 | N_I32 | N_F_16_32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      /* The "untyped" case can't happen.  Do this to stop the "U" bit being
	 affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}
15617
/* VFMA/VFMS: VFP form when possible, otherwise three-same-register Neon
   encoding via neon_dyadic_misc.  */

static void
do_neon_fmac (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* NT_untyped keeps the U bit from being affected by unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
15629
/* VTST: three-same-register encoding; 8/16/32-bit untyped elements.  */

static void
do_neon_tst (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, et.size);
}
15638
/* VMUL with 3 registers allows the P8 type.  The scalar version supports the
   same types as the MAC equivalents.  The polynomial type for this instruction
   is encoded the same as the integer type.  */

static void
do_neon_mul (void)
{
  /* Prefer the VFP encoding when the operands allow it.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    /* The by-scalar form shares its encoding with VMLA by scalar.  */
    do_neon_mac_maybe_scalar ();
  else
    neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F16 | N_F32 | N_P8, 0);
}
15657
/* VQDMULH/VQRDMULH: saturating doubling multiply high, signed 16/32-bit
   elements, register or by-scalar form.  */

static void
do_neon_qdmulh (void)
{
  if (inst.operands[2].isscalar)
    {
      /* By-scalar form: Vd, Vn, Dm[x].  */
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
15679
/* VQRDMLAH/VQRDMLSH (ARMv8.1 AdvSIMD).  Encoding mirrors do_neon_qdmulh;
   note that encoding proceeds even after inst.error is set, matching the
   handling style of the surrounding encoders.  */

static void
do_neon_qrdmlah (void)
{
  /* Check we're on the correct architecture.  */
  if (!mark_feature_used (&fpu_neon_ext_armv8))
    inst.error =
      _("instruction form not available on this architecture.");
  else if (!mark_feature_used (&fpu_neon_ext_v8_1))
    {
      /* v8 Neon but not v8.1: warn and record the implied extension.  */
      as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
      record_feature_use (&fpu_neon_ext_v8_1);
    }

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
15711
/* VACGE/VACGT: absolute compare, F16/F32 elements.  */

static void
do_neon_fcmp_absolute (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
					    N_F_16_32 | N_KEY);
  /* Size field comes from bit mask; only the F16 case needs an explicit
     size argument, hence -1 for F32.  */
  neon_three_same (neon_quad (rs), 1, et.size == 16 ? (int) et.size : -1);
}
15721
/* VACLE/VACLT: encoded as VACGE/VACGT with the source operands swapped.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
15728
/* VRECPS/VRSQRTS step instructions, F16/F32 elements.  */

static void
do_neon_step (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
					    N_F_16_32 | N_KEY);
  /* Only the F16 case needs an explicit size argument; -1 for F32.  */
  neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
}
15737
15738 static void
15739 do_neon_abs_neg (void)
15740 {
15741 enum neon_shape rs;
15742 struct neon_type_el et;
15743
15744 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
15745 return;
15746
15747 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15748 return;
15749
15750 rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15751 et = neon_check_type (2, rs, N_EQK, N_S_32 | N_F_16_32 | N_KEY);
15752
15753 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15754 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15755 inst.instruction |= LOW4 (inst.operands[1].reg);
15756 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15757 inst.instruction |= neon_quad (rs) << 6;
15758 inst.instruction |= (et.type == NT_float) << 10;
15759 inst.instruction |= neon_logbits (et.size) << 18;
15760
15761 neon_dp_fixup (&inst);
15762 }
15763
15764 static void
15765 do_neon_sli (void)
15766 {
15767 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15768 struct neon_type_el et = neon_check_type (2, rs,
15769 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
15770 int imm = inst.operands[2].imm;
15771 constraint (imm < 0 || (unsigned)imm >= et.size,
15772 _("immediate out of range for insert"));
15773 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
15774 }
15775
15776 static void
15777 do_neon_sri (void)
15778 {
15779 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15780 struct neon_type_el et = neon_check_type (2, rs,
15781 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
15782 int imm = inst.operands[2].imm;
15783 constraint (imm < 1 || (unsigned)imm > et.size,
15784 _("immediate out of range for insert"));
15785 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
15786 }
15787
/* VQSHLU: saturating shift left by immediate, signed operands with unsigned
   results.  Shift must be in 0 .. element size - 1.  */

static void
do_neon_qshlu_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
15804
/* VQMOVN: saturating narrowing move, Dd = narrow(Qm).  */

static void
do_neon_qmovn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  /* Saturating move where operands can be signed or unsigned, and the
     destination has the same signedness.  */
  NEON_ENCODE (INTEGER, inst);
  if (et.type == NT_unsigned)
    inst.instruction |= 0xc0;
  else
    inst.instruction |= 0x80;
  /* Size field encodes the (halved) destination element size.  */
  neon_two_same (0, 1, et.size / 2);
}
15819
/* VQMOVUN: saturating narrowing move with unsigned results from signed
   operands, Dd = narrow(Qm).  */

static void
do_neon_qmovun (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  /* Saturating move with unsigned results.  Operands must be signed.  */
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
15829
/* VQSHRN/VQRSHRN: saturating shift right and narrow.  A zero shift is
   assembled as the equivalent VQMOVN.  */

static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* Encoded immediate is size - shift.  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
15856
/* VQSHRUN/VQRSHRUN: saturating shift right and narrow, unsigned result from
   signed operands.  A zero shift is assembled as the equivalent VQMOVUN.  */

static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
15886
/* VMOVN: narrowing move, Dd = narrow(Qm), integer types.  */

static void
do_neon_movn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  NEON_ENCODE (INTEGER, inst);
  /* Size field encodes the (halved) destination element size.  */
  neon_two_same (0, 1, et.size / 2);
}
15895
/* VSHRN/VRSHRN: shift right and narrow.  A zero shift is assembled as the
   equivalent VMOVN.  */

static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm> */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for narrowing operation"));
  /* Encoded immediate is size - shift.  */
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
15920
/* VSHLL: shift left long.  A shift equal to the element size selects the
   maximum-shift encoding; other shifts use the ordinary immediate-shift
   lengthening encoding.  */

static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.  */
      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
	N_EQK | N_DBL, N_SU_32 | N_KEY);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
15950
/* Check the various types for the VCVT instruction, and return which version
   the current instruction is.  */

/* X-macro table of every VCVT conversion flavour.  Each CVT_VAR entry gives:
   name, destination type bits, source type bits, extra register-type bits,
   and the VFP opcode names for the bitshift, plain, and round-to-zero
   variants (NULL where no such VFP form exists).  Expanded several times
   below with different CVT_VAR definitions.  */
#define CVT_FLAVOUR_VAR \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg,   "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg,   "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg,   "fsltos", "fsitos", NULL)      \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg,   "fultos", "fuitos", NULL)      \
  /* Half-precision conversions.  */					      \
  CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg,   NULL,     NULL,     NULL)      \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg,   NULL,     NULL,     NULL)      \
  /* New VCVT instructions introduced by ARMv8.2 fp16 extension.	      \
     Compared with single/double precision variants, only the co-processor \
     field is different, so the encoding flow is reused here.  */	      \
  CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL)    \
  CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL)    \
  CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
  CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
  /* VFP instructions.  */						      \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP,       NULL,     "fcvtsd", NULL)      \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP,       NULL,     "fcvtds", NULL)      \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL)      \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL)      \
  /* VFP instructions with bitshift.  */				      \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL,     NULL)      \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL,     NULL)      \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL,     NULL)      \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL,     NULL)      \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL,     NULL)      \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL,     NULL)      \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL,     NULL)      \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL,     NULL)

/* First expansion: generate one enumerator per flavour.  */
#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  /* Flavours from here on are the pure-VFP ("VFP instructions") group in
     the table above.  */
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
16002
/* Determine the conversion flavour for the current instruction given the
   selected shape RS, by trying each entry of the flavour table in order
   against the operand types.  Returns neon_cvt_flavour_invalid if nothing
   matches.  */

static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
/* Second expansion: one neon_check_type probe per flavour; the first entry
   that type-checks wins.  neon_check_type sets inst.error on a failed
   probe, so it is cleared again before returning a match.  */
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
  if (et.type != NT_invtype)				\
    {							\
      inst.error = NULL;				\
      return (neon_cvt_flavour_##C);			\
    }

  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register. Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}
16028
/* Rounding behaviour selected by the VCVT{A,N,P,M} / VCVTR / VCVT mnemonic
   suffix.  The a/n/p/m values also serve as the instruction's rounding-mode
   field (see do_vfp_nsyn_cvt_fpv8).  */
enum neon_cvt_mode
{
  neon_cvt_mode_a,	/* VCVTA.  */
  neon_cvt_mode_n,	/* VCVTN.  */
  neon_cvt_mode_p,	/* VCVTP.  */
  neon_cvt_mode_m,	/* VCVTM.  */
  neon_cvt_mode_z,	/* Plain VCVT (round toward zero).  */
  neon_cvt_mode_x,	/* VCVTR-style (uses current rounding mode).  */
  neon_cvt_mode_r
};
16039
16040 /* Neon-syntax VFP conversions. */
16041
/* Neon-syntax VFP conversions.  Looks up the VFP opcode name for FLAVOUR in
   the appropriate column of the flavour table (bitshift column for shapes
   with an immediate operand, plain column otherwise) and assembles it via
   do_vfp_nsyn_opcode.  Flavours with no VFP form (NULL entry) assemble
   nothing here.  */

static void
do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
{
  const char *opname = 0;

  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI
      || rs == NS_FHI || rs == NS_HFI)
    {
      /* Conversions with immediate bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	{
	  opname = enc[flavour];
	  /* The fixed-point form is Fd, Fd, #imm: operands 0 and 1 must
	     name the same register, and the immediate (operand 2) takes
	     operand 1's place for encoding.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("operands 0 and 1 must be the same register"));
	  inst.operands[1] = inst.operands[2];
	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
	}
    }
  else
    {
      /* Conversions without bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	opname = enc[flavour];
    }

  if (opname)
    do_vfp_nsyn_opcode (opname);

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16
      || flavour == neon_cvt_flavour_f16_u32
      || flavour == neon_cvt_flavour_f16_s32)
    do_scalar_fp16_v82_encode ();
}
16093
/* Round-towards-zero VFP conversions: assemble the flavour's "Z" opcode
   (e.g. ftosizs) if one exists; otherwise do nothing.  */

static void
do_vfp_nsyn_cvtz (void)
{
  enum neon_shape rs = neon_select_shape (NS_FH, NS_FF, NS_FD, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
  const char *enc[] =
    {
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
      CVT_FLAVOUR_VAR
      NULL
#undef CVT_VAR
    };

  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
16110
/* Encode the FP v8 VCVT{A,N,P,M} float-to-integer conversions with a
   directed rounding mode.  FLAVOUR selects the size (sz) and signedness
   (op) fields; MODE selects the rounding-mode (rm) field.  */

static void
do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
		      enum neon_cvt_mode mode)
{
  int sz, op;
  int rm;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (flavour == neon_cvt_flavour_s32_f64
      || flavour == neon_cvt_flavour_u32_f64)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* fp16 source operands need the ARMv8.2 FP16 extension.  */
  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
		_(BAD_FP16));

  set_it_insn_type (OUTSIDE_IT_INSN);

  /* sz: 1 for a double-precision source, 0 otherwise.
     op: 1 for signed result, 0 for unsigned.  */
  switch (flavour)
    {
    case neon_cvt_flavour_s32_f64:
      sz = 1;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f32:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f16:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_u32_f64:
      sz = 1;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f32:
      sz = 0;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f16:
      sz = 0;
      op = 0;
      break;
    default:
      first_error (_("invalid instruction shape"));
      return;
    }

  switch (mode)
    {
    case neon_cvt_mode_a: rm = 0; break;
    case neon_cvt_mode_n: rm = 1; break;
    case neon_cvt_mode_p: rm = 2; break;
    case neon_cvt_mode_m: rm = 3; break;
    default: first_error (_("invalid rounding mode")); return;
    }

  NEON_ENCODE (FPV8, inst);
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= sz << 8;

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      ||flavour == neon_cvt_flavour_u32_f16)
    do_scalar_fp16_v82_encode ();
  inst.instruction |= op << 7;
  inst.instruction |= rm << 16;
  /* Unconditional encoding.  */
  inst.instruction |= 0xf0000000;
  inst.is_neon = TRUE;
}
16186
/* Common worker for all VCVT variants.  Selects the operand shape, works
   out the conversion flavour, then dispatches to the appropriate VFP or
   Neon encoding path for rounding mode MODE.  */

static void
do_neon_cvt_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
					  NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ,
					  NS_FH, NS_HF, NS_FHI, NS_HFI,
					  NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

  if (flavour == neon_cvt_flavour_invalid)
    return;

  /* PR11109: Handle round-to-zero for VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
      && (flavour == neon_cvt_flavour_s16_f16
	  || flavour == neon_cvt_flavour_u16_f16
	  || flavour == neon_cvt_flavour_s32_f32
	  || flavour == neon_cvt_flavour_u32_f32
	  || flavour == neon_cvt_flavour_s32_f64
	  || flavour == neon_cvt_flavour_u32_f64)
      && (rs == NS_FD || rs == NS_FF))
    {
      do_vfp_nsyn_cvtz ();
      return;
    }

  /* ARMv8.2 fp16 VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16)
      && (flavour == neon_cvt_flavour_s32_f16
	  || flavour == neon_cvt_flavour_u32_f16)
      && (rs == NS_FH))
    {
      do_vfp_nsyn_cvtz ();
      do_scalar_fp16_v82_encode ();
      return;
    }

  /* VFP rather than Neon conversions.  */
  if (flavour >= neon_cvt_flavour_first_fp)
    {
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);

      return;
    }

  switch (rs)
    {
    case NS_DDI:
    case NS_QQI:
      {
	/* Neon fixed-point conversion (with immediate fraction bits).  */
	unsigned immbits;
	/* Opcode bits per flavour, indexed by neon_cvt_flavour order
	   (s32_f32, u32_f32, f32_s32, f32_u32, then the fp16 group).  */
	unsigned enctab[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
			     0x0000100, 0x1000100, 0x0, 0x1000000};

	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion.  */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	NEON_ENCODE (IMMED, inst);
	/* Defensive: flavour was already checked against invalid above.  */
	if (flavour != neon_cvt_flavour_invalid)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	if (flavour < neon_cvt_flavour_s16_f16)
	  {
	    /* 32-bit elements.  NOTE(review): bit 21 was already set just
	       above, so this OR is redundant (but harmless).  */
	    inst.instruction |= 1 << 21;
	    immbits = 32 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	  }
	else
	  {
	    /* fp16: different size encoding, and bit 9 must be clear.  */
	    inst.instruction |= 3 << 20;
	    immbits = 16 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	    inst.instruction &= ~(1 << 9);
	  }

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DD:
    case NS_QQ:
      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
	{
	  /* Advanced SIMD VCVT{A,N,P,M} with directed rounding.  */
	  NEON_ENCODE (FLOAT, inst);
	  set_it_insn_type (OUTSIDE_IT_INSN);

	  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	    return;

	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg);
	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	  inst.instruction |= neon_quad (rs) << 6;
	  /* Bit 7: unsigned result.  */
	  inst.instruction |= (flavour == neon_cvt_flavour_u16_f16
			       || flavour == neon_cvt_flavour_u32_f32) << 7;
	  inst.instruction |= mode << 8;
	  if (flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_s16_f16)
	    /* Mask off the original size bits and reencode them.  */
	    inst.instruction = ((inst.instruction & 0xfff3ffff) | (1 << 18));

	  if (thumb_mode)
	    inst.instruction |= 0xfc000000;
	  else
	    inst.instruction |= 0xf0000000;
	}
      else
	{
    int_encode:
	  {
	    /* Integer (non-fixed-point) Advanced SIMD conversion.  Opcode
	       bits per flavour, same ordering as enctab above.  */
	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080,
				  0x100, 0x180, 0x0, 0x080};

	    NEON_ENCODE (INTEGER, inst);

	    if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	      return;

	    if (flavour != neon_cvt_flavour_invalid)
	      inst.instruction |= enctab[flavour];

	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	    inst.instruction |= LOW4 (inst.operands[1].reg);
	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	    inst.instruction |= neon_quad (rs) << 6;
	    if (flavour >= neon_cvt_flavour_s16_f16
		&& flavour <= neon_cvt_flavour_f16_u16)
	      /* Half precision.  */
	      inst.instruction |= 1 << 18;
	    else
	      inst.instruction |= 2 << 18;

	    neon_dp_fixup (&inst);
	  }
	}
      break;

    /* Half-precision conversions for Advanced SIMD -- neon.  */
    case NS_QD:
    case NS_DQ:
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      /* VCVT.F16.F32 (narrowing) vs VCVT.F32.F16 (widening) opcodes.  */
      if (rs == NS_DQ)
	inst.instruction = 0x3b60600;
      else
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
    }
}
16380
/* Per-mnemonic VCVT entry points: each simply selects the rounding mode
   and defers to the common worker do_neon_cvt_1.  */

/* VCVTR: use the current FPSCR rounding mode.  */
static void
do_neon_cvtr (void)
{
  do_neon_cvt_1 (neon_cvt_mode_x);
}

/* Plain VCVT: round toward zero.  */
static void
do_neon_cvt (void)
{
  do_neon_cvt_1 (neon_cvt_mode_z);
}

/* VCVTA.  */
static void
do_neon_cvta (void)
{
  do_neon_cvt_1 (neon_cvt_mode_a);
}

/* VCVTN.  */
static void
do_neon_cvtn (void)
{
  do_neon_cvt_1 (neon_cvt_mode_n);
}

/* VCVTP.  */
static void
do_neon_cvtp (void)
{
  do_neon_cvt_1 (neon_cvt_mode_p);
}

/* VCVTM.  */
static void
do_neon_cvtm (void)
{
  do_neon_cvt_1 (neon_cvt_mode_m);
}
16416
/* Encode a VCVTB/VCVTT half-precision conversion.  T selects the top (T)
   vs bottom (B) half; TO is TRUE when converting *to* half precision;
   IS_DOUBLE selects the double-precision variants.  */

static void
do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
{
  if (is_double)
    mark_feature_used (&fpu_vfp_ext_armv8);

  /* The full-width operand is the destination when converting from half
     precision, the source when converting to it.  */
  encode_arm_vfp_reg (inst.operands[0].reg,
		      (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg,
		      (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= to ? 0x10000 : 0;
  inst.instruction |= t ? 0x80 : 0;
  inst.instruction |= is_double ? 0x100 : 0;
  do_vfp_cond_or_thumb ();
}
16432
/* Common worker for VCVTB (T == FALSE) and VCVTT (T == TRUE): determine
   the direction and precision from the operand types and dispatch to
   do_neon_cvttb_2.  Each neon_check_type probe clears inst.error on a
   match, since a failed probe sets it.  */

static void
do_neon_cvttb_1 (bfd_boolean t)
{
  enum neon_shape rs = neon_select_shape (NS_HF, NS_HD, NS_FH, NS_FF, NS_FD,
					  NS_DF, NS_DH, NS_NULL);

  if (rs == NS_NULL)
    return;
  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
    {
      /* Single precision -> half precision.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
    {
      /* Half precision -> single precision.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
    }
  else
    /* No type combination matched; neon_check_type left inst.error set.  */
    return;
}
16474
/* VCVTB: convert the bottom half of the half-precision register.  */
static void
do_neon_cvtb (void)
{
  do_neon_cvttb_1 (FALSE);
}


/* VCVTT: convert the top half of the half-precision register.  */
static void
do_neon_cvtt (void)
{
  do_neon_cvttb_1 (TRUE);
}
16487
/* Encode the immediate form of VMOV/VMVN.  Finds a cmode/op encoding for
   the (possibly 64-bit) immediate, trying the inverted immediate with the
   opposite instruction if the literal value itself cannot be encoded.  */

static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  /* 64-bit immediates are split across imm (low 32 bits) and reg (high
     32 bits) when regisimm is set.  */
  immlo = inst.operands[1].imm;
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Rewrite the op bit with the (possibly flipped) value.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
16539
/* VMVN: register form is a plain two-register encoding; immediate form is
   handled by neon_move_immediate.  */

static void
do_neon_mvn (void)
{
  if (inst.operands[1].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);

      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
    }
  else
    {
      NEON_ENCODE (IMMED, inst);
      neon_move_immediate ();
    }

  neon_dp_fixup (&inst);
}
16562
/* Encode instructions of form:

    |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
    | U   |x |D |size | Rn  | Rd  |x x x x|N|x|M|x| Rm |

   Shared encoder for the mixed-length (long/wide/narrow) dyadic NEON
   operations.  ET supplies the element type (its signedness sets the U
   bit) and SIZE the element size whose log2 goes into bits [21:20].  */

static void
neon_mixed_length (struct neon_type_el et, unsigned size)
{
  /* Vd: bits [15:12] plus the D bit (22).  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  /* Vn: bits [19:16] plus the N bit (7).  */
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  /* Vm: bits [3:0] plus the M bit (5).  */
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  /* U bit distinguishes unsigned element types.  */
  inst.instruction |= (et.type == NT_unsigned) << 24;
  inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
16582
16583 static void
16584 do_neon_dyadic_long (void)
16585 {
16586 /* FIXME: Type checking for lengthening op. */
16587 struct neon_type_el et = neon_check_type (3, NS_QDD,
16588 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
16589 neon_mixed_length (et, et.size);
16590 }
16591
16592 static void
16593 do_neon_abal (void)
16594 {
16595 struct neon_type_el et = neon_check_type (3, NS_QDD,
16596 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
16597 neon_mixed_length (et, et.size);
16598 }
16599
/* Encode a long multiply(-accumulate) whose third operand may be either a
   plain D register or a scalar (Dm[x]).

   NOTE(review): the parameter names appear swapped relative to their use --
   REGTYPES constrains the scalar-index form below and SCALARTYPES the
   register form.  Callers pass values matching this actual usage, so the
   behaviour is correct; only the naming is misleading.  */

static void
neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
{
  if (inst.operands[2].isscalar)
    {
      /* Scalar form: Qd = Dn * Dm[x].  */
      struct neon_type_el et = neon_check_type (3, NS_QDS,
	N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, et.type == NT_unsigned);
    }
  else
    {
      /* Register form: Qd = Dn * Dm.  */
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      neon_mixed_length (et, et.size);
    }
}
16618
16619 static void
16620 do_neon_mac_maybe_scalar_long (void)
16621 {
16622 neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
16623 }
16624
/* Like neon_scalar_for_mul, this function generates the Rm encoding from
   GAS's internal SCALAR representation.  QUAD_P is 1 for the Q form,
   otherwise 0.  Reports an error and returns 0 for out-of-range scalars.  */

static unsigned
neon_scalar_for_fmac_fp16_long (unsigned scalar, unsigned quad_p)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  if (quad_p)
    {
      /* Q form: register in [2:0], index bit 0 at bit 3, index bit 1 at
	 bit 5.  */
      if (regno <= 7 && elno <= 3)
	return ((regno & 0x7)
		| ((elno & 0x1) << 3)
		| (((elno >> 1) & 0x1) << 5));
    }
  else if (regno <= 15 && elno <= 1)
    /* D form: register low bit at bit 5, high bits in [2:0], index at
       bit 3.  */
    return (((regno & 0x1) << 5)
	    | ((regno >> 1) & 0x7)
	    | ((elno & 0x1) << 3));

  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
16657
/* Encode VFMAL/VFMSL (half-precision fused multiply-accumulate, long).
   SUBTYPE is non-zero for the subtract form (VFMSL), zero for VFMAL.  */

static void
do_neon_fmac_maybe_scalar_long (int subtype)
{
  enum neon_shape rs;
  int high8;
  /* NOTE: vfmal/vfmsl use slightly different NEON three-same encoding. 'size"
     field (bits[21:20]) has different meaning. For scalar index variant, it's
     used to differentiate add and subtract, otherwise it's with fixed value
     0x2.  */
  int size = -1;

  if (inst.cond != COND_ALWAYS)
    as_warn (_("vfmal/vfmsl with FP16 type cannot be conditional, the "
	       "behaviour is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16_fml),
	      _(BAD_FP16));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* vfmal/vfmsl are in three-same D/Q register format or the third operand can
     be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      /* Scalar-index variant: top byte 0xfe; the size field carries the
	 add/subtract distinction.  */
      high8 = 0xfe000000;
      if (subtype)
	size = 16;
      rs = neon_select_shape (NS_DHS, NS_QDS, NS_NULL);
    }
  else
    {
      /* Register variant: top byte 0xfc; subtract is bit 23.  */
      high8 = 0xfc000000;
      size = 32;
      if (subtype)
	inst.instruction |= (0x1 << 23);
      rs = neon_select_shape (NS_DHH, NS_QDD, NS_NULL);
    }

  neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_F16);

  /* "opcode" from template has included "ubit", so simply pass 0 here. Also,
     the "S" bit in size field has been reused to differentiate vfmal and vfmsl,
     so we simply pass -1 as size.  */
  unsigned quad_p = (rs == NS_QDD || rs == NS_QDS);
  neon_three_same (quad_p, 0, size);

  /* Undo neon_dp_fixup. Redo the high eight bits.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;

#define LOW1(R) ((R) & 0x1)
#define HI4(R) (((R) >> 1) & 0xf)
  /* Unlike usually NEON three-same, encoding for Vn and Vm will depend on
     whether the instruction is in Q form and whether Vm is a scalar indexed
     operand.  */
  if (inst.operands[2].isscalar)
    {
      /* Replace the Rm field with the scalar encoding.  */
      unsigned rm
	= neon_scalar_for_fmac_fp16_long (inst.operands[2].reg, quad_p);
      inst.instruction &= 0xffffffd0;
      inst.instruction |= rm;

      if (!quad_p)
	{
	  /* Redo Rn as well.  */
	  inst.instruction &= 0xfff0ff7f;
	  inst.instruction |= HI4 (inst.operands[1].reg) << 16;
	  inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
	}
    }
  else if (!quad_p)
    {
      /* Redo Rn and Rm.  */
      inst.instruction &= 0xfff0ff50;
      inst.instruction |= HI4 (inst.operands[1].reg) << 16;
      inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
      inst.instruction |= HI4 (inst.operands[2].reg);
      inst.instruction |= LOW1 (inst.operands[2].reg) << 5;
    }
}
16739
static void
do_neon_vfmal (void)
{
  /* VFMAL: FP16 fused multiply-accumulate long, add form.  */
  do_neon_fmac_maybe_scalar_long (0);
}
16745
static void
do_neon_vfmsl (void)
{
  /* VFMSL: FP16 fused multiply-accumulate long, subtract form.  */
  do_neon_fmac_maybe_scalar_long (1);
}
16751
16752 static void
16753 do_neon_dyadic_wide (void)
16754 {
16755 struct neon_type_el et = neon_check_type (3, NS_QQD,
16756 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
16757 neon_mixed_length (et, et.size);
16758 }
16759
16760 static void
16761 do_neon_dyadic_narrow (void)
16762 {
16763 struct neon_type_el et = neon_check_type (3, NS_QDD,
16764 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
16765 /* Operand sign is unimportant, and the U bit is part of the opcode,
16766 so force the operand type to integer. */
16767 et.type = NT_integer;
16768 neon_mixed_length (et, et.size / 2);
16769 }
16770
16771 static void
16772 do_neon_mul_sat_scalar_long (void)
16773 {
16774 neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
16775 }
16776
/* Encode VMULL (integer, polynomial, or by-scalar long multiply).  */

static void
do_neon_vmull (void)
{
  if (inst.operands[2].isscalar)
    /* By-scalar form shares the VMLAL-by-scalar encoding path.  */
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);

      if (et.type == NT_poly)
	NEON_ENCODE (POLY, inst);
      else
	NEON_ENCODE (INTEGER, inst);

      /* For polynomial encoding the U bit must be zero, and the size must
	 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
	 obviously, as 0b10).  */
      if (et.size == 64)
	{
	  /* Check we're on the correct architecture.  */
	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
	    inst.error =
	      _("Instruction form not available on this architecture.");

	  /* Make VMULL.P64 encode its size field as if 32-bit (0b10).  */
	  et.size = 32;
	}

      neon_mixed_length (et, et.size);
    }
}
16808
/* Encode VEXT: extract a byte-aligned window from a pair of vectors.
   The assembly-level immediate counts elements; it is converted to the
   byte index the instruction actually encodes.  */

static void
do_neon_ext (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  /* Element index -> byte offset.  */
  unsigned imm = (inst.operands[3].imm * et.size) / 8;

  /* Byte index must fit in the source: 8 bytes for D form, 16 for Q.  */
  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
	      _("shift out of range"));
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= imm << 8;

  neon_dp_fixup (&inst);
}
16830
/* Encode VREV16/VREV32/VREV64: reverse elements within regions of a
   vector.  The region width is implied by the opcode bits already in the
   template.  */

static void
do_neon_rev (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  /* op (bits [8:7]) selects which VREV variant this is.  */
  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask. We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction.  */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
  gas_assert (elsize != 0);
  constraint (et.size >= elsize,
	      _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
16847
/* Encode VDUP: duplicate either a vector scalar (Dm[x]) or an ARM core
   register into every lane of a D or Q register.  */

static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      /* VDUP.<size> <Dd/Qd>, <Dm[x]> -- NEON only.  */
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      /* Scalar index, pre-shifted so it sits above the size bits.  */
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Index and size share the imm4 field at bits [19:16].  */
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector.  */
      NEON_ENCODE (ARMREG, inst);
      /* b/e bits select the element size.  */
      switch (et.size)
	{
	case 8:  inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
16898
16899 /* VMOV has particularly many variations. It can be one of:
16900 0. VMOV<c><q> <Qd>, <Qm>
16901 1. VMOV<c><q> <Dd>, <Dm>
16902 (Register operations, which are VORR with Rm = Rn.)
16903 2. VMOV<c><q>.<dt> <Qd>, #<imm>
16904 3. VMOV<c><q>.<dt> <Dd>, #<imm>
16905 (Immediate loads.)
16906 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
16907 (ARM register to scalar.)
16908 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
16909 (Two ARM registers to vector.)
16910 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
16911 (Scalar to ARM register.)
16912 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
16913 (Vector to two ARM registers.)
16914 8. VMOV.F32 <Sd>, <Sm>
16915 9. VMOV.F64 <Dd>, <Dm>
16916 (VFP register moves.)
16917 10. VMOV.F32 <Sd>, #imm
16918 11. VMOV.F64 <Dd>, #imm
16919 (VFP float immediate load.)
16920 12. VMOV <Rd>, <Sm>
16921 (VFP single to ARM reg.)
16922 13. VMOV <Sd>, <Rm>
16923 (ARM reg to VFP single.)
16924 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
16925 (Two ARM regs to two VFP singles.)
16926 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
16927 (Two VFP singles to two ARM regs.)
16928
16929 These cases can be disambiguated using neon_select_shape, except cases 1/9
16930 and 3/11 which depend on the operand type too.
16931
16932 All the encoded bits are hardcoded by this function.
16933
16934 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
16935 Cases 5, 7 may be used with VFPv2 and above.
16936
16937 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
16938 can specify a type where it doesn't make sense to, and is ignored). */
16939
/* Assemble one of the many VMOV variants (see the numbered-case table in
   the comment above).  The shape chosen by neon_select_shape picks which
   concrete encoding applies.  */

static void
do_neon_mov (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
					  NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR,
					  NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
					  NS_HR, NS_RH, NS_HI, NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* Case 9: VMOV.F64 is the VFP fcpyd.  */
	  do_vfp_nsyn_opcode ("fcpyd");
	  break;
	}
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;
	/* The architecture manual I have doesn't explicitly state which
	   value the U bit should have for register->register moves, but
	   the equivalent VORR instruction has U = 0, so do that.  */
	inst.instruction = 0x0200110;
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	/* VORR with Rn == Rm acts as a plain register move.  */
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
	inst.instruction |= neon_quad (rs) << 6;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 11 (fconstd).  */
	  ldconst = "fconstd";
	  goto encode_fconstd;
	}
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      neon_dp_fixup (&inst);
      break;

    case NS_SR:  /* case 4.  */
      {
	unsigned bcdebits = 0;
	int logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

	/* .<size> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* The element size selects the opc1/opc2 base pattern; the scalar
	   index is then folded into the free bits.  */
	switch (et.size)
	  {
	  case 8:  bcdebits = 0x8; break;
	  case 16: bcdebits = 0x1; break;
	  case 32: bcdebits = 0x0; break;
	  default: ;
	  }

	bcdebits |= x << logsize;

	inst.instruction = 0xe000b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[1].reg << 12;
	inst.instruction |= (bcdebits & 3) << 5;
	inst.instruction |= (bcdebits >> 2) << 21;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6.  */
      {
	unsigned logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
	unsigned abcdebits = 0;

	/* .<dt> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL,
			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Signedness selects between the sign- and zero-extending forms
	   for 8- and 16-bit elements.  */
	switch (et.size)
	  {
	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
	  case 32: abcdebits = 0x00; break;
	  default: ;
	  }

	abcdebits |= x << logsize;
	inst.instruction = 0xe100b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[0].reg << 12;
	inst.instruction |= (abcdebits & 3) << 5;
	inst.instruction |= (abcdebits >> 2) << 21;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_HI:
    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
      encode_fconstd:
      if (!inst.operands[1].immisfloat)
	{
	  unsigned new_imm;
	  /* Immediate has to fit in 8 bits so float is enough.  */
	  float imm = (float) inst.operands[1].imm;
	  memcpy (&new_imm, &imm, sizeof (float));
	  /* But the assembly may have been written to provide an integer
	     bit pattern that equates to a float, so check that the
	     conversion has worked.  */
	  if (is_quarter_float (new_imm))
	    {
	      if (is_quarter_float (inst.operands[1].imm))
		as_warn (_("immediate constant is valid both as a bit-pattern and a floating point value (using the fp value)"));

	      inst.operands[1].imm = new_imm;
	      inst.operands[1].immisfloat = 1;
	    }
	}

      if (is_quarter_float (inst.operands[1].imm))
	{
	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
	  do_vfp_nsyn_opcode (ldconst);

	  /* ARMv8.2 fp16 vmov.f16 instruction.  */
	  if (rs == NS_HI)
	    do_scalar_fp16_v82_encode ();
	}
      else
	first_error (_("immediate out of range"));
      break;

    case NS_RH:
    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_RH)
	do_scalar_fp16_v82_encode ();
      break;

    case NS_HR:
    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_HR)
	do_scalar_fp16_v82_encode ();
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    case NS_NULL:
      /* neon_select_shape has determined that the instruction
	 shape is wrong and has already set the error message.  */
      break;

    default:
      abort ();
    }
}
17207
/* Encode V{R}SHR (vector shift right by immediate).  A shift count of
   zero is assembled as the equivalent VMOV instead.  */

static void
do_neon_rshift_round_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
  int imm = inst.operands[2].imm;

  /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      do_neon_mov ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for shift"));
  /* The hardware encodes the shift count as (size - count).  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
		  et.size - imm);
}
17228
17229 static void
17230 do_neon_movhf (void)
17231 {
17232 enum neon_shape rs = neon_select_shape (NS_HH, NS_NULL);
17233 constraint (rs != NS_HH, _("invalid suffix"));
17234
17235 if (inst.cond != COND_ALWAYS)
17236 {
17237 if (thumb_mode)
17238 {
17239 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
17240 " the behaviour is UNPREDICTABLE"));
17241 }
17242 else
17243 {
17244 inst.error = BAD_COND;
17245 return;
17246 }
17247 }
17248
17249 do_vfp_sp_monadic ();
17250
17251 inst.is_neon = 1;
17252 inst.instruction |= 0xf0000000;
17253 }
17254
17255 static void
17256 do_neon_movl (void)
17257 {
17258 struct neon_type_el et = neon_check_type (2, NS_QD,
17259 N_EQK | N_DBL, N_SU_32 | N_KEY);
17260 unsigned sizebits = et.size >> 3;
17261 inst.instruction |= sizebits << 19;
17262 neon_two_same (0, et.type == NT_unsigned, -1);
17263 }
17264
17265 static void
17266 do_neon_trn (void)
17267 {
17268 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17269 struct neon_type_el et = neon_check_type (2, rs,
17270 N_EQK, N_8 | N_16 | N_32 | N_KEY);
17271 NEON_ENCODE (INTEGER, inst);
17272 neon_two_same (neon_quad (rs), 1, et.size);
17273 }
17274
17275 static void
17276 do_neon_zip_uzp (void)
17277 {
17278 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17279 struct neon_type_el et = neon_check_type (2, rs,
17280 N_EQK, N_8 | N_16 | N_32 | N_KEY);
17281 if (rs == NS_DD && et.size == 32)
17282 {
17283 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
17284 inst.instruction = N_MNEM_vtrn;
17285 do_neon_trn ();
17286 return;
17287 }
17288 neon_two_same (neon_quad (rs), 1, et.size);
17289 }
17290
17291 static void
17292 do_neon_sat_abs_neg (void)
17293 {
17294 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17295 struct neon_type_el et = neon_check_type (2, rs,
17296 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
17297 neon_two_same (neon_quad (rs), 1, et.size);
17298 }
17299
17300 static void
17301 do_neon_pair_long (void)
17302 {
17303 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17304 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
17305 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
17306 inst.instruction |= (et.type == NT_unsigned) << 7;
17307 neon_two_same (neon_quad (rs), 1, et.size);
17308 }
17309
17310 static void
17311 do_neon_recip_est (void)
17312 {
17313 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17314 struct neon_type_el et = neon_check_type (2, rs,
17315 N_EQK | N_FLT, N_F_16_32 | N_U32 | N_KEY);
17316 inst.instruction |= (et.type == NT_float) << 8;
17317 neon_two_same (neon_quad (rs), 1, et.size);
17318 }
17319
17320 static void
17321 do_neon_cls (void)
17322 {
17323 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17324 struct neon_type_el et = neon_check_type (2, rs,
17325 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
17326 neon_two_same (neon_quad (rs), 1, et.size);
17327 }
17328
17329 static void
17330 do_neon_clz (void)
17331 {
17332 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17333 struct neon_type_el et = neon_check_type (2, rs,
17334 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
17335 neon_two_same (neon_quad (rs), 1, et.size);
17336 }
17337
17338 static void
17339 do_neon_cnt (void)
17340 {
17341 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17342 struct neon_type_el et = neon_check_type (2, rs,
17343 N_EQK | N_INT, N_8 | N_KEY);
17344 neon_two_same (neon_quad (rs), 1, et.size);
17345 }
17346
17347 static void
17348 do_neon_swp (void)
17349 {
17350 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
17351 neon_two_same (neon_quad (rs), 1, -1);
17352 }
17353
/* Encode VTBL/VTBX: table lookup through a list of 1-4 D registers.  */

static void
do_neon_tbl_tbx (void)
{
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  /* The table may span at most four consecutive D registers.  */
  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  /* List length is encoded minus one, at bits [9:8].  */
  listlenbits = inst.operands[1].imm - 1;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  neon_dp_fixup (&inst);
}
17377
/* Encode VLDM/VSTM (multiple FP register load/store).  Single-precision
   lists are delegated to the VFP pseudo-opcode path.  */

static void
do_neon_ldm_stm (void)
{
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* Each D register occupies two words of transfer.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  /* Base register, writeback flag, and first D register of the list.  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
17407
/* Encode VLDR/VSTR of an S or D register, via the VFP pseudo-opcodes.  */

static void
do_neon_ldr_str (void)
{
  /* Bit 20 is the load/store (L) bit in the template.  */
  int is_ldr = (inst.instruction & (1 << 20)) != 0;

  /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
     And is UNPREDICTABLE in thumb mode.  */
  if (!is_ldr
      && inst.operands[1].reg == REG_PC
      && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
    {
      if (thumb_mode)
	inst.error = _("Use of PC here is UNPREDICTABLE");
      else if (warn_on_deprecated)
	as_tsktsk (_("Use of PC here is deprecated"));
    }

  if (inst.operands[0].issingle)
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("flds");
      else
	do_vfp_nsyn_opcode ("fsts");

      /* ARMv8.2 vldr.16/vstr.16 instruction.  */
      if (inst.vectype.el[0].size == 16)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("fldd");
      else
	do_vfp_nsyn_opcode ("fstd");
    }
}
17444
/* Encode the T32 VLDR/VSTR (system register) form (ARMv8.1-M Mainline).
   operands[0].imm holds the system register number; operands[1] is the
   memory operand.  */

static void
do_t_vldr_vstr_sysreg (void)
{
  /* Bit 20 is the load/store bit in both the incoming template and the
     sysreg encoding being built.  */
  int fp_vldr_bitno = 20, sysreg_vldr_bitno = 20;
  bfd_boolean is_vldr = ((inst.instruction & (1 << fp_vldr_bitno)) != 0);

  /* Use of PC is UNPREDICTABLE.  */
  if (inst.operands[1].reg == REG_PC)
    inst.error = _("Use of PC here is UNPREDICTABLE");

  if (inst.operands[1].immisreg)
    inst.error = _("instruction does not accept register index");

  if (!inst.operands[1].isreg)
    inst.error = _("instruction does not accept PC-relative addressing");

  /* The offset immediate field is 7 bits wide.  */
  if (abs (inst.operands[1].imm) >= (1 << 7))
    inst.error = _("immediate value out of range");

  inst.instruction = 0xec000f80;
  if (is_vldr)
    inst.instruction |= 1 << sysreg_vldr_bitno;
  encode_arm_cp_address (1, TRUE, FALSE, BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM);
  /* System register number: low three bits at [15:13], bit 3 at bit 22.  */
  inst.instruction |= (inst.operands[0].imm & 0x7) << 13;
  inst.instruction |= (inst.operands[0].imm & 0x8) << 19;
}
17471
17472 static void
17473 do_vldr_vstr (void)
17474 {
17475 bfd_boolean sysreg_op = !inst.operands[0].isreg;
17476
17477 /* VLDR/VSTR (System Register). */
17478 if (sysreg_op)
17479 {
17480 if (!mark_feature_used (&arm_ext_v8_1m_main))
17481 as_bad (_("Instruction not permitted on this architecture"));
17482
17483 do_t_vldr_vstr_sysreg ();
17484 }
17485 /* VLDR/VSTR. */
17486 else
17487 {
17488 if (!mark_feature_used (&fpu_vfp_ext_v1xd))
17489 as_bad (_("Instruction not permitted on this architecture"));
17490 do_neon_ldr_str ();
17491 }
17492 }
17493
/* "interleave" version also handles non-interleaving register VLD1/VST1
   instructions.  */

static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL,
					    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7,  -1, 0xa,  -1, 0x6,  -1, 0x2,  -1, /* VLD1 / VST1.  */
       -1,  -1, 0x8, 0x9,  -1,  -1, 0x3,  -1, /* VLD2 / VST2.  */
       -1,  -1,  -1,  -1, 0x4, 0x5,  -1,  -1, /* VLD3 / VST3.  */
       -1,  -1,  -1,  -1,  -1,  -1, 0x0, 0x1  /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* Validate any :<align> qualifier against the list length, and derive
     the two-bit alignment field.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 2;
	break;
      case 256:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 3;
	break;
      default:
      bad_alignment:
	first_error (_("bad alignment"));
	return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6].  We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask.  Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
	| (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));
  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
	      _("bad element type for instruction"));

  /* Replace the placeholder type field with the looked-up value.  */
  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
17562
17563 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
17564 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
17565 otherwise. The variable arguments are a list of pairs of legal (size, align)
17566 values, terminated with -1. */
17567
17568 static int
17569 neon_alignment_bit (int size, int align, int *do_alignment, ...)
17570 {
17571 va_list ap;
17572 int result = FAIL, thissize, thisalign;
17573
17574 if (!inst.operands[1].immisalign)
17575 {
17576 *do_alignment = 0;
17577 return SUCCESS;
17578 }
17579
17580 va_start (ap, do_alignment);
17581
17582 do
17583 {
17584 thissize = va_arg (ap, int);
17585 if (thissize == -1)
17586 break;
17587 thisalign = va_arg (ap, int);
17588
17589 if (size == thissize && align == thisalign)
17590 result = SUCCESS;
17591 }
17592 while (result != SUCCESS);
17593
17594 va_end (ap);
17595
17596 if (result == SUCCESS)
17597 *do_alignment = 1;
17598 else
17599 first_error (_("unsupported alignment for instruction"));
17600
17601 return result;
17602 }
17603
/* Encode VLD<n>/VST<n> "single structure to/from one lane" instructions.
   The value <n> (minus one) is carried in bits [9:8] of the initial
   opcode bitmask.  */
static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;
  int logsize = neon_logbits (et.size);
  /* Alignment, in bits, lives in the high bits of the parsed immediate.  */
  int align = inst.operands[1].imm >> 8;
  /* <n> minus one, taken from the opcode template.  */
  int n = (inst.instruction >> 8) & 3;
  /* Number of lanes in a 64-bit D register at this element size.  */
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  /* Each <n> accepts a different set of (size, alignment) pairs, and
     encodes the chosen alignment differently in bits [5:4].  */
  switch (n)
    {
    case 0: /* VLD1 / VST1. */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1: /* VLD2 / VST2. */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 16,
				       16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	inst.instruction |= 1 << 4;
      break;

    case 2: /* VLD3 / VST3. */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3: /* VLD4 / VST4. */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  /* Lane index sits above the size-dependent alignment/stride bits.  */
  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
17688
17689 /* Encode single n-element structure to all lanes VLD<n> instructions. */
17690
static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;

  if (et.type == NT_invtype)
    return;

  /* Bits [9:8] of the opcode template hold <n> minus one.  */
  switch ((inst.instruction >> 8) & 3)
    {
    case 0: /* VLD1. */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      /* Bit 5 distinguishes the one- and two-register list forms.  */
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1: /* VLD2. */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 8, 16, 16, 32, 32, 64,
				       -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      /* Bit 5 encodes a register stride of two.  */
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2: /* VLD3. */
      /* VLD3 (all lanes) accepts no alignment specifier.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3: /* VLD4. */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* size==32 with 128-bit alignment uses the special 0x3 size
	   encoding; everything else encodes log2(size) directly.  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  /* Bit 4 is the alignment flag.  */
  inst.instruction |= do_alignment << 4;
}
17763
/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   apart from bits [11:4]).  */
17766
17767 static void
17768 do_neon_ldx_stx (void)
17769 {
17770 if (inst.operands[1].isreg)
17771 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
17772
17773 switch (NEON_LANE (inst.operands[0].imm))
17774 {
17775 case NEON_INTERLEAVE_LANES:
17776 NEON_ENCODE (INTERLV, inst);
17777 do_neon_ld_st_interleave ();
17778 break;
17779
17780 case NEON_ALL_LANES:
17781 NEON_ENCODE (DUP, inst);
17782 if (inst.instruction == N_INV)
17783 {
17784 first_error ("only loads support such operands");
17785 break;
17786 }
17787 do_neon_ld_dup ();
17788 break;
17789
17790 default:
17791 NEON_ENCODE (LANE, inst);
17792 do_neon_ld_st_lane ();
17793 }
17794
17795 /* L bit comes from bit mask. */
17796 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17797 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17798 inst.instruction |= inst.operands[1].reg << 16;
17799
17800 if (inst.operands[1].postind)
17801 {
17802 int postreg = inst.operands[1].imm & 0xf;
17803 constraint (!inst.operands[1].immisreg,
17804 _("post-index must be a register"));
17805 constraint (postreg == 0xd || postreg == 0xf,
17806 _("bad register for post-index"));
17807 inst.instruction |= postreg;
17808 }
17809 else
17810 {
17811 constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
17812 constraint (inst.relocs[0].exp.X_op != O_constant
17813 || inst.relocs[0].exp.X_add_number != 0,
17814 BAD_ADDR_MODE);
17815
17816 if (inst.operands[1].writeback)
17817 {
17818 inst.instruction |= 0xd;
17819 }
17820 else
17821 inst.instruction |= 0xf;
17822 }
17823
17824 if (thumb_mode)
17825 inst.instruction |= 0xf9000000;
17826 else
17827 inst.instruction |= 0xf4000000;
17828 }
17829
17830 /* FP v8. */
/* Encode a three-operand FP v8 VFP instruction.  RS is the operand shape:
   NS_FFF/NS_HHH take the single-precision path, NS_DDD the double-
   precision one.  */
static void
do_vfp_nsyn_fpv8 (enum neon_shape rs)
{
  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  NEON_ENCODE (FPV8, inst);

  if (rs == NS_FFF || rs == NS_HHH)
    {
      do_vfp_sp_dyadic ();

      /* ARMv8.2 fp16 instruction.  */
      if (rs == NS_HHH)
	do_scalar_fp16_v82_encode ();
    }
  else
    do_vfp_dp_rd_rn_rm ();

  /* Bit 8 selects double precision.  */
  if (rs == NS_DDD)
    inst.instruction |= 0x100;

  /* Top nibble 0xF: these encodings are unconditional.  */
  inst.instruction |= 0xf0000000;
}
17858
17859 static void
17860 do_vsel (void)
17861 {
17862 set_it_insn_type (OUTSIDE_IT_INSN);
17863
17864 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
17865 first_error (_("invalid instruction shape"));
17866 }
17867
17868 static void
17869 do_vmaxnm (void)
17870 {
17871 set_it_insn_type (OUTSIDE_IT_INSN);
17872
17873 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
17874 return;
17875
17876 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
17877 return;
17878
17879 neon_dyadic_misc (NT_untyped, N_F_16_32, 0);
17880 }
17881
/* Shared encoder for the VRINT family.  MODE selects the rounding mode;
   see the do_vrint* wrappers below.  VFP (scalar) encodings are tried
   first, then Neon (vector) ones.  */
static void
do_vrint_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et;

  if (rs == NS_NULL)
    return;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY
			| N_VFP);
  if (et.type != NT_invtype)
    {
      /* VFP encodings.  */
      /* The a/n/p/m modes use the unconditional (0xF) encoding space (see
	 the switch below), so they cannot be inside an IT block.  */
      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
	set_it_insn_type (OUTSIDE_IT_INSN);

      NEON_ENCODE (FPV8, inst);
      if (rs == NS_FF || rs == NS_HH)
	do_vfp_sp_monadic ();
      else
	do_vfp_dp_rd_rm ();

      switch (mode)
	{
	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
	default: abort ();
	}

      /* Bit 8 selects double precision.  */
      inst.instruction |= (rs == NS_DD) << 8;
      do_vfp_cond_or_thumb ();

      /* ARMv8.2 fp16 vrint instruction.  */
      if (rs == NS_HH)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      /* Neon encodings (or something broken...).  */
      inst.error = NULL;
      et = neon_check_type (2, rs, N_EQK, N_F_16_32 | N_KEY);

      if (et.type == NT_invtype)
	return;

      set_it_insn_type (OUTSIDE_IT_INSN);
      NEON_ENCODE (FLOAT, inst);

      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	return;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Mask off the original size bits and reencode them.  */
      inst.instruction = ((inst.instruction & 0xfff3ffff)
			  | neon_logbits (et.size) << 18);

      /* The rounding mode occupies bits [9:7].  */
      switch (mode)
	{
	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
	default: abort ();
	}

      if (thumb_mode)
	inst.instruction |= 0xfc000000;
      else
	inst.instruction |= 0xf0000000;
    }
}
17973
/* Encode VRINTX (rounding mode "x"; see do_vrint_1).  */
static void
do_vrintx (void)
{
  do_vrint_1 (neon_cvt_mode_x);
}

/* Encode VRINTZ (rounding mode "z").  */
static void
do_vrintz (void)
{
  do_vrint_1 (neon_cvt_mode_z);
}

/* Encode VRINTR (rounding mode "r").  */
static void
do_vrintr (void)
{
  do_vrint_1 (neon_cvt_mode_r);
}

/* Encode VRINTA (rounding mode "a").  */
static void
do_vrinta (void)
{
  do_vrint_1 (neon_cvt_mode_a);
}

/* Encode VRINTN (rounding mode "n").  */
static void
do_vrintn (void)
{
  do_vrint_1 (neon_cvt_mode_n);
}

/* Encode VRINTP (rounding mode "p").  */
static void
do_vrintp (void)
{
  do_vrint_1 (neon_cvt_mode_p);
}

/* Encode VRINTM (rounding mode "m").  */
static void
do_vrintm (void)
{
  do_vrint_1 (neon_cvt_mode_m);
}
18015
/* Validate and encode a scalar operand for VCMLA.  OPND is GAS's internal
   scalar encoding, ELSIZE the element size in bits (16 or 32).  Returns
   the register number with the element index folded in, or 0 after
   reporting an error.  */
static unsigned
neon_scalar_for_vcmla (unsigned opnd, unsigned elsize)
{
  unsigned reg = NEON_SCALAR_REG (opnd);
  unsigned idx = NEON_SCALAR_INDEX (opnd);

  if (elsize == 16)
    {
      /* 16-bit elements: index 0-1 of D0-D15, index goes in bit 4.  */
      if (idx < 2 && reg < 16)
	return reg | (idx << 4);
    }
  else if (elsize == 32 && idx == 0)
    /* 32-bit elements: only index 0 is representable.  */
    return reg;

  first_error (_("scalar out of range"));
  return 0;
}
18030
/* Encode VCMLA (complex multiply-accumulate with rotation).  The rotation
   immediate (0, 90, 180 or 270) is encoded in two bits after division
   by 90.  */
static void
do_vcmla (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));
  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 0 && rot != 90 && rot != 180 && rot != 270,
	      _("immediate out of range"));
  rot /= 90;
  if (inst.operands[2].isscalar)
    {
      /* Indexed-scalar variant.  */
      enum neon_shape rs = neon_select_shape (NS_DDSI, NS_QQSI, NS_NULL);
      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      unsigned m = neon_scalar_for_vcmla (inst.operands[2].reg, size);
      inst.is_neon = 1;
      inst.instruction = 0xfe000800;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
      inst.instruction |= HI1 (inst.operands[1].reg) << 7;
      inst.instruction |= LOW4 (m);
      inst.instruction |= HI1 (m) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= rot << 20;
      inst.instruction |= (size == 32) << 23;
    }
  else
    {
      /* Vector (three-same) variant.  */
      enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      neon_three_same (neon_quad (rs), 0, -1);
      inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
      inst.instruction |= 0xfc200800;
      inst.instruction |= rot << 23;
      inst.instruction |= (size == 32) << 20;
    }
}
18072
/* Encode VCADD (complex add with rotation).  Only rotations of 90 and
   270 degrees are valid; the choice is encoded in a single bit.  */
static void
do_vcadd (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));
  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 90 && rot != 270, _("immediate out of range"));
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				   N_KEY | N_F16 | N_F32).size;
  neon_three_same (neon_quad (rs), 0, -1);
  inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
  inst.instruction |= 0xfc800800;
  /* Bit 24 distinguishes the two rotations.  */
  inst.instruction |= (rot == 270) << 24;
  inst.instruction |= (size == 32) << 20;
}
18091
18092 /* Dot Product instructions encoding support. */
18093
/* Encode a Dot Product instruction.  UNSIGNED_P is nonzero for the
   unsigned variant and zero for the signed one.  */
static void
do_neon_dotproduct (int unsigned_p)
{
  enum neon_shape rs;
  unsigned scalar_oprd2 = 0;
  int high8;

  if (inst.cond != COND_ALWAYS)
    as_warn (_("Dot Product instructions cannot be conditional, the behaviour "
	       "is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* Dot Product instructions are in three-same D/Q register format or the third
     operand can be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      scalar_oprd2 = neon_scalar_for_mul (inst.operands[2].reg, 32);
      high8 = 0xfe000000;
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
    }
  else
    {
      high8 = 0xfc000000;
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
    }

  if (unsigned_p)
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_U8);
  else
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_S8);

  /* The "U" bit in traditional Three Same encoding is fixed to 0 for Dot
     Product instruction, so we pass 0 as the "ubit" parameter.  And the
     "Size" field are fixed to 0x2, so we pass 32 as the "size" parameter.  */
  neon_three_same (neon_quad (rs), 0, 32);

  /* Undo neon_dp_fixup.  Dot Product instructions are using a slightly
     different NEON three-same encoding.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;
  /* Encode 'U' bit which indicates signedness.  */
  inst.instruction |= (unsigned_p ? 1 : 0) << 4;
  /* Re-encode operand2 if it's indexed scalar operand.  What has been encoded
     from inst.operand[2].reg in neon_three_same is GAS's internal encoding, not
     the instruction encoding.  */
  if (inst.operands[2].isscalar)
    {
      inst.instruction &= 0xffffffd0;
      inst.instruction |= LOW4 (scalar_oprd2);
      inst.instruction |= HI1 (scalar_oprd2) << 5;
    }
}
18148
18149 /* Dot Product instructions for signed integer. */
18150
/* Encode VSDOT (signed dot product).  Note: no `return <expr>' here —
   returning a void expression from a void function is a constraint
   violation in ISO C.  */
static void
do_neon_dotproduct_s (void)
{
  do_neon_dotproduct (0);
}
18156
18157 /* Dot Product instructions for unsigned integer. */
18158
/* Encode VUDOT (unsigned dot product).  Note: no `return <expr>' here —
   returning a void expression from a void function is a constraint
   violation in ISO C.  */
static void
do_neon_dotproduct_u (void)
{
  do_neon_dotproduct (1);
}
18164
18165 /* Crypto v1 instructions. */
/* Encode a two-register crypto (v1) instruction.  ELTTYPE is the required
   element type (N_8 or N_32); OP is placed in bits [7:6], or omitted when
   -1.  */
static void
do_crypto_2op_1 (unsigned elttype, int op)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
      == NT_invtype)
    return;

  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  if (op != -1)
    inst.instruction |= op << 6;

  /* Top byte: unconditional encoding space.  */
  if (thumb_mode)
    inst.instruction |= 0xfc000000;
  else
    inst.instruction |= 0xf0000000;
}
18190
/* Encode a three-register crypto (v1) instruction as a Neon "three same"
   operation.  U is the U bit; OP selects the size field (8 << op).  */
static void
do_crypto_3op_1 (int u, int op)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
		       N_32 | N_UNT | N_KEY).type == NT_invtype)
    return;

  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  neon_three_same (1, u, 8 << op);
}
18205
/* AESE: 8-bit two-operand crypto, op 0.  */
static void
do_aese (void)
{
  do_crypto_2op_1 (N_8, 0);
}

/* AESD: 8-bit two-operand crypto, op 1.  */
static void
do_aesd (void)
{
  do_crypto_2op_1 (N_8, 1);
}

/* AESMC: 8-bit two-operand crypto, op 2.  */
static void
do_aesmc (void)
{
  do_crypto_2op_1 (N_8, 2);
}

/* AESIMC: 8-bit two-operand crypto, op 3.  */
static void
do_aesimc (void)
{
  do_crypto_2op_1 (N_8, 3);
}

/* SHA1C: three-operand crypto, U=0, op 0.  */
static void
do_sha1c (void)
{
  do_crypto_3op_1 (0, 0);
}

/* SHA1P: three-operand crypto, U=0, op 1.  */
static void
do_sha1p (void)
{
  do_crypto_3op_1 (0, 1);
}

/* SHA1M: three-operand crypto, U=0, op 2.  */
static void
do_sha1m (void)
{
  do_crypto_3op_1 (0, 2);
}

/* SHA1SU0: three-operand crypto, U=0, op 3.  */
static void
do_sha1su0 (void)
{
  do_crypto_3op_1 (0, 3);
}

/* SHA256H: three-operand crypto, U=1, op 0.  */
static void
do_sha256h (void)
{
  do_crypto_3op_1 (1, 0);
}

/* SHA256H2: three-operand crypto, U=1, op 1.  */
static void
do_sha256h2 (void)
{
  do_crypto_3op_1 (1, 1);
}

/* SHA256SU1: three-operand crypto, U=1, op 2.  */
static void
do_sha256su1 (void)
{
  do_crypto_3op_1 (1, 2);
}

/* SHA1H: 32-bit two-operand crypto with no op field.  */
static void
do_sha1h (void)
{
  do_crypto_2op_1 (N_32, -1);
}

/* SHA1SU1: 32-bit two-operand crypto, op 0.  */
static void
do_sha1su1 (void)
{
  do_crypto_2op_1 (N_32, 0);
}

/* SHA256SU0: 32-bit two-operand crypto, op 1.  */
static void
do_sha256su0 (void)
{
  do_crypto_2op_1 (N_32, 1);
}
18289
18290 static void
18291 do_crc32_1 (unsigned int poly, unsigned int sz)
18292 {
18293 unsigned int Rd = inst.operands[0].reg;
18294 unsigned int Rn = inst.operands[1].reg;
18295 unsigned int Rm = inst.operands[2].reg;
18296
18297 set_it_insn_type (OUTSIDE_IT_INSN);
18298 inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
18299 inst.instruction |= LOW4 (Rn) << 16;
18300 inst.instruction |= LOW4 (Rm);
18301 inst.instruction |= sz << (thumb_mode ? 4 : 21);
18302 inst.instruction |= poly << (thumb_mode ? 20 : 9);
18303
18304 if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
18305 as_warn (UNPRED_REG ("r15"));
18306 }
18307
/* CRC32B: poly 0, size 0 (byte).  */
static void
do_crc32b (void)
{
  do_crc32_1 (0, 0);
}

/* CRC32H: poly 0, size 1 (halfword).  */
static void
do_crc32h (void)
{
  do_crc32_1 (0, 1);
}

/* CRC32W: poly 0, size 2 (word).  */
static void
do_crc32w (void)
{
  do_crc32_1 (0, 2);
}

/* CRC32CB: poly 1, size 0 (byte).  */
static void
do_crc32cb (void)
{
  do_crc32_1 (1, 0);
}

/* CRC32CH: poly 1, size 1 (halfword).  */
static void
do_crc32ch (void)
{
  do_crc32_1 (1, 1);
}

/* CRC32CW: poly 1, size 2 (word).  */
static void
do_crc32cw (void)
{
  do_crc32_1 (1, 2);
}
18343
/* Encode VJCVT: a double-precision to signed 32-bit conversion (per the
   N_S32/N_F64 type check); requires ARMv8 VFP.  */
static void
do_vjcvt (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
	      _(BAD_FPU));
  neon_check_type (2, NS_FD, N_S32, N_F64);
  do_vfp_sp_dp_cvt ();
  do_vfp_cond_or_thumb ();
}
18353
18354 \f
18355 /* Overall per-instruction processing. */
18356
18357 /* We need to be able to fix up arbitrary expressions in some statements.
18358 This is so that we can handle symbols that are an arbitrary distance from
18359 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
18360 which returns part of an address in a form which will be valid for
18361 a data instruction. We do this by pushing the expression into a symbol
18362 in the expr_section, and creating a fix for that. */
18363
/* Create a fixup for expression EXP of SIZE bytes at offset WHERE in FRAG,
   with relocation type RELOC.  PC_REL is nonzero for pc-relative fixes.
   Constant pc-relative expressions are first wrapped in an absolute-valued
   symbol so there is something to refer to in the object file.  */
static void
fix_new_arm (fragS *	   frag,
	     int	   where,
	     short int	   size,
	     expressionS * exp,
	     int	   pc_rel,
	     int	   reloc)
{
  fixS *	   new_fix;

  switch (exp->X_op)
    {
    case O_constant:
      if (pc_rel)
	{
	  /* Create an absolute valued symbol, so we have something to
	     refer to in the object file.  Unfortunately for us, gas's
	     generic expression parsing will already have folded out
	     any use of .set foo/.type foo %function that may have
	     been used to set type information of the target location,
	     that's being specified symbolically.  We have to presume
	     the user knows what they are doing.  */
	  char name[16 + 8];
	  symbolS *symbol;

	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);

	  symbol = symbol_find_or_make (name);
	  S_SET_SEGMENT (symbol, absolute_section);
	  symbol_set_frag (symbol, &zero_address_frag);
	  S_SET_VALUE (symbol, exp->X_add_number);
	  /* Rewrite the expression to refer to the new symbol.  */
	  exp->X_op = O_symbol;
	  exp->X_add_symbol = symbol;
	  exp->X_add_number = 0;
	}
      /* FALLTHROUGH */
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
			     (enum bfd_reloc_code_real) reloc);
      break;

    default:
      /* Anything more complex: fold the expression into a symbol and fix
	 against that.  */
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
				  pc_rel, (enum bfd_reloc_code_real) reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
18417
/* Create a frag for an instruction requiring relaxation.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  /* Split the relocation expression into a symbol and an offset; anything
     more complex is folded into a new expression symbol.  */
  switch (inst.relocs[0].exp.X_op)
    {
    case O_symbol:
      sym = inst.relocs[0].exp.X_add_symbol;
      offset = inst.relocs[0].exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.relocs[0].exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.relocs[0].exp);
      offset = 0;
      break;
    }
  /* Emit the 2-byte (THUMB_SIZE) form now; the frag may grow to
     INSN_SIZE bytes during relaxation.  */
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
18449
18450 /* Write a 32-bit thumb instruction to buf. */
18451 static void
18452 put_thumb32_insn (char * buf, unsigned long insn)
18453 {
18454 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
18455 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
18456 }
18457
/* Write the assembled instruction in INST to the current frag, emitting
   any fixups and DWARF line info.  STR is the original source line, used
   only for diagnostics.  */
static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  if (inst.relax)
    {
      /* Final size not yet known: emit a relaxable frag instead.  */
      output_relax_insn ();
      return;
    }
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      /* 32-bit Thumb instruction: two halfwords, high half first.  */
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* 8-byte case: the same 4-byte encoding is written twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  /* Emit a fixup for every pending relocation.  */
  int r;
  for (r = 0; r < ARM_IT_MAX_RELOCS; r++)
    {
      if (inst.relocs[r].type != BFD_RELOC_UNUSED)
	fix_new_arm (frag_now, to - frag_now->fr_literal,
		     inst.size, & inst.relocs[r].exp, inst.relocs[r].pc_rel,
		     inst.relocs[r].type);
    }

  dwarf2_emit_insn (inst.size);
}
18508
18509 static char *
18510 output_it_inst (int cond, int mask, char * to)
18511 {
18512 unsigned long instruction = 0xbf00;
18513
18514 mask &= 0xf;
18515 instruction |= mask;
18516 instruction |= cond << 4;
18517
18518 if (to == NULL)
18519 {
18520 to = frag_more (2);
18521 #ifdef OBJ_ELF
18522 dwarf2_emit_insn (2);
18523 #endif
18524 }
18525
18526 md_number_to_chars (to, instruction, 2);
18527
18528 return to;
18529 }
18530
/* Tag values used in struct asm_opcode's tag field.  They describe how
   (and where) a mnemonic accepts its conditional affix; see
   opcode_lookup.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs. */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
18564
18565 /* Subroutine of md_assemble, responsible for looking up the primary
18566 opcode from the mnemonic the user wrote. STR points to the
18567 beginning of the mnemonic.
18568
18569 This is not simply a hash table lookup, because of conditional
18570 variants. Most instructions have conditional variants, which are
18571 expressed with a _conditional affix_ to the mnemonic. If we were
18572 to encode each conditional variant as a literal string in the opcode
18573 table, it would have approximately 20,000 entries.
18574
18575 Most mnemonics take this affix as a suffix, and in unified syntax,
18576 'most' is upgraded to 'all'. However, in the divided syntax, some
18577 instructions take the affix as an infix, notably the s-variants of
18578 the arithmetic instructions. Of those instructions, all but six
18579 have the infix appear after the third character of the mnemonic.
18580
18581 Accordingly, the algorithm for looking up primary opcodes given
18582 an identifier is:
18583
18584 1. Look up the identifier in the opcode table.
18585 If we find a match, go to step U.
18586
18587 2. Look up the last two characters of the identifier in the
18588 conditions table. If we find a match, look up the first N-2
18589 characters of the identifier in the opcode table. If we
18590 find a match, go to step CE.
18591
18592 3. Look up the fourth and fifth characters of the identifier in
18593 the conditions table. If we find a match, extract those
18594 characters from the identifier, and look up the remaining
18595 characters in the opcode table. If we find a match, go
18596 to step CM.
18597
18598 4. Fail.
18599
18600 U. Examine the tag field of the opcode structure, in case this is
18601 one of the six instructions with its conditional infix in an
18602 unusual place. If it is, the tag tells us where to find the
18603 infix; look it up in the conditions table and set inst.cond
18604 accordingly. Otherwise, this is an unconditional instruction.
18605 Again set inst.cond accordingly. Return the opcode structure.
18606
18607 CE. Examine the tag field to make sure this is an instruction that
18608 should receive a conditional suffix. If it is not, fail.
18609 Otherwise, set inst.cond from the suffix we already looked up,
18610 and return the opcode structure.
18611
18612 CM. Examine the tag field to make sure this is an instruction that
18613 should receive a conditional infix after the third character.
18614 If it is not, fail. Otherwise, undo the edits to the current
18615 line of input and proceed as for case CE. */
18616
static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || *end == '.')
      break;

  /* Empty mnemonic: nothing to look up.  */
  if (end == base)
    return NULL;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
	 use.  */
      if (unified_syntax && end[1] == 'w')
	inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;

      inst.vectype.elems = 0;

      /* Advance past ".w"/".n" if one was consumed, otherwise leave *str
	 pointing at the '.' so a Neon type suffix can be parsed.  */
      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix (possible in either unified or
	     non-unified ARM syntax mode).  */
	  if (parse_neon_type (&inst.vectype, str) == FAIL)
	    return NULL;
	}
      else if (end[offset] != '\0' && end[offset] != ' ')
	return NULL;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    end - base);
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
	{
	  /* Not one of the six odd-infix instructions: unconditional.  */
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      if (warn_on_deprecated && unified_syntax)
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
      /* The tag encodes the offset of the two-character condition infix
	 within the mnemonic.  */
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      gas_assert (cond);

      inst.cond = cond->value;
      return opcode;
    }

  /* Cannot have a conditional suffix on a mnemonic of less than three
     characters (one base character plus the two-character suffix).  */
  if (end - base < 3)
    return NULL;

  /* Look for suffixed mnemonic.  */
  affix = end - 2;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    affix - base);
  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
	  break;

	case OT_cinfix3:
	case OT_cinfix3_deprecated:
	case OT_odd_infix_unc:
	  if (!unified_syntax)
	    return NULL;
	  /* Fall through.  */

	case OT_csuffix:
	case OT_csuffixF:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    inst.cond = cond->value;
	  else
	    {
	      /* Delayed diagnostic.  */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return NULL;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return NULL;

  /* Look for infixed mnemonic in the usual position.  */
  affix = base + 3;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return NULL;

  /* Temporarily excise the two infix characters from the mnemonic
     in-place, probe the opcode table with the shortened string, then
     restore the original text (step CM undoes this edit).  */
  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
	  || opcode->tag == OT_cinfix3_deprecated
	  || opcode->tag == OT_csuf_or_in3
	  || opcode->tag == OT_cinfix3_legacy))
    {
      /* Step CM.  */
      if (warn_on_deprecated && unified_syntax
	  && (opcode->tag == OT_cinfix3
	      || opcode->tag == OT_cinfix3_deprecated))
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return NULL;
}
18773
18774 /* This function generates an initial IT instruction, leaving its block
18775 virtually open for the new instructions. Eventually,
18776 the mask will be updated by now_it_add_mask () each time
18777 a new instruction needs to be included in the IT block.
18778 Finally, the block is closed with close_automatic_it_block ().
18779 The block closure can be requested either from md_assemble (),
18780 a tencode (), or due to a label hook. */
18781
18782 static void
18783 new_automatic_it_block (int cond)
18784 {
18785 now_it.state = AUTOMATIC_IT_BLOCK;
18786 now_it.mask = 0x18;
18787 now_it.cc = cond;
18788 now_it.block_length = 1;
18789 mapping_state (MAP_THUMB);
18790 now_it.insn = output_it_inst (cond, now_it.mask, NULL);
18791 now_it.warn_deprecated = FALSE;
18792 now_it.insn_cond = TRUE;
18793 }
18794
18795 /* Close an automatic IT block.
18796 See comments in new_automatic_it_block (). */
18797
18798 static void
18799 close_automatic_it_block (void)
18800 {
18801 now_it.mask = 0x10;
18802 now_it.block_length = 0;
18803 }
18804
18805 /* Update the mask of the current automatically-generated IT
18806 instruction. See comments in new_automatic_it_block (). */
18807
18808 static void
18809 now_it_add_mask (int cond)
18810 {
18811 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
18812 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
18813 | ((bitvalue) << (nbit)))
18814 const int resulting_bit = (cond & 1);
18815
18816 now_it.mask &= 0xf;
18817 now_it.mask = SET_BIT_VALUE (now_it.mask,
18818 resulting_bit,
18819 (5 - now_it.block_length));
18820 now_it.mask = SET_BIT_VALUE (now_it.mask,
18821 1,
18822 ((5 - now_it.block_length) - 1) );
18823 output_it_inst (now_it.cc, now_it.mask, now_it.insn);
18824
18825 #undef CLEAR_BIT
18826 #undef SET_BIT_VALUE
18827 }
18828
18829 /* The IT blocks handling machinery is accessed through the these functions:
18830 it_fsm_pre_encode () from md_assemble ()
18831 set_it_insn_type () optional, from the tencode functions
18832 set_it_insn_type_last () ditto
18833 in_it_block () ditto
18834 it_fsm_post_encode () from md_assemble ()
18835 force_automatic_it_block_close () from label handling functions
18836
18837 Rationale:
18838 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
18839 initializing the IT insn type with a generic initial value depending
18840 on the inst.condition.
18841 2) During the tencode function, two things may happen:
18842 a) The tencode function overrides the IT insn type by
18843 calling either set_it_insn_type (type) or set_it_insn_type_last ().
18844 b) The tencode function queries the IT block state by
18845 calling in_it_block () (i.e. to determine narrow/not narrow mode).
18846
18847 Both set_it_insn_type and in_it_block run the internal FSM state
18848 handling function (handle_it_state), because: a) setting the IT insn
18849 type may incur in an invalid state (exiting the function),
18850 and b) querying the state requires the FSM to be updated.
18851 Specifically we want to avoid creating an IT block for conditional
18852 branches, so it_fsm_pre_encode is actually a guess and we can't
18853 determine whether an IT block is required until the tencode () routine
   has decided what type of instruction this actually is.
18855 Because of this, if set_it_insn_type and in_it_block have to be used,
18856 set_it_insn_type has to be called first.
18857
18858 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
18859 determines the insn IT type depending on the inst.cond code.
18860 When a tencode () routine encodes an instruction that can be
18861 either outside an IT block, or, in the case of being inside, has to be
18862 the last one, set_it_insn_type_last () will determine the proper
18863 IT instruction type based on the inst.cond code. Otherwise,
18864 set_it_insn_type can be called for overriding that logic or
18865 for covering other cases.
18866
18867 Calling handle_it_state () may not transition the IT block state to
18868 OUTSIDE_IT_BLOCK immediately, since the (current) state could be
18869 still queried. Instead, if the FSM determines that the state should
18870 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
18871 after the tencode () function: that's what it_fsm_post_encode () does.
18872
18873 Since in_it_block () calls the state handling function to get an
18874 updated state, an error may occur (due to invalid insns combination).
18875 In that case, inst.error is set.
18876 Therefore, inst.error has to be checked after the execution of
18877 the tencode () routine.
18878
18879 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
18880 any pending state change (if any) that didn't take place in
18881 handle_it_state () as explained above. */
18882
18883 static void
18884 it_fsm_pre_encode (void)
18885 {
18886 if (inst.cond != COND_ALWAYS)
18887 inst.it_insn_type = INSIDE_IT_INSN;
18888 else
18889 inst.it_insn_type = OUTSIDE_IT_INSN;
18890
18891 now_it.state_handled = 0;
18892 }
18893
18894 /* IT state FSM handling function. */
18895
static int
handle_it_state (void)
{
  now_it.state_handled = 1;
  now_it.insn_cond = FALSE;

  switch (now_it.state)
    {
    case OUTSIDE_IT_BLOCK:
      /* No IT block is open.  A conditional instruction either starts an
	 automatic block (if implicit IT is enabled) or is an error.  */
      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (thumb_mode == 0)
	    {
	      if (unified_syntax
		  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		as_tsktsk (_("Warning: conditional outside an IT block"\
			     " for Thumb."));
	    }
	  else
	    {
	      if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		{
		  /* Automatically generate the IT instruction.  */
		  new_automatic_it_block (inst.cond);
		  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
		    close_automatic_it_block ();
		}
	      else
		{
		  inst.error = BAD_OUT_IT;
		  return FAIL;
		}
	    }
	  break;

	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case IT_INSN:
	  /* An explicit IT instruction opens a manual block.  */
	  now_it.state = MANUAL_IT_BLOCK;
	  now_it.block_length = 0;
	  break;
	}
      break;

    case AUTOMATIC_IT_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	 to incompatible conditions or
	 4 insns-length block reached).  */

      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_it_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_it.block_length++;

	  /* Close (and possibly reopen) the block if it is full or the
	     condition no longer fits the block's base condition.  */
	  if (now_it.block_length > 4
	      || !now_it_compatible (inst.cond))
	    {
	      force_automatic_it_block_close ();
	      if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    {
	      now_it.insn_cond = TRUE;
	      now_it_add_mask (inst.cond);
	    }

	  /* Instructions that must be last in an IT block close it.  */
	  if (now_it.state == AUTOMATIC_IT_BLOCK
	      && (inst.it_insn_type == INSIDE_IT_LAST_INSN
		  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	case NEUTRAL_IT_INSN:
	  now_it.block_length++;
	  now_it.insn_cond = TRUE;

	  if (now_it.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    now_it_add_mask (now_it.cc & 1);
	  break;

	case IT_INSN:
	  close_automatic_it_block ();
	  now_it.state = MANUAL_IT_BLOCK;
	  break;
	}
      break;

    case MANUAL_IT_BLOCK:
      {
	/* Check conditional suffixes.  */
	/* Derive the condition this slot must carry from the block's base
	   condition and the next mask bit (then vs else lane).  */
	const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
	int is_last;
	now_it.mask <<= 1;
	now_it.mask &= 0x1f;
	is_last = (now_it.mask == 0x10);
	now_it.insn_cond = TRUE;

	switch (inst.it_insn_type)
	  {
	  case OUTSIDE_IT_INSN:
	    inst.error = BAD_NOT_IT;
	    return FAIL;

	  case INSIDE_IT_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    break;

	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in an IT block.  */
	    break;

	  case IT_INSN:
	    inst.error = BAD_IT_IT;
	    return FAIL;
	  }
      }
      break;
    }

  return SUCCESS;
}
19057
/* A pattern/mask pair describing a class of 16-bit Thumb encodings,
   with a printable description used in diagnostics.  */
struct depr_insn_mask
{
  unsigned long pattern;	/* Encoding bits expected after masking.  */
  unsigned long mask;		/* Which encoding bits to compare.  */
  const char* description;	/* Human-readable class name.  */
};
19064
/* List of 16-bit instruction patterns deprecated in an IT block in
   ARMv8.  */
static const struct depr_insn_mask depr_it_insns[] = {
  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
  { 0xa000, 0xb800, N_("ADR") },
  { 0x4800, 0xf800, N_("Literal loads") },
  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
     field in asm_opcode. 'tvalue' is used at the stage this check happen.  */
  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
  /* Terminator: a zero mask ends the scan in it_fsm_post_encode ().  */
  { 0, 0, NULL }
};
19079
19080 static void
19081 it_fsm_post_encode (void)
19082 {
19083 int is_last;
19084
19085 if (!now_it.state_handled)
19086 handle_it_state ();
19087
19088 if (now_it.insn_cond
19089 && !now_it.warn_deprecated
19090 && warn_on_deprecated
19091 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)
19092 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m))
19093 {
19094 if (inst.instruction >= 0x10000)
19095 {
19096 as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
19097 "performance deprecated in ARMv8-A and ARMv8-R"));
19098 now_it.warn_deprecated = TRUE;
19099 }
19100 else
19101 {
19102 const struct depr_insn_mask *p = depr_it_insns;
19103
19104 while (p->mask != 0)
19105 {
19106 if ((inst.instruction & p->mask) == p->pattern)
19107 {
19108 as_tsktsk (_("IT blocks containing 16-bit Thumb "
19109 "instructions of the following class are "
19110 "performance deprecated in ARMv8-A and "
19111 "ARMv8-R: %s"), p->description);
19112 now_it.warn_deprecated = TRUE;
19113 break;
19114 }
19115
19116 ++p;
19117 }
19118 }
19119
19120 if (now_it.block_length > 1)
19121 {
19122 as_tsktsk (_("IT blocks containing more than one conditional "
19123 "instruction are performance deprecated in ARMv8-A and "
19124 "ARMv8-R"));
19125 now_it.warn_deprecated = TRUE;
19126 }
19127 }
19128
19129 is_last = (now_it.mask == 0x10);
19130 if (is_last)
19131 {
19132 now_it.state = OUTSIDE_IT_BLOCK;
19133 now_it.mask = 0;
19134 }
19135 }
19136
19137 static void
19138 force_automatic_it_block_close (void)
19139 {
19140 if (now_it.state == AUTOMATIC_IT_BLOCK)
19141 {
19142 close_automatic_it_block ();
19143 now_it.state = OUTSIDE_IT_BLOCK;
19144 now_it.mask = 0;
19145 }
19146 }
19147
19148 static int
19149 in_it_block (void)
19150 {
19151 if (!now_it.state_handled)
19152 handle_it_state ();
19153
19154 return now_it.state != OUTSIDE_IT_BLOCK;
19155 }
19156
19157 /* Whether OPCODE only has T32 encoding. Since this function is only used by
19158 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
19159 here, hence the "known" in the function name. */
19160
19161 static bfd_boolean
19162 known_t32_only_insn (const struct asm_opcode *opcode)
19163 {
19164 /* Original Thumb-1 wide instruction. */
19165 if (opcode->tencode == do_t_blx
19166 || opcode->tencode == do_t_branch23
19167 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
19168 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
19169 return TRUE;
19170
19171 /* Wide-only instruction added to ARMv8-M Baseline. */
19172 if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m_m_only)
19173 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
19174 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
19175 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
19176 return TRUE;
19177
19178 return FALSE;
19179 }
19180
19181 /* Whether wide instruction variant can be used if available for a valid OPCODE
19182 in ARCH. */
19183
19184 static bfd_boolean
19185 t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
19186 {
19187 if (known_t32_only_insn (opcode))
19188 return TRUE;
19189
19190 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
19191 of variant T3 of B.W is checked in do_t_branch. */
19192 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
19193 && opcode->tencode == do_t_branch)
19194 return TRUE;
19195
19196 /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit. */
19197 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
19198 && opcode->tencode == do_t_mov_cmp
19199 /* Make sure CMP instruction is not affected. */
19200 && opcode->aencode == do_mov)
19201 return TRUE;
19202
19203 /* Wide instruction variants of all instructions with narrow *and* wide
19204 variants become available with ARMv6t2. Other opcodes are either
19205 narrow-only or wide-only and are thus available if OPCODE is valid. */
19206 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
19207 return TRUE;
19208
19209 /* OPCODE with narrow only instruction variant or wide variant not
19210 available. */
19211 return FALSE;
19212 }
19213
/* Assemble the instruction statement in STR: look up the mnemonic,
   validate it against the selected CPU/architecture, parse its operands,
   run the Thumb or ARM encoder and the IT-block FSM, and finally emit
   the result via output_inst ().  */
void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Start from a clean per-instruction state; all reloc slots unused.  */
  memset (&inst, '\0', sizeof (inst));
  int r;
  for (r = 0; r < ARM_IT_MAX_RELOCS; r++)
    inst.relocs[r].type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_tsktsk (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  if (opcode->tencode == do_t_swi)
	    as_bad (_("SVC is not permitted on this architecture"));
	  else
	    as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      /* Two things are addressed here:
	 1) Implicit require narrow instructions on Thumb-1.
	    This avoids relaxation accidentally introducing Thumb-2
	    instructions.
	 2) Reject wide instructions in non Thumb-2 cores.

	 Only instructions with narrow and wide variants need to be handled
	 but selecting all non wide-only instructions is easier.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
	  && !t32_insn_ok (variant, opcode))
	{
	  if (inst.size_req == 0)
	    inst.size_req = 2;
	  else if (inst.size_req == 4)
	    {
	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
		as_bad (_("selected processor does not support 32bit wide "
			  "variant of instruction `%s'"), str);
	      else
		as_bad (_("selected processor does not support `%s' in "
			  "Thumb-2 mode"), str);
	      return;
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
	{
	  /* Prepare the it_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  /* Derive the instruction size (2 or 4 bytes) from the encoding
	     and check it against any explicit .n/.w width suffix.  */
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  The impact
	 of relaxable instructions will be considered later after we finish all
	 relaxation.  */
      if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
	variant = arm_arch_none;
      else
	variant = cpu_variant;
      if (inst.size == 4 && !t32_insn_ok (variant, opcode))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      /* Fill in the condition field (bits 28-31) of the encoding.  */
      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xFU << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  output_inst (str);
}
19408
19409 static void
19410 check_it_blocks_finished (void)
19411 {
19412 #ifdef OBJ_ELF
19413 asection *sect;
19414
19415 for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
19416 if (seg_info (sect)->tc_segment_info_data.current_it.state
19417 == MANUAL_IT_BLOCK)
19418 {
19419 as_warn (_("section '%s' finished with an open IT block."),
19420 sect->name);
19421 }
19422 #else
19423 if (now_it.state == MANUAL_IT_BLOCK)
19424 as_warn (_("file finished with an open IT block."));
19425 #endif
19426 }
19427
19428 /* Various frobbings of labels and their addresses. */
19429
/* Called at the start of every input line: forget the most recently
   seen label (md_assemble only re-aligns labels recorded since then).  */
void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
19435
/* Hook run when label SYM is defined: record it for md_assemble's
   alignment fixup, tag it with the current Thumb/ARM and interworking
   state, close any open automatic IT block, mark Thumb function entry
   points, and emit DWARF line info for the label.  */
void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:	.word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
19494
19495 bfd_boolean
19496 arm_data_in_code (void)
19497 {
19498 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
19499 {
19500 *input_line_pointer = '/';
19501 input_line_pointer += 5;
19502 *input_line_pointer = 0;
19503 return TRUE;
19504 }
19505
19506 return FALSE;
19507 }
19508
19509 char *
19510 arm_canonicalize_symbol_name (char * name)
19511 {
19512 int len;
19513
19514 if (thumb_mode && (len = strlen (name)) > 5
19515 && streq (name + len - 5, "/data"))
19516 *(name + len - 5) = 0;
19517
19518 return name;
19519 }
19520 \f
19521 /* Table of all register names defined by default. The user can
19522 define additional names with .req. Note that all register names
19523 should appear in both upper and lowercase variants. Some registers
19524 also have mixed-case names. */
19525
/* Expand to an initializer for one struct reg_entry: stringified name S,
   internal number N, and register type REG_TYPE_##t.  NOTE(review): the
   trailing TRUE/0 fields appear to mark a fixed (non-.req) entry --
   confirm against the struct reg_entry declaration.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
/* Register whose printable name is prefix P followed by its number N.  */
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* As REGNUM, but the internal number is twice the printed index (used
   below for Neon Q registers via REGSET2).  */
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
/* Sixteen consecutive registers P0..P15.  */
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
/* The high sixteen registers P16..P31.  */
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
/* Sixteen registers P0..P15 with doubled internal numbers (REGNUM2).  */
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
/* Banked LR/SP/SPSR entries (upper- and lowercase) for one CPU mode,
   encoded with the 768 banked-register marker and BASE in bits 16+.  */
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
19551
19552 static const struct reg_entry reg_names[] =
19553 {
19554 /* ARM integer registers. */
19555 REGSET(r, RN), REGSET(R, RN),
19556
19557 /* ATPCS synonyms. */
19558 REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
19559 REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
19560 REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
19561
19562 REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
19563 REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
19564 REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
19565
19566 /* Well-known aliases. */
19567 REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
19568 REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
19569
19570 REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
19571 REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
19572
19573 /* Coprocessor numbers. */
19574 REGSET(p, CP), REGSET(P, CP),
19575
19576 /* Coprocessor register numbers. The "cr" variants are for backward
19577 compatibility. */
19578 REGSET(c, CN), REGSET(C, CN),
19579 REGSET(cr, CN), REGSET(CR, CN),
19580
19581 /* ARM banked registers. */
19582 REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
19583 REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
19584 REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
19585 REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
19586 REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
19587 REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
19588 REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),
19589
19590 REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
19591 REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
19592 REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
19593 REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
19594 REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
19595 REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
19596 REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
19597 REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),
19598
19599 SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
19600 SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
19601 SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
19602 SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
19603 SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
19604 REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
19605 REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
19606 REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
19607 REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),
19608
19609 /* FPA registers. */
19610 REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
19611 REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
19612
19613 REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
19614 REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
19615
19616 /* VFP SP registers. */
19617 REGSET(s,VFS), REGSET(S,VFS),
19618 REGSETH(s,VFS), REGSETH(S,VFS),
19619
19620 /* VFP DP Registers. */
19621 REGSET(d,VFD), REGSET(D,VFD),
19622 /* Extra Neon DP registers. */
19623 REGSETH(d,VFD), REGSETH(D,VFD),
19624
19625 /* Neon QP registers. */
19626 REGSET2(q,NQ), REGSET2(Q,NQ),
19627
19628 /* VFP control registers. */
19629 REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
19630 REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
19631 REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
19632 REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
19633 REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
19634 REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
19635 REGDEF(mvfr2,5,VFC), REGDEF(MVFR2,5,VFC),
19636
19637 /* Maverick DSP coprocessor registers. */
19638 REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
19639 REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),
19640
19641 REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
19642 REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
19643 REGDEF(dspsc,0,DSPSC),
19644
19645 REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
19646 REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
19647 REGDEF(DSPSC,0,DSPSC),
19648
19649 /* iWMMXt data registers - p0, c0-15. */
19650 REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
19651
19652 /* iWMMXt control registers - p1, c0-3. */
19653 REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
19654 REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
19655 REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
19656 REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),
19657
19658 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
19659 REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
19660 REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
19661 REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
19662 REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),
19663
19664 /* XScale accumulator registers. */
19665 REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
19666 };
19667 #undef REGDEF
19668 #undef REGNUM
19669 #undef REGSET
19670
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all", PSR_c | PSR_f},
  {"flg", PSR_f},
  {"ctl", PSR_c},

  /* Individual flags.  */
  {"f", PSR_f},
  {"c", PSR_c},
  {"x", PSR_x},
  {"s", PSR_s},

  /* Combinations of flags.  Every permutation of two, three and four
     of the f/s/x/c letters is listed explicitly so users can write
     the fields in any order; permutations of the same letters OR
     together to the same mask.  */
  {"fs", PSR_f | PSR_s},
  {"fx", PSR_f | PSR_x},
  {"fc", PSR_f | PSR_c},
  {"sf", PSR_s | PSR_f},
  {"sx", PSR_s | PSR_x},
  {"sc", PSR_s | PSR_c},
  {"xf", PSR_x | PSR_f},
  {"xs", PSR_x | PSR_s},
  {"xc", PSR_x | PSR_c},
  {"cf", PSR_c | PSR_f},
  {"cs", PSR_c | PSR_s},
  {"cx", PSR_c | PSR_x},
  {"fsx", PSR_f | PSR_s | PSR_x},
  {"fsc", PSR_f | PSR_s | PSR_c},
  {"fxs", PSR_f | PSR_x | PSR_s},
  {"fxc", PSR_f | PSR_x | PSR_c},
  {"fcs", PSR_f | PSR_c | PSR_s},
  {"fcx", PSR_f | PSR_c | PSR_x},
  {"sfx", PSR_s | PSR_f | PSR_x},
  {"sfc", PSR_s | PSR_f | PSR_c},
  {"sxf", PSR_s | PSR_x | PSR_f},
  {"sxc", PSR_s | PSR_x | PSR_c},
  {"scf", PSR_s | PSR_c | PSR_f},
  {"scx", PSR_s | PSR_c | PSR_x},
  {"xfs", PSR_x | PSR_f | PSR_s},
  {"xfc", PSR_x | PSR_f | PSR_c},
  {"xsf", PSR_x | PSR_s | PSR_f},
  {"xsc", PSR_x | PSR_s | PSR_c},
  {"xcf", PSR_x | PSR_c | PSR_f},
  {"xcs", PSR_x | PSR_c | PSR_s},
  {"cfs", PSR_c | PSR_f | PSR_s},
  {"cfx", PSR_c | PSR_f | PSR_x},
  {"csf", PSR_c | PSR_s | PSR_f},
  {"csx", PSR_c | PSR_s | PSR_x},
  {"cxf", PSR_c | PSR_x | PSR_f},
  {"cxs", PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
19749
19750 /* Table of V7M psr names. */
19751 static const struct asm_psr v7m_psrs[] =
19752 {
19753 {"apsr", 0x0 }, {"APSR", 0x0 },
19754 {"iapsr", 0x1 }, {"IAPSR", 0x1 },
19755 {"eapsr", 0x2 }, {"EAPSR", 0x2 },
19756 {"psr", 0x3 }, {"PSR", 0x3 },
19757 {"xpsr", 0x3 }, {"XPSR", 0x3 }, {"xPSR", 3 },
19758 {"ipsr", 0x5 }, {"IPSR", 0x5 },
19759 {"epsr", 0x6 }, {"EPSR", 0x6 },
19760 {"iepsr", 0x7 }, {"IEPSR", 0x7 },
19761 {"msp", 0x8 }, {"MSP", 0x8 },
19762 {"psp", 0x9 }, {"PSP", 0x9 },
19763 {"msplim", 0xa }, {"MSPLIM", 0xa },
19764 {"psplim", 0xb }, {"PSPLIM", 0xb },
19765 {"primask", 0x10}, {"PRIMASK", 0x10},
19766 {"basepri", 0x11}, {"BASEPRI", 0x11},
19767 {"basepri_max", 0x12}, {"BASEPRI_MAX", 0x12},
19768 {"faultmask", 0x13}, {"FAULTMASK", 0x13},
19769 {"control", 0x14}, {"CONTROL", 0x14},
19770 {"msp_ns", 0x88}, {"MSP_NS", 0x88},
19771 {"psp_ns", 0x89}, {"PSP_NS", 0x89},
19772 {"msplim_ns", 0x8a}, {"MSPLIM_NS", 0x8a},
19773 {"psplim_ns", 0x8b}, {"PSPLIM_NS", 0x8b},
19774 {"primask_ns", 0x90}, {"PRIMASK_NS", 0x90},
19775 {"basepri_ns", 0x91}, {"BASEPRI_NS", 0x91},
19776 {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
19777 {"control_ns", 0x94}, {"CONTROL_NS", 0x94},
19778 {"sp_ns", 0x98}, {"SP_NS", 0x98 }
19779 };
19780
/* Table of all shift-in-operand names, each in lower and upper case.
   "asl" is accepted as a synonym for "lsl": both map to SHIFT_LSL.  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
};
19791
19792 /* Table of all explicit relocation names. */
19793 #ifdef OBJ_ELF
19794 static struct reloc_entry reloc_names[] =
19795 {
19796 { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
19797 { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
19798 { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
19799 { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
19800 { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
19801 { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
19802 { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
19803 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
19804 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
19805 { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
19806 { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32},
19807 { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
19808 { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
19809 { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
19810 { "tlscall", BFD_RELOC_ARM_TLS_CALL},
19811 { "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
19812 { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
19813 { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ},
19814 { "gotfuncdesc", BFD_RELOC_ARM_GOTFUNCDESC },
19815 { "GOTFUNCDESC", BFD_RELOC_ARM_GOTFUNCDESC },
19816 { "gotofffuncdesc", BFD_RELOC_ARM_GOTOFFFUNCDESC },
19817 { "GOTOFFFUNCDESC", BFD_RELOC_ARM_GOTOFFFUNCDESC },
19818 { "funcdesc", BFD_RELOC_ARM_FUNCDESC },
19819 { "FUNCDESC", BFD_RELOC_ARM_FUNCDESC },
19820 { "tlsgd_fdpic", BFD_RELOC_ARM_TLS_GD32_FDPIC }, { "TLSGD_FDPIC", BFD_RELOC_ARM_TLS_GD32_FDPIC },
19821 { "tlsldm_fdpic", BFD_RELOC_ARM_TLS_LDM32_FDPIC }, { "TLSLDM_FDPIC", BFD_RELOC_ARM_TLS_LDM32_FDPIC },
19822 { "gottpoff_fdpic", BFD_RELOC_ARM_TLS_IE32_FDPIC }, { "GOTTPOFF_FDIC", BFD_RELOC_ARM_TLS_IE32_FDPIC },
19823 };
19824 #endif
19825
/* Table of all conditional affixes.  0xF is not defined as a condition code.  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},		/* "hs" is a synonym for "cs".	*/
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3}, /* "ul" and "lo" are synonyms for "cc".  */
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}				/* "al" == always, condition field 0xE.	 */
};
19845
/* Emit a barrier-option entry in both lower (L) and upper (U) case,
   with encoding CODE, gated on architecture feature FEAT.  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }

/* Table of barrier option names and their 4-bit encodings.  Entries
   marked ARM_EXT_V8 (the ld/ishld/nshld/oshld load-only options) are
   only available from ARMv8 on; the rest require the base barrier
   extension.  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
};

#undef UL_BARRIER
19871
/* Table of ARM-format instructions.	*/

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  (The instruction-table macros paste "OPS" onto the nops
   argument, so "_3" selects OPS_3 while a plain "3" selects OPS3.)  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
19896
/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  Opcodes are written as bare hex
   digits in the table and turned into hex literals by pasting "0x"
   in front (0x##op); Thumb opcodes may instead be T_MNEM_xyz
   enumerators (lower-case leading letter in the macro name).  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
/* As TxC3, but the infix form is deprecated (OT_cinfix3_deprecated).  */
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
19948
/* ARM-only variants of all the above.	*/
#define CE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Note the #mnem stringification: C3 is invoked with a bare identifier,
   e.g. C3(adrl, ...), unlike CE which takes a string.	*/
#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Thumb-only variants of TCE and TUE.	*/
#define ToC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
    do_##te }

#define ToU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, 0x##top, 0, THUMB_VARIANT, \
    NULL, do_##te }

/* T_MNEM_xyz enumerator variants of ToC.  */
#define toC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, T_MNEM##top, 0, THUMB_VARIANT, NULL, \
    do_##te }

/* T_MNEM_xyz enumerator variants of ToU.  */
#define toU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, T_MNEM##top, 0, THUMB_VARIANT, \
    NULL, do_##te }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  The
   Thumb opcode is 0xe##op: the same encoding with the "always"
   condition (0xE) in the top nibble.  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Build one entry for mnemonic m1-m2-m3 where m2 is a condition code
   stringized into the middle of the name.  When m2 is empty,
   sizeof (#m2) == 1 (just the NUL) and the tag is OT_odd_infix_unc;
   otherwise the infix position is encoded relative to OT_odd_infix_0.	*/
#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Expand to one entry per condition code (plus the bare form) for a
   mnemonic whose condition appears at an odd position.	 */
#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_ (m1,   , m2, op, nops, ops, ae),	\
  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
  xCM_ (m1, le, m2, op, nops, ops, ae),	\
  xCM_ (m1, al, m2, op, nops, ops, ae)

#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
20029
/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.	*/
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,			\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Table entries pass "0" as an encoding function when there is none;
   do_##ae then expands to do_0, i.e. a null function pointer.	*/
#define do_0 0
20067
20068 static const struct asm_opcode insns[] =
20069 {
20070 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
20071 #define THUMB_VARIANT & arm_ext_v4t
20072 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
20073 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
20074 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
20075 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
20076 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
20077 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
20078 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
20079 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
20080 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
20081 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
20082 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
20083 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
20084 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
20085 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
20086 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
20087 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
20088
20089 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
20090 for setting PSR flag bits. They are obsolete in V6 and do not
20091 have Thumb equivalents. */
20092 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
20093 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
20094 CL("tstp", 110f000, 2, (RR, SH), cmp),
20095 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
20096 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
20097 CL("cmpp", 150f000, 2, (RR, SH), cmp),
20098 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
20099 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
20100 CL("cmnp", 170f000, 2, (RR, SH), cmp),
20101
20102 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
20103 tC3("movs", 1b00000, _movs, 2, (RR, SHG), mov, t_mov_cmp),
20104 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
20105 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
20106
20107 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
20108 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
20109 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
20110 OP_RRnpc),
20111 OP_ADDRGLDR),ldst, t_ldst),
20112 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
20113
20114 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20115 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20116 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20117 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20118 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20119 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20120
20121 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
20122 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
20123
20124 /* Pseudo ops. */
20125 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
20126 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
20127 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
20128 tCE("udf", 7f000f0, _udf, 1, (oIffffb), bkpt, t_udf),
20129
20130 /* Thumb-compatibility pseudo ops. */
20131 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
20132 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
20133 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
20134 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
20135 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
20136 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
20137 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
20138 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
20139 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
20140 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
20141 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
20142 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
20143
20144 /* These may simplify to neg. */
20145 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
20146 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
20147
20148 #undef THUMB_VARIANT
20149 #define THUMB_VARIANT & arm_ext_os
20150
20151 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
20152 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
20153
20154 #undef THUMB_VARIANT
20155 #define THUMB_VARIANT & arm_ext_v6
20156
20157 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
20158
20159 /* V1 instructions with no Thumb analogue prior to V6T2. */
20160 #undef THUMB_VARIANT
20161 #define THUMB_VARIANT & arm_ext_v6t2
20162
20163 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
20164 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
20165 CL("teqp", 130f000, 2, (RR, SH), cmp),
20166
20167 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
20168 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
20169 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
20170 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
20171
20172 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20173 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20174
20175 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20176 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
20177
20178 /* V1 instructions with no Thumb analogue at all. */
20179 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
20180 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
20181
20182 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
20183 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
20184 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
20185 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
20186 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
20187 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
20188 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
20189 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
20190
20191 #undef ARM_VARIANT
20192 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
20193 #undef THUMB_VARIANT
20194 #define THUMB_VARIANT & arm_ext_v4t
20195
20196 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
20197 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
20198
20199 #undef THUMB_VARIANT
20200 #define THUMB_VARIANT & arm_ext_v6t2
20201
20202 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
20203 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
20204
20205 /* Generic coprocessor instructions. */
20206 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
20207 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
20208 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
20209 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
20210 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
20211 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
20212 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
20213
20214 #undef ARM_VARIANT
20215 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
20216
20217 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
20218 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
20219
20220 #undef ARM_VARIANT
20221 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
20222 #undef THUMB_VARIANT
20223 #define THUMB_VARIANT & arm_ext_msr
20224
20225 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
20226 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
20227
20228 #undef ARM_VARIANT
20229 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
20230 #undef THUMB_VARIANT
20231 #define THUMB_VARIANT & arm_ext_v6t2
20232
20233 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
20234 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
20235 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
20236 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
20237 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
20238 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
20239 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
20240 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
20241
20242 #undef ARM_VARIANT
20243 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
20244 #undef THUMB_VARIANT
20245 #define THUMB_VARIANT & arm_ext_v4t
20246
20247 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
20248 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
20249 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
20250 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
20251 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
20252 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
20253
20254 #undef ARM_VARIANT
20255 #define ARM_VARIANT & arm_ext_v4t_5
20256
20257 /* ARM Architecture 4T. */
20258 /* Note: bx (and blx) are required on V5, even if the processor does
20259 not support Thumb. */
20260 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
20261
20262 #undef ARM_VARIANT
20263 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
20264 #undef THUMB_VARIANT
20265 #define THUMB_VARIANT & arm_ext_v5t
20266
20267 /* Note: blx has 2 variants; the .value coded here is for
20268 BLX(2). Only this variant has conditional execution. */
20269 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
20270 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
20271
20272 #undef THUMB_VARIANT
20273 #define THUMB_VARIANT & arm_ext_v6t2
20274
20275 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
20276 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
20277 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
20278 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
20279 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
20280 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
20281 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
20282 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
20283
20284 #undef ARM_VARIANT
20285 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
20286 #undef THUMB_VARIANT
20287 #define THUMB_VARIANT & arm_ext_v5exp
20288
20289 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
20290 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
20291 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
20292 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
20293
20294 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
20295 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
20296
20297 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
20298 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
20299 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
20300 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
20301
20302 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20303 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20304 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20305 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20306
20307 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20308 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20309
20310 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
20311 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
20312 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
20313 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
20314
20315 #undef ARM_VARIANT
20316 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
20317 #undef THUMB_VARIANT
20318 #define THUMB_VARIANT & arm_ext_v6t2
20319
20320 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
20321 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
20322 ldrd, t_ldstd),
20323 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
20324 ADDRGLDRS), ldrd, t_ldstd),
20325
20326 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
20327 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
20328
20329 #undef ARM_VARIANT
20330 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
20331
20332 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
20333
20334 #undef ARM_VARIANT
20335 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
20336 #undef THUMB_VARIANT
20337 #define THUMB_VARIANT & arm_ext_v6
20338
20339 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
20340 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
20341 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
20342 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
20343 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
20344 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
20345 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
20346 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
20347 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
20348 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
20349
20350 #undef THUMB_VARIANT
20351 #define THUMB_VARIANT & arm_ext_v6t2_v8m
20352
20353 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
20354 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
20355 strex, t_strex),
20356 #undef THUMB_VARIANT
20357 #define THUMB_VARIANT & arm_ext_v6t2
20358
20359 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
20360 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
20361
20362 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
20363 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
20364
20365 /* ARM V6 not included in V7M. */
20366 #undef THUMB_VARIANT
20367 #define THUMB_VARIANT & arm_ext_v6_notm
20368 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
20369 TUF("rfe", 8900a00, e990c000, 1, (RRw), rfe, rfe),
20370 UF(rfeib, 9900a00, 1, (RRw), rfe),
20371 UF(rfeda, 8100a00, 1, (RRw), rfe),
20372 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
20373 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
20374 UF(rfefa, 8100a00, 1, (RRw), rfe),
20375 TUF("rfeea", 9100a00, e810c000, 1, (RRw), rfe, rfe),
20376 UF(rfeed, 9900a00, 1, (RRw), rfe),
20377 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
20378 TUF("srs", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
20379 TUF("srsea", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
20380 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
20381 UF(srsfa, 9c00500, 2, (oRRw, I31w), srs),
20382 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
20383 UF(srsed, 8400500, 2, (oRRw, I31w), srs),
20384 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
20385 TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
20386 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
20387
20388 /* ARM V6 not included in V7M (eg. integer SIMD). */
20389 #undef THUMB_VARIANT
20390 #define THUMB_VARIANT & arm_ext_v6_dsp
20391 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
20392 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
20393 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20394 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20395 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20396 /* Old name for QASX. */
20397 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20398 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20399 /* Old name for QSAX. */
20400 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20401 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20402 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20403 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20404 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20405 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20406 /* Old name for SASX. */
20407 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20408 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20409 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20410 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20411 /* Old name for SHASX. */
20412 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20413 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20414 /* Old name for SHSAX. */
20415 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20416 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20417 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20418 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20419 /* Old name for SSAX. */
20420 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20421 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20422 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20423 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20424 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20425 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20426 /* Old name for UASX. */
20427 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20428 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20429 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20430 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20431 /* Old name for UHASX. */
20432 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20433 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20434 /* Old name for UHSAX. */
20435 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20436 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20437 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20438 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20439 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20440 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20441 /* Old name for UQASX. */
20442 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20443 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20444 /* Old name for UQSAX. */
20445 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20446 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20447 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20448 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20449 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20450 /* Old name for USAX. */
20451 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20452 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20453 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
20454 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
20455 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
20456 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
20457 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
20458 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
20459 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
20460 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
20461 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
20462 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
20463 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
20464 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
20465 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
20466 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
20467 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
20468 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
20469 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
20470 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
20471 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
20472 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
20473 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
20474 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20475 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20476 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20477 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20478 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20479 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20480 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
20481 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
20482 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
20483 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
20484 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
20485
20486 #undef ARM_VARIANT
20487 #define ARM_VARIANT & arm_ext_v6k_v6t2
20488 #undef THUMB_VARIANT
20489 #define THUMB_VARIANT & arm_ext_v6k_v6t2
20490
20491 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
20492 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
20493 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
20494 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
20495
20496 #undef THUMB_VARIANT
20497 #define THUMB_VARIANT & arm_ext_v6_notm
20498 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
20499 ldrexd, t_ldrexd),
20500 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
20501 RRnpcb), strexd, t_strexd),
20502
20503 #undef THUMB_VARIANT
20504 #define THUMB_VARIANT & arm_ext_v6t2_v8m
20505 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
20506 rd_rn, rd_rn),
20507 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
20508 rd_rn, rd_rn),
20509 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
20510 strex, t_strexbh),
20511 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
20512 strex, t_strexbh),
20513 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
20514
20515 #undef ARM_VARIANT
20516 #define ARM_VARIANT & arm_ext_sec
20517 #undef THUMB_VARIANT
20518 #define THUMB_VARIANT & arm_ext_sec
20519
20520 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
20521
20522 #undef ARM_VARIANT
20523 #define ARM_VARIANT & arm_ext_virt
20524 #undef THUMB_VARIANT
20525 #define THUMB_VARIANT & arm_ext_virt
20526
20527 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
20528 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
20529
20530 #undef ARM_VARIANT
20531 #define ARM_VARIANT & arm_ext_pan
20532 #undef THUMB_VARIANT
20533 #define THUMB_VARIANT & arm_ext_pan
20534
20535 TUF("setpan", 1100000, b610, 1, (I7), setpan, t_setpan),
20536
20537 #undef ARM_VARIANT
20538 #define ARM_VARIANT & arm_ext_v6t2
20539 #undef THUMB_VARIANT
20540 #define THUMB_VARIANT & arm_ext_v6t2
20541
20542 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
20543 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
20544 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
20545 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
20546
20547 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
20548 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
20549
20550 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
20551 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
20552 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
20553 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
20554
20555 #undef ARM_VARIANT
20556 #define ARM_VARIANT & arm_ext_v3
20557 #undef THUMB_VARIANT
20558 #define THUMB_VARIANT & arm_ext_v6t2
20559
20560 TUE("csdb", 320f014, f3af8014, 0, (), noargs, t_csdb),
20561 TUF("ssbb", 57ff040, f3bf8f40, 0, (), noargs, t_csdb),
20562 TUF("pssbb", 57ff044, f3bf8f44, 0, (), noargs, t_csdb),
20563
20564 #undef ARM_VARIANT
20565 #define ARM_VARIANT & arm_ext_v6t2
20566 #undef THUMB_VARIANT
20567 #define THUMB_VARIANT & arm_ext_v6t2_v8m
20568 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
20569 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
20570
20571 /* Thumb-only instructions. */
20572 #undef ARM_VARIANT
20573 #define ARM_VARIANT NULL
20574 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
20575 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
20576
20577 /* ARM does not really have an IT instruction, so always allow it.
20578 The opcode is copied from Thumb in order to allow warnings in
20579 -mimplicit-it=[never | arm] modes. */
20580 #undef ARM_VARIANT
20581 #define ARM_VARIANT & arm_ext_v1
20582 #undef THUMB_VARIANT
20583 #define THUMB_VARIANT & arm_ext_v6t2
20584
20585 TUE("it", bf08, bf08, 1, (COND), it, t_it),
20586 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
20587 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
20588 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
20589 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
20590 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
20591 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
20592 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
20593 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
20594 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
20595 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
20596 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
20597 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
20598 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
20599 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
20600 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
20601 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
20602 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
20603
20604 /* Thumb2 only instructions. */
20605 #undef ARM_VARIANT
20606 #define ARM_VARIANT NULL
20607
20608 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
20609 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
20610 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
20611 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
20612 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
20613 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
20614
20615 /* Hardware division instructions. */
20616 #undef ARM_VARIANT
20617 #define ARM_VARIANT & arm_ext_adiv
20618 #undef THUMB_VARIANT
20619 #define THUMB_VARIANT & arm_ext_div
20620
20621 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
20622 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
20623
20624 /* ARM V6M/V7 instructions. */
20625 #undef ARM_VARIANT
20626 #define ARM_VARIANT & arm_ext_barrier
20627 #undef THUMB_VARIANT
20628 #define THUMB_VARIANT & arm_ext_barrier
20629
20630 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
20631 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
20632 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
20633
20634 /* ARM V7 instructions. */
20635 #undef ARM_VARIANT
20636 #define ARM_VARIANT & arm_ext_v7
20637 #undef THUMB_VARIANT
20638 #define THUMB_VARIANT & arm_ext_v7
20639
20640 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
20641 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
20642
20643 #undef ARM_VARIANT
20644 #define ARM_VARIANT & arm_ext_mp
20645 #undef THUMB_VARIANT
20646 #define THUMB_VARIANT & arm_ext_mp
20647
20648 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
20649
20650 /* ARMv8 instructions.  */
20651 #undef ARM_VARIANT
20652 #define ARM_VARIANT & arm_ext_v8
20653
20654 /* Instructions shared between armv8-a and armv8-m. */
20655 #undef THUMB_VARIANT
20656 #define THUMB_VARIANT & arm_ext_atomics
20657
20658 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20659 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20660 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20661 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
20662 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
20663 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
20664 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20665 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
20666 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20667 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
20668 stlex, t_stlex),
20669 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
20670 stlex, t_stlex),
20671 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
20672 stlex, t_stlex),
20673 #undef THUMB_VARIANT
20674 #define THUMB_VARIANT & arm_ext_v8
20675
20676 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
20677 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
20678 ldrexd, t_ldrexd),
20679 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
20680 strexd, t_strexd),
20681
20682 /* Defined in V8 but is in undefined encoding space for earlier
20683 architectures. However earlier architectures are required to treat
20684    this instruction as a semihosting trap as well. Hence while not explicitly
20685 defined as such, it is in fact correct to define the instruction for all
20686 architectures. */
20687 #undef THUMB_VARIANT
20688 #define THUMB_VARIANT & arm_ext_v1
20689 #undef ARM_VARIANT
20690 #define ARM_VARIANT & arm_ext_v1
20691 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
20692
20693 /* ARMv8 T32 only. */
20694 #undef ARM_VARIANT
20695 #define ARM_VARIANT NULL
20696 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
20697 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
20698 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
20699
20700 /* FP for ARMv8. */
20701 #undef ARM_VARIANT
20702 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
20703 #undef THUMB_VARIANT
20704 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
20705
20706 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
20707 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
20708 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
20709 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
20710 nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
20711 nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
20712 nUF(vcvta, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvta),
20713 nUF(vcvtn, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtn),
20714 nUF(vcvtp, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtp),
20715 nUF(vcvtm, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtm),
20716 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
20717 nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ), vrintz),
20718 nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ), vrintx),
20719 nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ), vrinta),
20720 nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ), vrintn),
20721 nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ), vrintp),
20722 nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ), vrintm),
20723
20724 /* Crypto v1 extensions. */
20725 #undef ARM_VARIANT
20726 #define ARM_VARIANT & fpu_crypto_ext_armv8
20727 #undef THUMB_VARIANT
20728 #define THUMB_VARIANT & fpu_crypto_ext_armv8
20729
20730 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
20731 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
20732 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
20733 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
20734 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
20735 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
20736 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
20737 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
20738 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
20739 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
20740 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
20741 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
20742 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
20743 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
20744
20745 #undef ARM_VARIANT
20746 #define ARM_VARIANT & crc_ext_armv8
20747 #undef THUMB_VARIANT
20748 #define THUMB_VARIANT & crc_ext_armv8
20749 TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
20750 TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
20751 TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
20752 TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
20753 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
20754 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
20755
20756 /* ARMv8.2 RAS extension. */
20757 #undef ARM_VARIANT
20758 #define ARM_VARIANT & arm_ext_ras
20759 #undef THUMB_VARIANT
20760 #define THUMB_VARIANT & arm_ext_ras
20761 TUE ("esb", 320f010, f3af8010, 0, (), noargs, noargs),
20762
20763 #undef ARM_VARIANT
20764 #define ARM_VARIANT & arm_ext_v8_3
20765 #undef THUMB_VARIANT
20766 #define THUMB_VARIANT & arm_ext_v8_3
20767 NCE (vjcvt, eb90bc0, 2, (RVS, RVD), vjcvt),
20768 NUF (vcmla, 0, 4, (RNDQ, RNDQ, RNDQ_RNSC, EXPi), vcmla),
20769 NUF (vcadd, 0, 4, (RNDQ, RNDQ, RNDQ, EXPi), vcadd),
20770
20771 #undef ARM_VARIANT
20772 #define ARM_VARIANT & fpu_neon_ext_dotprod
20773 #undef THUMB_VARIANT
20774 #define THUMB_VARIANT & fpu_neon_ext_dotprod
20775 NUF (vsdot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_s),
20776 NUF (vudot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_u),
20777
20778 #undef ARM_VARIANT
20779 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
20780 #undef THUMB_VARIANT
20781 #define THUMB_VARIANT NULL
20782
20783 cCE("wfs", e200110, 1, (RR), rd),
20784 cCE("rfs", e300110, 1, (RR), rd),
20785 cCE("wfc", e400110, 1, (RR), rd),
20786 cCE("rfc", e500110, 1, (RR), rd),
20787
20788 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
20789 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
20790 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
20791 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
20792
20793 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
20794 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
20795 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
20796 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
20797
20798 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
20799 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
20800 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
20801 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
20802 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
20803 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
20804 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
20805 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
20806 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
20807 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
20808 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
20809 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
20810
20811 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
20812 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
20813 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
20814 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
20815 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
20816 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
20817 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
20818 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
20819 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
20820 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
20821 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
20822 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
20823
20824 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
20825 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
20826 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
20827 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
20828 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
20829 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
20830 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
20831 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
20832 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
20833 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
20834 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
20835 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
20836
20837 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
20838 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
20839 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
20840 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
20841 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
20842 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
20843 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
20844 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
20845 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
20846 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
20847 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
20848 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
20849
20850 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
20851 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
20852 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
20853 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
20854 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
20855 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
20856 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
20857 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
20858 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
20859 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
20860 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
20861 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
20862
20863 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
20864 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
20865 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
20866 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
20867 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
20868 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
20869 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
20870 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
20871 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
20872 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
20873 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
20874 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
20875
20876 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
20877 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
20878 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
20879 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
20880 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
20881 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
20882 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
20883 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
20884 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
20885 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
20886 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
20887 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
20888
20889 	cCL("exps",	e708100, 2, (RF, RF_IF),	      rd_rm),
20890 	cCL("expsp",	e708120, 2, (RF, RF_IF),	      rd_rm),
20891 	cCL("expsm",	e708140, 2, (RF, RF_IF),	      rd_rm),
20892 	cCL("expsz",	e708160, 2, (RF, RF_IF),	      rd_rm),
20893 	cCL("expd",	e708180, 2, (RF, RF_IF),	      rd_rm),
20894 	cCL("expdp",	e7081a0, 2, (RF, RF_IF),	      rd_rm),
20895 	cCL("expdm",	e7081c0, 2, (RF, RF_IF),	      rd_rm),
20896 	cCL("expdz",	e7081e0, 2, (RF, RF_IF),	      rd_rm),
20897 	cCL("expe",	e788100, 2, (RF, RF_IF),	      rd_rm),
20898 	cCL("expep",	e788120, 2, (RF, RF_IF),	      rd_rm),
20899 	cCL("expem",	e788140, 2, (RF, RF_IF),	      rd_rm),
	/* Was "expdz", duplicating the e7081e0 entry above.  The e?88160
	   encoding is the extended-precision round-to-zero variant, so the
	   mnemonic is "expez" (cf. lgnez/sinez/logez in the sibling groups).  */
20900 	cCL("expez",	e788160, 2, (RF, RF_IF),	      rd_rm),
20901
20902 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
20903 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
20904 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
20905 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
20906 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
20907 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
20908 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
20909 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
20910 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
20911 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
20912 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
20913 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
20914
20915 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
20916 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
20917 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
20918 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
20919 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
20920 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
20921 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
20922 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
20923 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
20924 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
20925 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
20926 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
20927
20928 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
20929 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
20930 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
20931 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
20932 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
20933 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
20934 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
20935 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
20936 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
20937 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
20938 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
20939 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
20940
20941 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
20942 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
20943 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
20944 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
20945 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
20946 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
20947 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
20948 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
20949 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
20950 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
20951 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
20952 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
20953
20954 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
20955 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
20956 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
20957 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
20958 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
20959 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
20960 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
20961 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
20962 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
20963 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
20964 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
20965 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
20966
20967 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
20968 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
20969 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
20970 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
20971 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
20972 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
20973 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
20974 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
20975 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
20976 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
20977 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
20978 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
20979
20980 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
20981 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
20982 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
20983 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
20984 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
20985 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
20986 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
20987 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
20988 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
20989 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
20990 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
20991 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
20992
20993 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
20994 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
20995 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
20996 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
20997 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
20998 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
20999 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
21000 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
21001 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
21002 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
21003 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
21004 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
21005
21006 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
21007 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
21008 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
21009 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
21010 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
21011 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21012 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21013 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21014 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
21015 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
21016 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
21017 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
21018
21019 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
21020 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
21021 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
21022 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
21023 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
21024 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21025 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21026 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21027 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
21028 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
21029 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
21030 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
21031
21032 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
21033 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
21034 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
21035 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
21036 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
21037 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21038 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21039 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21040 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
21041 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
21042 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
21043 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
21044
21045 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
21046 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
21047 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
21048 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
21049 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
21050 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21051 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21052 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21053 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
21054 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
21055 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
21056 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
21057
21058 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
21059 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
21060 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
21061 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
21062 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
21063 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21064 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21065 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21066 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
21067 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
21068 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
21069 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
21070
21071 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
21072 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
21073 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
21074 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
21075 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
21076 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21077 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21078 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21079 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
21080 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
21081 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
21082 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
21083
21084 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
21085 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
21086 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
21087 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
21088 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
21089 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21090 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21091 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21092 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
21093 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
21094 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
21095 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
21096
21097 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
21098 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
21099 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
21100 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
21101 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
21102 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21103 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21104 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21105 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
21106 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
21107 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
21108 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
21109
21110 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
21111 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
21112 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
21113 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
21114 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
21115 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21116 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21117 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21118 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
21119 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
21120 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
21121 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
21122
21123 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
21124 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
21125 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
21126 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
21127 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
21128 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21129 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21130 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21131 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
21132 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
21133 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
21134 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
21135
21136 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
21137 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
21138 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
21139 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
21140 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
21141 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21142 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21143 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21144 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
21145 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
21146 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
21147 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
21148
21149 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
21150 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
21151 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
21152 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
21153 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
21154 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21155 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21156 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21157 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
21158 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
21159 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
21160 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
21161
21162 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
21163 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
21164 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
21165 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
21166 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
21167 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
21168 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
21169 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
21170 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
21171 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
21172 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
21173 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
21174
21175 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
21176 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
21177 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
21178 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
21179
21180 cCL("flts", e000110, 2, (RF, RR), rn_rd),
21181 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
21182 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
21183 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
21184 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
21185 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
21186 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
21187 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
21188 cCL("flte", e080110, 2, (RF, RR), rn_rd),
21189 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
21190 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
21191 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
21192
21193 /* The implementation of the FIX instruction is broken on some
21194 assemblers, in that it accepts a precision specifier as well as a
21195 rounding specifier, despite the fact that this is meaningless.
21196 To be more compatible, we accept it as well, though of course it
21197 does not set any bits. */
21198 cCE("fix", e100110, 2, (RR, RF), rd_rm),
21199 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
21200 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
21201 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
21202 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
21203 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
21204 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
21205 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
21206 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
21207 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
21208 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
21209 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
21210 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
21211
21212 /* Instructions that were new with the real FPA, call them V2. */
21213 #undef ARM_VARIANT
21214 #define ARM_VARIANT & fpu_fpa_ext_v2
21215
21216 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
21217 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
21218 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
21219 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
21220 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
21221 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
21222
21223 #undef ARM_VARIANT
21224 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
21225
21226 /* Moves and type conversions. */
21227 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
21228 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
21229 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
21230 cCE("fmstat", ef1fa10, 0, (), noargs),
21231 cCE("vmrs", ef00a10, 2, (APSR_RR, RVC), vmrs),
21232 cCE("vmsr", ee00a10, 2, (RVC, RR), vmsr),
21233 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
21234 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
21235 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
21236 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
21237 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
21238 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
21239 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
21240 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
21241
21242 /* Memory operations. */
21243 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
21244 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
21245 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
21246 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
21247 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
21248 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
21249 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
21250 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
21251 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
21252 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
21253 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
21254 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
21255 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
21256 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
21257 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
21258 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
21259 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
21260 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
21261
21262 /* Monadic operations. */
21263 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
21264 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
21265 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
21266
21267 /* Dyadic operations. */
21268 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21269 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21270 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21271 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21272 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21273 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21274 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21275 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21276 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21277
21278 /* Comparisons. */
21279 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
21280 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
21281 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
21282 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
21283
21284 /* Double precision load/store are still present on single precision
21285 implementations. */
21286 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
21287 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
21288 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
21289 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
21290 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
21291 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
21292 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
21293 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
21294 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
21295 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
21296
21297 #undef ARM_VARIANT
21298 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
21299
21300 /* Moves and type conversions. */
21301 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
21302 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
21303 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
21304 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
21305 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
21306 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
21307 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
21308 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
21309 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
21310 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
21311 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
21312 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
21313 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
21314
21315 /* Monadic operations. */
21316 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
21317 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
21318 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
21319
21320 /* Dyadic operations. */
21321 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21322 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21323 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21324 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21325 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21326 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21327 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21328 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21329 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21330
21331 /* Comparisons. */
21332 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
21333 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
21334 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
21335 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
21336
21337 #undef ARM_VARIANT
21338 #define ARM_VARIANT & fpu_vfp_ext_v2
21339
21340 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
21341 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
21342 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
21343 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
21344
21345 /* Instructions which may belong to either the Neon or VFP instruction sets.
21346 Individual encoder functions perform additional architecture checks. */
21347 #undef ARM_VARIANT
21348 #define ARM_VARIANT & fpu_vfp_ext_v1xd
21349 #undef THUMB_VARIANT
21350 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
21351
21352 /* These mnemonics are unique to VFP. */
21353 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
21354 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
21355 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
21356 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
21357 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
21358 nCE(vcmp, _vcmp, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
21359 nCE(vcmpe, _vcmpe, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
21360 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
21361 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
21362 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
21363
21364 /* Mnemonics shared by Neon and VFP. */
21365 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
21366 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
21367 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
21368
21369 nCEF(vadd, _vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
21370 nCEF(vsub, _vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
21371
21372 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
21373 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
21374
21375 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
21376 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
21377 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
21378 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
21379 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
21380 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
21381
21382 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
21383 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
21384 NCEF(vcvtb, eb20a40, 2, (RVSD, RVSD), neon_cvtb),
21385 NCEF(vcvtt, eb20a40, 2, (RVSD, RVSD), neon_cvtt),
21386
21387
21388 /* NOTE: All VMOV encoding is special-cased! */
21389 NCE(vmov, 0, 1, (VMOV), neon_mov),
21390 NCE(vmovq, 0, 1, (VMOV), neon_mov),
21391
21392 #undef THUMB_VARIANT
21393 /* Could be either VLDR/VSTR or VLDR/VSTR (system register) which are guarded
21394 by different feature bits. Since we are setting the Thumb guard, we can
21395 require Thumb-1 which makes it a nop guard and set the right feature bit in
21396 do_vldr_vstr (). */
21397 #define THUMB_VARIANT & arm_ext_v4t
21398 NCE(vldr, d100b00, 2, (VLDR, ADDRGLDC), vldr_vstr),
21399 NCE(vstr, d000b00, 2, (VLDR, ADDRGLDC), vldr_vstr),
21400
21401 #undef ARM_VARIANT
21402 #define ARM_VARIANT & arm_ext_fp16
21403 #undef THUMB_VARIANT
21404 #define THUMB_VARIANT & arm_ext_fp16
21405 /* New instructions added from v8.2, allowing the extraction and insertion of
21406 the upper 16 bits of a 32-bit vector register. */
21407 NCE (vmovx, eb00a40, 2, (RVS, RVS), neon_movhf),
21408 NCE (vins, eb00ac0, 2, (RVS, RVS), neon_movhf),
21409
21410 /* New backported fma/fms instructions optional in v8.2. */
21411 NCE (vfmal, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmal),
21412 NCE (vfmsl, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmsl),
21413
21414 #undef THUMB_VARIANT
21415 #define THUMB_VARIANT & fpu_neon_ext_v1
21416 #undef ARM_VARIANT
21417 #define ARM_VARIANT & fpu_neon_ext_v1
21418
21419 /* Data processing with three registers of the same length. */
21420 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
21421 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
21422 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
21423 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
21424 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
21425 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
21426 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
21427 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
21428 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
21429 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
21430 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
21431 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
21432 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
21433 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
21434 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
21435 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
21436 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
21437 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
21438 /* If not immediate, fall back to neon_dyadic_i64_su.
21439 shl_imm should accept I8 I16 I32 I64,
21440 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
21441 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
21442 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
21443 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
21444 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
21445 /* Logic ops, types optional & ignored. */
21446 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
21447 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
21448 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
21449 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
21450 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
21451 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
21452 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
21453 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
21454 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
21455 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
21456 /* Bitfield ops, untyped. */
21457 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
21458 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
21459 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
21460 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
21461 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
21462 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
21463 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
21464 nUF(vabd, _vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
21465 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
21466 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
21467 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
21468 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
21469 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
21470 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
21471 back to neon_dyadic_if_su. */
21472 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
21473 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
21474 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
21475 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
21476 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
21477 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
21478 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
21479 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
21480 /* Comparison. Type I8 I16 I32 F32. */
21481 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
21482 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
21483 /* As above, D registers only. */
21484 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
21485 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
21486 /* Int and float variants, signedness unimportant. */
21487 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
21488 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
21489 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
21490 /* Add/sub take types I8 I16 I32 I64 F32. */
21491 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
21492 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
21493 /* vtst takes sizes 8, 16, 32. */
21494 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
21495 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
21496 /* VMUL takes I8 I16 I32 F32 P8. */
21497 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
21498 /* VQD{R}MULH takes S16 S32. */
21499 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
21500 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
21501 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
21502 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
21503 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
21504 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
21505 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
21506 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
21507 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
21508 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
21509 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
21510 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
21511 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
21512 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
21513 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
21514 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
21515 /* ARM v8.1 extension. */
21516 nUF (vqrdmlah, _vqrdmlah, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
21517 nUF (vqrdmlahq, _vqrdmlah, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
21518 nUF (vqrdmlsh, _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
21519 nUF (vqrdmlshq, _vqrdmlsh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
21520
21521 /* Two address, int/float. Types S8 S16 S32 F32. */
21522 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
21523 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
21524
21525 /* Data processing with two registers and a shift amount. */
21526 /* Right shifts, and variants with rounding.
21527 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
21528 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
21529 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
21530 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
21531 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
21532 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
21533 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
21534 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
21535 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
21536 /* Shift and insert. Sizes accepted 8 16 32 64. */
21537 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
21538 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
21539 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
21540 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
21541 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
21542 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
21543 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
21544 /* Right shift immediate, saturating & narrowing, with rounding variants.
21545 Types accepted S16 S32 S64 U16 U32 U64. */
21546 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
21547 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
21548 /* As above, unsigned. Types accepted S16 S32 S64. */
21549 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
21550 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
21551 /* Right shift narrowing. Types accepted I16 I32 I64. */
21552 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
21553 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
21554 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
21555 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
21556 /* CVT with optional immediate for fixed-point variant. */
21557 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
21558
21559 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
21560 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
21561
21562 /* Data processing, three registers of different lengths. */
21563 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
21564 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
21565 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
21566 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
21567 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
21568 /* If not scalar, fall back to neon_dyadic_long.
21569 Vector types as above, scalar types S16 S32 U16 U32. */
21570 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
21571 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
21572 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
21573 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
21574 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
21575 /* Dyadic, narrowing insns. Types I16 I32 I64. */
21576 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
21577 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
21578 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
21579 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
21580 /* Saturating doubling multiplies. Types S16 S32. */
21581 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
21582 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
21583 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
21584 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
21585 S16 S32 U16 U32. */
21586 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
21587
21588 /* Extract. Size 8. */
21589 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
21590 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
21591
21592 /* Two registers, miscellaneous. */
21593 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
21594 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
21595 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
21596 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
21597 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
21598 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
21599 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
21600 /* Vector replicate. Sizes 8 16 32. */
21601 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
21602 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
21603 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
21604 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
21605 /* VMOVN. Types I16 I32 I64. */
21606 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
21607 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
21608 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
21609 /* VQMOVUN. Types S16 S32 S64. */
21610 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
21611 /* VZIP / VUZP. Sizes 8 16 32. */
21612 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
21613 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
21614 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
21615 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
21616 /* VQABS / VQNEG. Types S8 S16 S32. */
21617 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
21618 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
21619 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
21620 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
21621 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
21622 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
21623 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
21624 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
21625 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
21626 /* Reciprocal estimates. Types U32 F16 F32. */
21627 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
21628 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
21629 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
21630 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
21631 /* VCLS. Types S8 S16 S32. */
21632 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
21633 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
21634 /* VCLZ. Types I8 I16 I32. */
21635 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
21636 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
21637 /* VCNT. Size 8. */
21638 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
21639 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
21640 /* Two address, untyped. */
21641 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
21642 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
21643 /* VTRN. Sizes 8 16 32. */
21644 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
21645 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
21646
21647 /* Table lookup. Size 8. */
21648 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
21649 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
21650
21651 #undef THUMB_VARIANT
21652 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
21653 #undef ARM_VARIANT
21654 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
21655
21656 /* Neon element/structure load/store. */
21657 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
21658 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
21659 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
21660 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
21661 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
21662 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
21663 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
21664 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
21665
21666 #undef THUMB_VARIANT
21667 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
21668 #undef ARM_VARIANT
21669 #define ARM_VARIANT & fpu_vfp_ext_v3xd
21670 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
21671 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
21672 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
21673 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
21674 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
21675 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
21676 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
21677 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
21678 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
21679
21680 #undef THUMB_VARIANT
21681 #define THUMB_VARIANT & fpu_vfp_ext_v3
21682 #undef ARM_VARIANT
21683 #define ARM_VARIANT & fpu_vfp_ext_v3
21684
21685 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
21686 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
21687 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
21688 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
21689 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
21690 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
21691 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
21692 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
21693 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
21694
21695 #undef ARM_VARIANT
21696 #define ARM_VARIANT & fpu_vfp_ext_fma
21697 #undef THUMB_VARIANT
21698 #define THUMB_VARIANT & fpu_vfp_ext_fma
21699 /* Mnemonics shared by Neon and VFP. These are included in the
21700 VFP FMA variant; NEON and VFP FMA always includes the NEON
21701 FMA instructions. */
21702 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
21703 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
21704 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
21705 the v form should always be used. */
21706 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21707 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21708 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21709 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21710 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
21711 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
21712
21713 #undef THUMB_VARIANT
21714 #undef ARM_VARIANT
21715 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
21716
21717 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21718 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21719 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21720 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21721 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21722 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21723 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
21724 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
21725
21726 #undef ARM_VARIANT
21727 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
21728
21729 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
21730 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
21731 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
21732 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
21733 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
21734 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
21735 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
21736 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
21737 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
21738 cCE("textrmub",e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
21739 cCE("textrmuh",e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
21740 cCE("textrmuw",e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
21741 cCE("textrmsb",e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
21742 cCE("textrmsh",e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
21743 cCE("textrmsw",e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
21744 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
21745 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
21746 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
21747 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
21748 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
21749 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21750 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21751 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21752 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21753 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21754 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21755 cCE("tmovmskb",e100030, 2, (RR, RIWR), rd_rn),
21756 cCE("tmovmskh",e500030, 2, (RR, RIWR), rd_rn),
21757 cCE("tmovmskw",e900030, 2, (RR, RIWR), rd_rn),
21758 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
21759 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
21760 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
21761 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
21762 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
21763 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
21764 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
21765 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
21766 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21767 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21768 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21769 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21770 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21771 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21772 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21773 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21774 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21775 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
21776 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21777 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21778 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21779 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21780 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21781 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21782 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21783 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21784 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21785 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21786 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21787 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21788 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21789 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21790 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21791 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21792 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21793 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21794 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21795 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21796 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21797 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
21798 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
21799 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21800 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21801 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21802 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21803 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21804 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21805 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21806 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21807 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21808 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21809 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21810 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21811 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21812 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21813 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21814 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21815 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21816 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21817 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
21818 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21819 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21820 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21821 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21822 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21823 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21824 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21825 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21826 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21827 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21828 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21829 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21830 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21831 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21832 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21833 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21834 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21835 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21836 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21837 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21838 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21839 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
21840 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21841 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21842 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21843 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21844 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21845 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21846 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21847 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21848 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21849 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21850 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21851 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21852 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21853 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21854 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21855 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21856 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21857 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21858 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21859 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21860 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
21861 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
21862 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21863 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21864 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21865 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21866 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21867 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21868 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21869 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21870 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21871 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
21872 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
21873 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
21874 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
21875 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
21876 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
21877 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21878 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21879 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21880 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
21881 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
21882 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
21883 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
21884 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
21885 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
21886 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21887 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21888 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21889 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21890 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
21891
21892 #undef ARM_VARIANT
21893 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
21894
21895 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
21896 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
21897 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
21898 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
21899 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
21900 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
21901 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21902 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21903 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21904 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21905 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21906 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21907 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21908 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21909 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21910 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21911 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21912 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21913 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21914 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21915 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
21916 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21917 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21918 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21919 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21920 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21921 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21922 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21923 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21924 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21925 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21926 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21927 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21928 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21929 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21930 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21931 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21932 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21933 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21934 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21935 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21936 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21937 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21938 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21939 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21940 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21941 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21942 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21943 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21944 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21945 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21946 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21947 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21948 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21949 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21950 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21951 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21952
21953 #undef ARM_VARIANT
21954 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
21955
21956 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
21957 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
21958 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
21959 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
21960 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
21961 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
21962 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
21963 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
21964 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
21965 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
21966 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
21967 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
21968 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
21969 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
21970 cCE("cfmv64lr",e000510, 2, (RMDX, RR), rn_rd),
21971 cCE("cfmvr64l",e100510, 2, (RR, RMDX), rd_rn),
21972 cCE("cfmv64hr",e000530, 2, (RMDX, RR), rn_rd),
21973 cCE("cfmvr64h",e100530, 2, (RR, RMDX), rd_rn),
21974 cCE("cfmval32",e200440, 2, (RMAX, RMFX), rd_rn),
21975 cCE("cfmv32al",e100440, 2, (RMFX, RMAX), rd_rn),
21976 cCE("cfmvam32",e200460, 2, (RMAX, RMFX), rd_rn),
21977 cCE("cfmv32am",e100460, 2, (RMFX, RMAX), rd_rn),
21978 cCE("cfmvah32",e200480, 2, (RMAX, RMFX), rd_rn),
21979 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX), rd_rn),
21980 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
21981 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
21982 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
21983 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
21984 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX), mav_dspsc),
21985 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS), rd),
21986 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
21987 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
21988 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
21989 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
21990 cCE("cfcvt32s",e000480, 2, (RMF, RMFX), rd_rn),
21991 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX), rd_rn),
21992 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX), rd_rn),
21993 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX), rd_rn),
21994 cCE("cfcvts32",e100580, 2, (RMFX, RMF), rd_rn),
21995 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD), rd_rn),
21996 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
21997 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
21998 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR), mav_triple),
21999 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR), mav_triple),
22000 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
22001 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
22002 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
22003 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
22004 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
22005 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
22006 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
22007 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
22008 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
22009 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
22010 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
22011 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
22012 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
22013 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
22014 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
22015 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
22016 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
22017 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
22018 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
22019 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
22020 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
22021 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
22022 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
22023 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
22024 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
22025 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
22026 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
22027 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
22028 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
22029 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
22030 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
22031 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
22032
22033 /* ARMv8.5-A instructions. */
22034 #undef ARM_VARIANT
22035 #define ARM_VARIANT & arm_ext_sb
22036 #undef THUMB_VARIANT
22037 #define THUMB_VARIANT & arm_ext_sb
22038 TUF("sb", 57ff070, f3bf8f70, 0, (), noargs, noargs),
22039
22040 #undef ARM_VARIANT
22041 #define ARM_VARIANT & arm_ext_predres
22042 #undef THUMB_VARIANT
22043 #define THUMB_VARIANT & arm_ext_predres
22044 CE("cfprctx", e070f93, 1, (RRnpc), rd),
22045 CE("dvprctx", e070fb3, 1, (RRnpc), rd),
22046 CE("cpprctx", e070ff3, 1, (RRnpc), rd),
22047
22048 /* ARMv8-M instructions. */
22049 #undef ARM_VARIANT
22050 #define ARM_VARIANT NULL
22051 #undef THUMB_VARIANT
22052 #define THUMB_VARIANT & arm_ext_v8m
22053 ToU("sg", e97fe97f, 0, (), noargs),
22054 ToC("blxns", 4784, 1, (RRnpc), t_blx),
22055 ToC("bxns", 4704, 1, (RRnpc), t_bx),
22056 ToC("tt", e840f000, 2, (RRnpc, RRnpc), tt),
22057 ToC("ttt", e840f040, 2, (RRnpc, RRnpc), tt),
22058 ToC("tta", e840f080, 2, (RRnpc, RRnpc), tt),
22059 ToC("ttat", e840f0c0, 2, (RRnpc, RRnpc), tt),
22060
22061 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
22062 instructions behave as nop if no VFP is present. */
22063 #undef THUMB_VARIANT
22064 #define THUMB_VARIANT & arm_ext_v8m_main
22065 ToC("vlldm", ec300a00, 1, (RRnpc), rn),
22066 ToC("vlstm", ec200a00, 1, (RRnpc), rn),
22067
22068 /* Armv8.1-M Mainline instructions. */
22069 #undef THUMB_VARIANT
22070 #define THUMB_VARIANT & arm_ext_v8_1m_main
22071 toC("bf", _bf, 2, (EXPs, EXPs), t_branch_future),
22072 toU("bfcsel", _bfcsel, 4, (EXPs, EXPs, EXPs, COND), t_branch_future),
22073 toC("bfx", _bfx, 2, (EXPs, RRnpcsp), t_branch_future),
22074 toC("bfl", _bfl, 2, (EXPs, EXPs), t_branch_future),
22075 toC("bflx", _bflx, 2, (EXPs, RRnpcsp), t_branch_future),
22076
22077 toU("dls", _dls, 2, (LR, RRnpcsp), t_loloop),
22078 toU("wls", _wls, 3, (LR, RRnpcsp, EXP), t_loloop),
22079 toU("le", _le, 2, (oLR, EXP), t_loloop),
22080
22081 ToC("clrm", e89f0000, 1, (CLRMLST), t_clrm),
22082 ToC("vscclrm", ec9f0a00, 1, (VRSDVLST), t_vscclrm)
22083 };
22084 #undef ARM_VARIANT
22085 #undef THUMB_VARIANT
22086 #undef TCE
22087 #undef TUE
22088 #undef TUF
22089 #undef TCC
22090 #undef cCE
22091 #undef cCL
22092 #undef C3E
22093 #undef C3
22094 #undef CE
22095 #undef CM
22096 #undef CL
22097 #undef UE
22098 #undef UF
22099 #undef UT
22100 #undef NUF
22101 #undef nUF
22102 #undef NCE
22103 #undef nCE
22104 #undef OPS0
22105 #undef OPS1
22106 #undef OPS2
22107 #undef OPS3
22108 #undef OPS4
22109 #undef OPS5
22110 #undef OPS6
22111 #undef do_0
22112 #undef ToC
22113 #undef toC
22114 #undef ToU
22115 #undef toU
22116 \f
22117 /* MD interface: bits in the object file. */
22118
22119 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
22120 for use in the a.out file, and stores them in the array pointed to by buf.
22121 This knows about the endian-ness of the target machine and does
22122 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
22123 2 (short) and 4 (long) Floating numbers are put out as a series of
22124 LITTLENUMS (shorts, here at least). */
22125
22126 void
22127 md_number_to_chars (char * buf, valueT val, int n)
22128 {
22129 if (target_big_endian)
22130 number_to_chars_bigendian (buf, val, n);
22131 else
22132 number_to_chars_littleendian (buf, val, n);
22133 }
22134
22135 static valueT
22136 md_chars_to_number (char * buf, int n)
22137 {
22138 valueT result = 0;
22139 unsigned char * where = (unsigned char *) buf;
22140
22141 if (target_big_endian)
22142 {
22143 while (n--)
22144 {
22145 result <<= 8;
22146 result |= (*where++ & 255);
22147 }
22148 }
22149 else
22150 {
22151 while (n--)
22152 {
22153 result <<= 8;
22154 result |= (where[n] & 255);
22155 }
22156 }
22157
22158 return result;
22159 }
22160
22161 /* MD interface: Sections. */
22162
22163 /* Calculate the maximum variable size (i.e., excluding fr_fix)
22164 that an rs_machine_dependent frag may reach. */
22165
22166 unsigned int
22167 arm_frag_max_var (fragS *fragp)
22168 {
22169 /* We only use rs_machine_dependent for variable-size Thumb instructions,
22170 which are either THUMB_SIZE (2) or INSN_SIZE (4).
22171
22172 Note that we generate relaxable instructions even for cases that don't
22173 really need it, like an immediate that's a trivial constant. So we're
22174 overestimating the instruction size for some of those cases. Rather
22175 than putting more intelligence here, it would probably be better to
22176 avoid generating a relaxation frag in the first place when it can be
22177 determined up front that a short instruction will suffice. */
22178
22179 gas_assert (fragp->fr_type == rs_machine_dependent);
22180 return INSN_SIZE;
22181 }
22182
22183 /* Estimate the size of a frag before relaxing. Assume everything fits in
22184 2 bytes. */
22185
22186 int
22187 md_estimate_size_before_relax (fragS * fragp,
22188 segT segtype ATTRIBUTE_UNUSED)
22189 {
22190 fragp->fr_var = 2;
22191 return 2;
22192 }
22193
22194 /* Convert a machine dependent frag. */
22195
/* Finalize a relaxed Thumb instruction frag: if relaxation settled on
   the wide form (fr_var == 4) rewrite the narrow 16-bit encoding held
   in the frag as the equivalent 32-bit encoding, transplanting the
   register fields, and in all cases emit the fixup that will later
   fill in the immediate or branch offset.  */

void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* The relaxable instruction sits at the end of the frag's fixed part.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  /* Read back the original narrow encoding so its register fields can be
     copied into the wide form below.  */
  old_op = bfd_get_16(abfd, buf);
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* The Rt field position differs between the SP/PC-relative
	     forms (opcode nibble 4 or 9) and the register-offset forms.  */
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the "ldr Rt, =literal"-style pseudo is PC-relative here.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* The narrow form implicitly adds 4 to PC; compensate.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* mov/movs put the register in Rd (bit 8); cmp/cmn in Rn (bit 16).  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Carry the condition field across to the wide encoding.  */
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Rd and Rn from the narrow three-register form.  */
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 (the S bit) selects the flag-setting variants, which
	     use the add/sub immediate relocation.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort ();
    }
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;

  /* Set whether we use thumb-2 ISA based on final relaxation results.  */
  if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
      && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
    ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
}
22367
22368 /* Return the size of a relaxable immediate operand instruction.
22369 SHIFT and SIZE specify the form of the allowable immediate. */
22370 static int
22371 relax_immediate (fragS *fragp, int size, int shift)
22372 {
22373 offsetT offset;
22374 offsetT mask;
22375 offsetT low;
22376
22377 /* ??? Should be able to do better than this. */
22378 if (fragp->fr_symbol)
22379 return 4;
22380
22381 low = (1 << shift) - 1;
22382 mask = (1 << (shift + size)) - (1 << shift);
22383 offset = fragp->fr_offset;
22384 /* Force misaligned offsets to 32-bit variant. */
22385 if (offset & low)
22386 return 4;
22387 if (offset & ~mask)
22388 return 4;
22389 return 2;
22390 }
22391
22392 /* Get the address of a symbol during relaxation. */
/* Get the address of a symbol during relaxation.  Returns the symbol's
   value plus the frag's offset, adjusted by the current STRETCH when the
   symbol's frag has not yet been visited on this relaxation pass.  */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* An intervening alignment frag absorbs part of the
		 stretch; round it down to the alignment boundary
		 (towards zero for negative stretch).  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      /* Once the stretch has been fully absorbed there is
		 nothing left to apply.  */
	      if (stretch == 0)
		break;
	    }
	}
      /* Only apply the (remaining) stretch if the symbol's frag lies
	 ahead of us; f == NULL means it was behind.  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
22441
22442 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
22443 load. */
22444 static int
22445 relax_adr (fragS *fragp, asection *sec, long stretch)
22446 {
22447 addressT addr;
22448 offsetT val;
22449
22450 /* Assume worst case for symbols not known to be in the same section. */
22451 if (fragp->fr_symbol == NULL
22452 || !S_IS_DEFINED (fragp->fr_symbol)
22453 || sec != S_GET_SEGMENT (fragp->fr_symbol)
22454 || S_IS_WEAK (fragp->fr_symbol))
22455 return 4;
22456
22457 val = relaxed_symbol_addr (fragp, stretch);
22458 addr = fragp->fr_address + fragp->fr_fix;
22459 addr = (addr + 4) & ~3;
22460 /* Force misaligned targets to 32-bit variant. */
22461 if (val & 3)
22462 return 4;
22463 val -= addr;
22464 if (val < 0 || val > 1020)
22465 return 4;
22466 return 2;
22467 }
22468
22469 /* Return the size of a relaxable add/sub immediate instruction. */
22470 static int
22471 relax_addsub (fragS *fragp, asection *sec)
22472 {
22473 char *buf;
22474 int op;
22475
22476 buf = fragp->fr_literal + fragp->fr_fix;
22477 op = bfd_get_16(sec->owner, buf);
22478 if ((op & 0xf) == ((op >> 4) & 0xf))
22479 return relax_immediate (fragp, 8, 0);
22480 else
22481 return relax_immediate (fragp, 3, 0);
22482 }
22483
22484 /* Return TRUE iff the definition of symbol S could be pre-empted
22485 (overridden) at link or load time. */
22486 static bfd_boolean
22487 symbol_preemptible (symbolS *s)
22488 {
22489 /* Weak symbols can always be pre-empted. */
22490 if (S_IS_WEAK (s))
22491 return TRUE;
22492
22493 /* Non-global symbols cannot be pre-empted. */
22494 if (! S_IS_EXTERNAL (s))
22495 return FALSE;
22496
22497 #ifdef OBJ_ELF
22498 /* In ELF, a global symbol can be marked protected, or private. In that
22499 case it can't be pre-empted (other definitions in the same link unit
22500 would violate the ODR). */
22501 if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
22502 return FALSE;
22503 #endif
22504
22505 /* Other global symbols might be pre-empted. */
22506 return TRUE;
22507 }
22508
22509 /* Return the size of a relaxable branch instruction. BITS is the
22510 size of the offset field in the narrow instruction. */
22511
22512 static int
22513 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
22514 {
22515 addressT addr;
22516 offsetT val;
22517 offsetT limit;
22518
22519 /* Assume worst case for symbols not known to be in the same section. */
22520 if (!S_IS_DEFINED (fragp->fr_symbol)
22521 || sec != S_GET_SEGMENT (fragp->fr_symbol)
22522 || S_IS_WEAK (fragp->fr_symbol))
22523 return 4;
22524
22525 #ifdef OBJ_ELF
22526 /* A branch to a function in ARM state will require interworking. */
22527 if (S_IS_DEFINED (fragp->fr_symbol)
22528 && ARM_IS_FUNC (fragp->fr_symbol))
22529 return 4;
22530 #endif
22531
22532 if (symbol_preemptible (fragp->fr_symbol))
22533 return 4;
22534
22535 val = relaxed_symbol_addr (fragp, stretch);
22536 addr = fragp->fr_address + fragp->fr_fix + 4;
22537 val -= addr;
22538
22539 /* Offset is a signed value *2 */
22540 limit = 1 << bits;
22541 if (val >= limit || val < -limit)
22542 return 4;
22543 return 2;
22544 }
22545
22546
22547 /* Relax a machine dependent frag. This returns the amount by which
22548 the current size of the frag should change. */
22549
/* Relax a machine dependent frag.  Dispatches on the Thumb mnemonic
   recorded in fr_subtype to compute the frag's new size (2 or 4 bytes)
   and returns the amount by which the current size of the frag should
   change.  */

int
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      /* 8-bit immediate, scaled by 4 (word offsets).  */
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      newsize = relax_immediate (fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      /* Halfword accesses: immediate scaled by 2.  */
      newsize = relax_immediate (fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      /* Byte accesses: unscaled immediate.  */
      newsize = relax_immediate (fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      newsize = relax_immediate (fragp, 8, 0);
      break;
    case T_MNEM_b:
      newsize = relax_branch (fragp, sec, 11, stretch);
      break;
    case T_MNEM_bcond:
      newsize = relax_branch (fragp, sec, 8, stretch);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      newsize = relax_immediate (fragp, 7, 2);
      break;
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      newsize = relax_addsub (fragp, sec);
      break;
    default:
      abort ();
    }

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
    {
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane (fragp);
    }

  return newsize - oldsize;
}
22625
22626 /* Round up a section size to the appropriate boundary. */
22627
22628 valueT
22629 md_section_align (segT segment ATTRIBUTE_UNUSED,
22630 valueT size)
22631 {
22632 return size;
22633 }
22634
22635 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
22636 of an rs_align_code fragment. */
22637
/* Fill in the contents of an rs_align_code fragment (called from
   HANDLE_ALIGN in write.c).  Pads with the architecture-appropriate
   no-op encoding: zero bytes up to the no-op alignment, then ARM,
   Thumb-1, or Thumb-2 (narrow and/or wide) NOPs for the remainder.  */

void
arm_handle_align (fragS * fragP)
{
  /* ARM no-ops, indexed by [v6k-available][big-endian].  The pre-v6k
     encoding is "mov r0, r0"; v6k has an architected NOP.  */
  static unsigned char const arm_noop[2][2][4] =
    {
      {  /* ARMv1 */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      {  /* ARMv6k */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  /* Narrow Thumb no-ops, indexed by [thumb2-available][big-endian].
     Thumb-1 uses "mov r8, r8"; Thumb-2 has a real NOP.  */
  static unsigned char const thumb_noop[2][2][2] =
    {
      {  /* Thumb-1 */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      {  /* Thumb-2 */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  /* 32-bit Thumb-2 NOP (nop.w), indexed by [big-endian].  */
  static unsigned char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const unsigned char * noop;
  const unsigned char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  /* Number of padding bytes this frag must supply.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  /* Select the no-op encodings for the mode recorded in the frag.  */
  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
	{
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
					   ? selected_cpu : arm_arch_none,
					   arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  /* Pad up to the no-op alignment boundary with zero bytes, marking
     them as data via a mapping symbol on ELF.  */
  if (bytes & (noop_size - 1))
    {
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
22754
22755 /* Called from md_do_align. Used to create an alignment
22756 frag in a code section. */
22757
22758 void
22759 arm_frag_align_code (int n, int max)
22760 {
22761 char * p;
22762
22763 /* We assume that there will never be a requirement
22764 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
22765 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
22766 {
22767 char err_msg[128];
22768
22769 sprintf (err_msg,
22770 _("alignments greater than %d bytes not supported in .text sections."),
22771 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
22772 as_fatal ("%s", err_msg);
22773 }
22774
22775 p = frag_var (rs_align_code,
22776 MAX_MEM_FOR_RS_ALIGN_CODE,
22777 1,
22778 (relax_substateT) max,
22779 (symbolS *) NULL,
22780 (offsetT) n,
22781 (char *) NULL);
22782 *p = 0;
22783 }
22784
22785 /* Perform target specific initialisation of a frag.
22786 Note - despite the name this initialisation is not done when the frag
22787 is created, but only when its type is assigned. A frag can be created
22788 and used a long time before its type is set, so beware of assuming that
22789 this initialisation is performed first. */
22790
22791 #ifndef OBJ_ELF
void
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
{
  /* Record whether this frag is in an ARM or a THUMB area, tagging the
     value with MODE_RECORDED so later code can tell it has been set.  */
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
}
22798
22799 #else /* OBJ_ELF is defined. */
void
arm_init_frag (fragS * fragP, int max_chars)
{
  bfd_boolean frag_thumb_mode;

  /* If the current ARM vs THUMB mode has not already
     been recorded into this frag then do so now.  */
  if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
    fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_get_section_flags (NULL, now_seg) & SEC_DEBUGGING)
    return;

  /* XOR strips the MODE_RECORDED tag, leaving nonzero iff the frag was
     recorded in Thumb mode.  */
  frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED;

  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  switch (fragP->fr_type)
    {
    case rs_align:
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (frag_thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
      break;
    default:
      break;
    }
}
22833
22834 /* When we change sections we need to issue a new mapping symbol. */
22835
void
arm_elf_change_section (void)
{
  /* Link an unlinked unwind index table section to the .text section.
     An SHT_ARM_EXIDX section must point at the code it describes via
     its sh_link; default any orphan to the main text section.  */
  if (elf_section_type (now_seg) == SHT_ARM_EXIDX
      && elf_linked_to_section (now_seg) == NULL)
    elf_linked_to_section (now_seg) = text_section;
}
22844
22845 int
22846 arm_elf_section_type (const char * str, size_t len)
22847 {
22848 if (len == 5 && strncmp (str, "exidx", 5) == 0)
22849 return SHT_ARM_EXIDX;
22850
22851 return -1;
22852 }
22853 \f
22854 /* Code to deal with unwinding tables. */
22855
22856 static void add_unwind_adjustsp (offsetT);
22857
22858 /* Generate any deferred unwind frame offset. */
22859
22860 static void
22861 flush_pending_unwind (void)
22862 {
22863 offsetT offset;
22864
22865 offset = unwind.pending_offset;
22866 unwind.pending_offset = 0;
22867 if (offset != 0)
22868 add_unwind_adjustsp (offset);
22869 }
22870
22871 /* Add an opcode to this list for this function. Two-byte opcodes should
22872 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
22873 order. */
22874
22875 static void
22876 add_unwind_opcode (valueT op, int length)
22877 {
22878 /* Add any deferred stack adjustment. */
22879 if (unwind.pending_offset)
22880 flush_pending_unwind ();
22881
22882 unwind.sp_restored = 0;
22883
22884 if (unwind.opcode_count + length > unwind.opcode_alloc)
22885 {
22886 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
22887 if (unwind.opcodes)
22888 unwind.opcodes = XRESIZEVEC (unsigned char, unwind.opcodes,
22889 unwind.opcode_alloc);
22890 else
22891 unwind.opcodes = XNEWVEC (unsigned char, unwind.opcode_alloc);
22892 }
22893 while (length > 0)
22894 {
22895 length--;
22896 unwind.opcodes[unwind.opcode_count] = op & 0xff;
22897 op >>= 8;
22898 unwind.opcode_count++;
22899 }
22900 }
22901
22902 /* Add unwind opcodes to adjust the stack pointer. */
22903
/* Add unwind opcodes to adjust the stack pointer by OFFSET bytes.
   Positive offsets grow the frame (vsp increment on unwind); negative
   offsets shrink it.  Remember the opcode list is built in reverse.  */

static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      if (o == 0)
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  if (o)
	    bytes[n] |= 0x80;
	  n++;
	}
      /* Add the insn.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Negative adjustment: each 0x7f opcode pops 0x100 bytes; the final
	 0x40-series opcode carries the remainder.  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
22963
22964 /* Finish the list of unwind opcodes for this function. */
22965
22966 static void
22967 finish_unwind_opcodes (void)
22968 {
22969 valueT op;
22970
22971 if (unwind.fp_used)
22972 {
22973 /* Adjust sp as necessary. */
22974 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
22975 flush_pending_unwind ();
22976
22977 /* After restoring sp from the frame pointer. */
22978 op = 0x90 | unwind.fp_reg;
22979 add_unwind_opcode (op, 1);
22980 }
22981 else
22982 flush_pending_unwind ();
22983 }
22984
22985
22986 /* Start an exception table entry. If idx is nonzero this is an index table
22987 entry. */
22988
/* Start an exception table entry.  If idx is nonzero this is an index table
   entry.  Switches to (creating if necessary) the unwind section paired
   with TEXT_SEG, propagating linkonce/COMDAT grouping from the text
   section.  */

static void
start_unwind_section (const segT text_seg, int idx)
{
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  const char * group_name;
  char * sec_name;
  int type;
  int flags;
  int linkonce;

  /* Index tables go in .ARM.exidx*, unwind data in .ARM.extab*.  */
  if (idx)
    {
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
    }
  else
    {
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
    }

  text_name = segment_name (text_seg);
  /* The plain ".text" section gets the bare prefix name.  */
  if (streq (text_name, ".text"))
    text_name = "";

  if (strncmp (text_name, ".gnu.linkonce.t.",
	       strlen (".gnu.linkonce.t.")) == 0)
    {
      /* Old-style linkonce text sections get the "once" prefix and drop
	 their own linkonce prefix from the suffix.  */
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
    }

  sec_name = concat (prefix, text_name, (char *) NULL);

  flags = SHF_ALLOC;
  linkonce = 0;
  group_name = 0;

  /* Handle COMDAT group.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
    {
      group_name = elf_group_name (text_seg);
      if (group_name == NULL)
	{
	  as_bad (_("Group section `%s' has no group signature"),
		  segment_name (text_seg));
	  ignore_rest_of_line ();
	  return;
	}
      flags |= SHF_GROUP;
      linkonce = 1;
    }

  obj_elf_change_section (sec_name, type, 0, flags, 0, group_name,
			  linkonce, 0);

  /* Set the section link for index tables.  */
  if (idx)
    elf_linked_to_section (now_seg) = text_seg;
}
23053
23054
23055 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
23056 personality routine data. Returns zero, or the index table value for
23057 an inline entry. */
23058
/* Start an unwind table entry.  HAVE_DATA is nonzero if we have additional
   personality routine data.  Returns zero, or the index table value for
   an inline entry (either EXIDX_CANTUNWIND or a packed word of unwind
   opcodes).  On return the current section is the unwind section; the
   previous text section/subsection are saved in unwind.saved_*.  */

static valueT
create_unwind_entry (int have_data)
{
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.  */
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      if (unwind.personality_index == -2)
	{
	  /* .cantunwind was given: the index table holds the special
	     EXIDX_CANTUNWIND marker and no data is allowed.  */
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
	{
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table.  */
	      data = 0x80;
	      n = 3;
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.  */
	size = unwind.opcode_count - 2;
    }
  else
    {
      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
      if (unwind.personality_index != -1)
	{
	  as_bad (_("attempt to recreate an unwind entry"));
	  return 1;
	}

      /* An extra byte is required for the opcode count.  */
      size = unwind.opcode_count + 1;
    }

  /* Round the byte count up to whole words; the table format caps the
     additional-word count at 255.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  /* PR 13449: Zero the table entries in case some of them are not used.  */
  memset (ptr, 0, (size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.  */
      data = size > 0 ? size - 1 : 0;
      n = 3;
      break;

      /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.  */
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
23223
23224
23225 /* Initialize the DWARF-2 unwind information for this procedure. */
23226
void
tc_arm_frame_initial_instructions (void)
{
  /* At function entry the CFA is the stack pointer with zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
23232 #endif /* OBJ_ELF */
23233
23234 /* Convert REGNAME to a DWARF-2 register number. */
23235
23236 int
23237 tc_arm_regname_to_dw2regnum (char *regname)
23238 {
23239 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
23240 if (reg != FAIL)
23241 return reg;
23242
23243 /* PR 16694: Allow VFP registers as well. */
23244 reg = arm_reg_parse (&regname, REG_TYPE_VFS);
23245 if (reg != FAIL)
23246 return 64 + reg;
23247
23248 reg = arm_reg_parse (&regname, REG_TYPE_VFD);
23249 if (reg != FAIL)
23250 return reg + 256;
23251
23252 return FAIL;
23253 }
23254
23255 #ifdef TE_PE
23256 void
23257 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
23258 {
23259 expressionS exp;
23260
23261 exp.X_op = O_secrel;
23262 exp.X_add_symbol = symbol;
23263 exp.X_add_number = 0;
23264 emit_expr (&exp, size);
23265 }
23266 #endif
23267
23268 /* MD interface: Symbol and relocation handling. */
23269
23270 /* Return the address within the segment that a PC-relative fixup is
23271 relative to. For ARM, PC-relative fixups applied to instructions
23272 are generally relative to the location of the fixup plus 8 bytes.
23273 Thumb branches are offset by 4, and Thumb loads relative to PC
23274 require special handling. */
23275
long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

      /* Thumb PC-relative loads/address calculations: bias by +4 and
	 force word alignment (the & ~3 drops the bottom two bits).  */
    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH5:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BFCSEL:
    case BFD_RELOC_ARM_THUMB_BF17:
    case BFD_RELOC_ARM_THUMB_BF19:
    case BFD_RELOC_ARM_THUMB_BF13:
    case BFD_RELOC_ARM_THUMB_LOOP12:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* A BL to a function in the same section with a known ISA state
	 on a v5T+ target will be resolved by the assembler itself
	 (md_apply_fix may flip bl <-> blx), so restore the real base
	 even if the code above zeroed it for reloc emission.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      /* Same-section blx to an ARM function: md_apply_fix will turn
	 it into a plain bl, so use the real base.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      /* Same-section bl to a Thumb function: md_apply_fix will turn
	 it into a blx, so use the real base.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
23403
23404 static bfd_boolean flag_warn_syms = TRUE;
23405
23406 bfd_boolean
23407 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
23408 {
23409 /* PR 18347 - Warn if the user attempts to create a symbol with the same
23410 name as an ARM instruction. Whilst strictly speaking it is allowed, it
23411 does mean that the resulting code might be very confusing to the reader.
23412 Also this warning can be triggered if the user omits an operand before
23413 an immediate address, eg:
23414
23415 LDR =foo
23416
23417 GAS treats this as an assignment of the value of the symbol foo to a
23418 symbol LDR, and so (without this code) it will not issue any kind of
23419 warning or error message.
23420
23421 Note - ARM instructions are case-insensitive but the strings in the hash
23422 table are all stored in lower case, so we must first ensure that name is
23423 lower case too. */
23424 if (flag_warn_syms && arm_ops_hsh)
23425 {
23426 char * nbuf = strdup (name);
23427 char * p;
23428
23429 for (p = nbuf; *p; p++)
23430 *p = TOLOWER (*p);
23431 if (hash_find (arm_ops_hsh, nbuf) != NULL)
23432 {
23433 static struct hash_control * already_warned = NULL;
23434
23435 if (already_warned == NULL)
23436 already_warned = hash_new ();
23437 /* Only warn about the symbol once. To keep the code
23438 simple we let hash_insert do the lookup for us. */
23439 if (hash_insert (already_warned, name, NULL) == NULL)
23440 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
23441 }
23442 else
23443 free (nbuf);
23444 }
23445
23446 return FALSE;
23447 }
23448
23449 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
23450 Otherwise we have no need to default values of symbols. */
23451
23452 symbolS *
23453 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
23454 {
23455 #ifdef OBJ_ELF
23456 if (name[0] == '_' && name[1] == 'G'
23457 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
23458 {
23459 if (!GOT_symbol)
23460 {
23461 if (symbol_find (name))
23462 as_bad (_("GOT already in the symbol table"));
23463
23464 GOT_symbol = symbol_new (name, undefined_section,
23465 (valueT) 0, & zero_address_frag);
23466 }
23467
23468 return GOT_symbol;
23469 }
23470 #endif
23471
23472 return NULL;
23473 }
23474
23475 /* Subroutine of md_apply_fix. Check to see if an immediate can be
23476 computed as two separate immediate values, added together. We
23477 already know that this value cannot be computed by just one ARM
23478 instruction. */
23479
23480 static unsigned int
23481 validate_immediate_twopart (unsigned int val,
23482 unsigned int * highpart)
23483 {
23484 unsigned int a;
23485 unsigned int i;
23486
23487 for (i = 0; i < 32; i += 2)
23488 if (((a = rotate_left (val, i)) & 0xff) != 0)
23489 {
23490 if (a & 0xff00)
23491 {
23492 if (a & ~ 0xffff)
23493 continue;
23494 * highpart = (a >> 8) | ((i + 24) << 7);
23495 }
23496 else if (a & 0xff0000)
23497 {
23498 if (a & 0xff000000)
23499 continue;
23500 * highpart = (a >> 16) | ((i + 16) << 7);
23501 }
23502 else
23503 {
23504 gas_assert (a & 0xff000000);
23505 * highpart = (a >> 24) | ((i + 8) << 7);
23506 }
23507
23508 return (a & 0xff) | (i << 7);
23509 }
23510
23511 return FAIL;
23512 }
23513
23514 static int
23515 validate_offset_imm (unsigned int val, int hwse)
23516 {
23517 if ((hwse && val > 255) || val > 4095)
23518 return FAIL;
23519 return val;
23520 }
23521
23522 /* Subroutine of md_apply_fix. Do those data_ops which can take a
23523 negative immediate constant by altering the instruction. A bit of
23524 a hack really.
23525 MOV <-> MVN
23526 AND <-> BIC
23527 ADC <-> SBC
23528 by inverting the second operand, and
23529 ADD <-> SUB
23530 CMP <-> CMN
23531 by negating the second operand. */
23532
23533 static int
23534 negate_data_op (unsigned long * instruction,
23535 unsigned long value)
23536 {
23537 int op, new_inst;
23538 unsigned long negated, inverted;
23539
23540 negated = encode_arm_immediate (-value);
23541 inverted = encode_arm_immediate (~value);
23542
23543 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
23544 switch (op)
23545 {
23546 /* First negates. */
23547 case OPCODE_SUB: /* ADD <-> SUB */
23548 new_inst = OPCODE_ADD;
23549 value = negated;
23550 break;
23551
23552 case OPCODE_ADD:
23553 new_inst = OPCODE_SUB;
23554 value = negated;
23555 break;
23556
23557 case OPCODE_CMP: /* CMP <-> CMN */
23558 new_inst = OPCODE_CMN;
23559 value = negated;
23560 break;
23561
23562 case OPCODE_CMN:
23563 new_inst = OPCODE_CMP;
23564 value = negated;
23565 break;
23566
23567 /* Now Inverted ops. */
23568 case OPCODE_MOV: /* MOV <-> MVN */
23569 new_inst = OPCODE_MVN;
23570 value = inverted;
23571 break;
23572
23573 case OPCODE_MVN:
23574 new_inst = OPCODE_MOV;
23575 value = inverted;
23576 break;
23577
23578 case OPCODE_AND: /* AND <-> BIC */
23579 new_inst = OPCODE_BIC;
23580 value = inverted;
23581 break;
23582
23583 case OPCODE_BIC:
23584 new_inst = OPCODE_AND;
23585 value = inverted;
23586 break;
23587
23588 case OPCODE_ADC: /* ADC <-> SBC */
23589 new_inst = OPCODE_SBC;
23590 value = inverted;
23591 break;
23592
23593 case OPCODE_SBC:
23594 new_inst = OPCODE_ADC;
23595 value = inverted;
23596 break;
23597
23598 /* We cannot do anything. */
23599 default:
23600 return FAIL;
23601 }
23602
23603 if (value == (unsigned) FAIL)
23604 return FAIL;
23605
23606 *instruction &= OPCODE_MASK;
23607 *instruction |= new_inst << DATA_OP_SHIFT;
23608 return value;
23609 }
23610
23611 /* Like negate_data_op, but for Thumb-2. */
23612
23613 static unsigned int
23614 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
23615 {
23616 int op, new_inst;
23617 int rd;
23618 unsigned int negated, inverted;
23619
23620 negated = encode_thumb32_immediate (-value);
23621 inverted = encode_thumb32_immediate (~value);
23622
23623 rd = (*instruction >> 8) & 0xf;
23624 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
23625 switch (op)
23626 {
23627 /* ADD <-> SUB. Includes CMP <-> CMN. */
23628 case T2_OPCODE_SUB:
23629 new_inst = T2_OPCODE_ADD;
23630 value = negated;
23631 break;
23632
23633 case T2_OPCODE_ADD:
23634 new_inst = T2_OPCODE_SUB;
23635 value = negated;
23636 break;
23637
23638 /* ORR <-> ORN. Includes MOV <-> MVN. */
23639 case T2_OPCODE_ORR:
23640 new_inst = T2_OPCODE_ORN;
23641 value = inverted;
23642 break;
23643
23644 case T2_OPCODE_ORN:
23645 new_inst = T2_OPCODE_ORR;
23646 value = inverted;
23647 break;
23648
23649 /* AND <-> BIC. TST has no inverted equivalent. */
23650 case T2_OPCODE_AND:
23651 new_inst = T2_OPCODE_BIC;
23652 if (rd == 15)
23653 value = FAIL;
23654 else
23655 value = inverted;
23656 break;
23657
23658 case T2_OPCODE_BIC:
23659 new_inst = T2_OPCODE_AND;
23660 value = inverted;
23661 break;
23662
23663 /* ADC <-> SBC */
23664 case T2_OPCODE_ADC:
23665 new_inst = T2_OPCODE_SBC;
23666 value = inverted;
23667 break;
23668
23669 case T2_OPCODE_SBC:
23670 new_inst = T2_OPCODE_ADC;
23671 value = inverted;
23672 break;
23673
23674 /* We cannot do anything. */
23675 default:
23676 return FAIL;
23677 }
23678
23679 if (value == (unsigned int)FAIL)
23680 return FAIL;
23681
23682 *instruction &= T2_OPCODE_MASK;
23683 *instruction |= new_inst << T2_DATA_OP_SHIFT;
23684 return value;
23685 }
23686
23687 /* Read a 32-bit thumb instruction from buf. */
23688
23689 static unsigned long
23690 get_thumb32_insn (char * buf)
23691 {
23692 unsigned long insn;
23693 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
23694 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23695
23696 return insn;
23697 }
23698
23699 /* We usually want to set the low bit on the address of thumb function
23700 symbols. In particular .word foo - . should have the low bit set.
23701 Generic code tries to fold the difference of two symbols to
23702 a constant. Prevent this and force a relocation when the first symbols
23703 is a thumb function. */
23704
23705 bfd_boolean
23706 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
23707 {
23708 if (op == O_subtract
23709 && l->X_op == O_symbol
23710 && r->X_op == O_symbol
23711 && THUMB_IS_FUNC (l->X_add_symbol))
23712 {
23713 l->X_op = O_subtract;
23714 l->X_op_symbol = r->X_add_symbol;
23715 l->X_add_number -= r->X_add_number;
23716 return TRUE;
23717 }
23718
23719 /* Process as normal. */
23720 return FALSE;
23721 }
23722
23723 /* Encode Thumb2 unconditional branches and calls. The encoding
23724 for the 2 are identical for the immediate values. */
23725
static void
encode_thumb2_b_bl_offset (char * buf, offsetT value)
{
  /* Mask covering the J1 (bit 13) and J2 (bit 11) positions of the
     second halfword.  */
#define T2I1I2MASK  ((1 << 13) | (1 << 11))
  offsetT newval;
  offsetT newval2;
  addressT S, I1, I2, lo, hi;

  /* Split the byte offset into the fields of the T32 encoding:
     S is the sign bit, I1/I2 the next two high bits, then a 10-bit
     high part and an 11-bit low part; bit 0 is discarded.  */
  S = (value >> 24) & 0x01;
  I1 = (value >> 23) & 0x01;
  I2 = (value >> 22) & 0x01;
  hi = (value >> 12) & 0x3ff;
  lo = (value >> 1) & 0x7ff;
  newval = md_chars_to_number (buf, THUMB_SIZE);
  newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
  newval |= (S << 10) | hi;
  newval2 &= ~T2I1I2MASK;
  /* The instruction stores J1 = NOT(I1 EOR S) and J2 = NOT(I2 EOR S);
     the trailing XOR with T2I1I2MASK performs that inversion.  */
  newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
  md_number_to_chars (buf, newval, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
}
23747
23748 void
23749 md_apply_fix (fixS * fixP,
23750 valueT * valP,
23751 segT seg)
23752 {
23753 offsetT value = * valP;
23754 offsetT newval;
23755 unsigned int newimm;
23756 unsigned long temp;
23757 int sign;
23758 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
23759
23760 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
23761
23762 /* Note whether this will delete the relocation. */
23763
23764 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
23765 fixP->fx_done = 1;
23766
23767 /* On a 64-bit host, silently truncate 'value' to 32 bits for
23768 consistency with the behaviour on 32-bit hosts. Remember value
23769 for emit_reloc. */
23770 value &= 0xffffffff;
23771 value ^= 0x80000000;
23772 value -= 0x80000000;
23773
23774 *valP = value;
23775 fixP->fx_addnumber = value;
23776
23777 /* Same treatment for fixP->fx_offset. */
23778 fixP->fx_offset &= 0xffffffff;
23779 fixP->fx_offset ^= 0x80000000;
23780 fixP->fx_offset -= 0x80000000;
23781
23782 switch (fixP->fx_r_type)
23783 {
23784 case BFD_RELOC_NONE:
23785 /* This will need to go in the object file. */
23786 fixP->fx_done = 0;
23787 break;
23788
23789 case BFD_RELOC_ARM_IMMEDIATE:
23790 /* We claim that this fixup has been processed here,
23791 even if in fact we generate an error because we do
23792 not have a reloc for it, so tc_gen_reloc will reject it. */
23793 fixP->fx_done = 1;
23794
23795 if (fixP->fx_addsy)
23796 {
23797 const char *msg = 0;
23798
23799 if (! S_IS_DEFINED (fixP->fx_addsy))
23800 msg = _("undefined symbol %s used as an immediate value");
23801 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
23802 msg = _("symbol %s is in a different section");
23803 else if (S_IS_WEAK (fixP->fx_addsy))
23804 msg = _("symbol %s is weak and may be overridden later");
23805
23806 if (msg)
23807 {
23808 as_bad_where (fixP->fx_file, fixP->fx_line,
23809 msg, S_GET_NAME (fixP->fx_addsy));
23810 break;
23811 }
23812 }
23813
23814 temp = md_chars_to_number (buf, INSN_SIZE);
23815
23816 /* If the offset is negative, we should use encoding A2 for ADR. */
23817 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
23818 newimm = negate_data_op (&temp, value);
23819 else
23820 {
23821 newimm = encode_arm_immediate (value);
23822
23823 /* If the instruction will fail, see if we can fix things up by
23824 changing the opcode. */
23825 if (newimm == (unsigned int) FAIL)
23826 newimm = negate_data_op (&temp, value);
23827 /* MOV accepts both ARM modified immediate (A1 encoding) and
23828 UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
23829 When disassembling, MOV is preferred when there is no encoding
23830 overlap. */
23831 if (newimm == (unsigned int) FAIL
23832 && ((temp >> DATA_OP_SHIFT) & 0xf) == OPCODE_MOV
23833 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
23834 && !((temp >> SBIT_SHIFT) & 0x1)
23835 && value >= 0 && value <= 0xffff)
23836 {
23837 /* Clear bits[23:20] to change encoding from A1 to A2. */
23838 temp &= 0xff0fffff;
23839 /* Encoding high 4bits imm. Code below will encode the remaining
23840 low 12bits. */
23841 temp |= (value & 0x0000f000) << 4;
23842 newimm = value & 0x00000fff;
23843 }
23844 }
23845
23846 if (newimm == (unsigned int) FAIL)
23847 {
23848 as_bad_where (fixP->fx_file, fixP->fx_line,
23849 _("invalid constant (%lx) after fixup"),
23850 (unsigned long) value);
23851 break;
23852 }
23853
23854 newimm |= (temp & 0xfffff000);
23855 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
23856 break;
23857
23858 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
23859 {
23860 unsigned int highpart = 0;
23861 unsigned int newinsn = 0xe1a00000; /* nop. */
23862
23863 if (fixP->fx_addsy)
23864 {
23865 const char *msg = 0;
23866
23867 if (! S_IS_DEFINED (fixP->fx_addsy))
23868 msg = _("undefined symbol %s used as an immediate value");
23869 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
23870 msg = _("symbol %s is in a different section");
23871 else if (S_IS_WEAK (fixP->fx_addsy))
23872 msg = _("symbol %s is weak and may be overridden later");
23873
23874 if (msg)
23875 {
23876 as_bad_where (fixP->fx_file, fixP->fx_line,
23877 msg, S_GET_NAME (fixP->fx_addsy));
23878 break;
23879 }
23880 }
23881
23882 newimm = encode_arm_immediate (value);
23883 temp = md_chars_to_number (buf, INSN_SIZE);
23884
23885 /* If the instruction will fail, see if we can fix things up by
23886 changing the opcode. */
23887 if (newimm == (unsigned int) FAIL
23888 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
23889 {
23890 /* No ? OK - try using two ADD instructions to generate
23891 the value. */
23892 newimm = validate_immediate_twopart (value, & highpart);
23893
23894 /* Yes - then make sure that the second instruction is
23895 also an add. */
23896 if (newimm != (unsigned int) FAIL)
23897 newinsn = temp;
23898 /* Still No ? Try using a negated value. */
23899 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
23900 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
23901 /* Otherwise - give up. */
23902 else
23903 {
23904 as_bad_where (fixP->fx_file, fixP->fx_line,
23905 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
23906 (long) value);
23907 break;
23908 }
23909
23910 /* Replace the first operand in the 2nd instruction (which
23911 is the PC) with the destination register. We have
23912 already added in the PC in the first instruction and we
23913 do not want to do it again. */
23914 newinsn &= ~ 0xf0000;
23915 newinsn |= ((newinsn & 0x0f000) << 4);
23916 }
23917
23918 newimm |= (temp & 0xfffff000);
23919 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
23920
23921 highpart |= (newinsn & 0xfffff000);
23922 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
23923 }
23924 break;
23925
23926 case BFD_RELOC_ARM_OFFSET_IMM:
23927 if (!fixP->fx_done && seg->use_rela_p)
23928 value = 0;
23929 /* Fall through. */
23930
23931 case BFD_RELOC_ARM_LITERAL:
23932 sign = value > 0;
23933
23934 if (value < 0)
23935 value = - value;
23936
23937 if (validate_offset_imm (value, 0) == FAIL)
23938 {
23939 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
23940 as_bad_where (fixP->fx_file, fixP->fx_line,
23941 _("invalid literal constant: pool needs to be closer"));
23942 else
23943 as_bad_where (fixP->fx_file, fixP->fx_line,
23944 _("bad immediate value for offset (%ld)"),
23945 (long) value);
23946 break;
23947 }
23948
23949 newval = md_chars_to_number (buf, INSN_SIZE);
23950 if (value == 0)
23951 newval &= 0xfffff000;
23952 else
23953 {
23954 newval &= 0xff7ff000;
23955 newval |= value | (sign ? INDEX_UP : 0);
23956 }
23957 md_number_to_chars (buf, newval, INSN_SIZE);
23958 break;
23959
23960 case BFD_RELOC_ARM_OFFSET_IMM8:
23961 case BFD_RELOC_ARM_HWLITERAL:
23962 sign = value > 0;
23963
23964 if (value < 0)
23965 value = - value;
23966
23967 if (validate_offset_imm (value, 1) == FAIL)
23968 {
23969 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
23970 as_bad_where (fixP->fx_file, fixP->fx_line,
23971 _("invalid literal constant: pool needs to be closer"));
23972 else
23973 as_bad_where (fixP->fx_file, fixP->fx_line,
23974 _("bad immediate value for 8-bit offset (%ld)"),
23975 (long) value);
23976 break;
23977 }
23978
23979 newval = md_chars_to_number (buf, INSN_SIZE);
23980 if (value == 0)
23981 newval &= 0xfffff0f0;
23982 else
23983 {
23984 newval &= 0xff7ff0f0;
23985 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
23986 }
23987 md_number_to_chars (buf, newval, INSN_SIZE);
23988 break;
23989
23990 case BFD_RELOC_ARM_T32_OFFSET_U8:
23991 if (value < 0 || value > 1020 || value % 4 != 0)
23992 as_bad_where (fixP->fx_file, fixP->fx_line,
23993 _("bad immediate value for offset (%ld)"), (long) value);
23994 value /= 4;
23995
23996 newval = md_chars_to_number (buf+2, THUMB_SIZE);
23997 newval |= value;
23998 md_number_to_chars (buf+2, newval, THUMB_SIZE);
23999 break;
24000
24001 case BFD_RELOC_ARM_T32_OFFSET_IMM:
24002 /* This is a complicated relocation used for all varieties of Thumb32
24003 load/store instruction with immediate offset:
24004
24005 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
24006 *4, optional writeback(W)
24007 (doubleword load/store)
24008
24009 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
24010 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
24011 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
24012 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
24013 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
24014
24015 Uppercase letters indicate bits that are already encoded at
24016 this point. Lowercase letters are our problem. For the
24017 second block of instructions, the secondary opcode nybble
24018 (bits 8..11) is present, and bit 23 is zero, even if this is
24019 a PC-relative operation. */
24020 newval = md_chars_to_number (buf, THUMB_SIZE);
24021 newval <<= 16;
24022 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
24023
24024 if ((newval & 0xf0000000) == 0xe0000000)
24025 {
24026 /* Doubleword load/store: 8-bit offset, scaled by 4. */
24027 if (value >= 0)
24028 newval |= (1 << 23);
24029 else
24030 value = -value;
24031 if (value % 4 != 0)
24032 {
24033 as_bad_where (fixP->fx_file, fixP->fx_line,
24034 _("offset not a multiple of 4"));
24035 break;
24036 }
24037 value /= 4;
24038 if (value > 0xff)
24039 {
24040 as_bad_where (fixP->fx_file, fixP->fx_line,
24041 _("offset out of range"));
24042 break;
24043 }
24044 newval &= ~0xff;
24045 }
24046 else if ((newval & 0x000f0000) == 0x000f0000)
24047 {
24048 /* PC-relative, 12-bit offset. */
24049 if (value >= 0)
24050 newval |= (1 << 23);
24051 else
24052 value = -value;
24053 if (value > 0xfff)
24054 {
24055 as_bad_where (fixP->fx_file, fixP->fx_line,
24056 _("offset out of range"));
24057 break;
24058 }
24059 newval &= ~0xfff;
24060 }
24061 else if ((newval & 0x00000100) == 0x00000100)
24062 {
24063 /* Writeback: 8-bit, +/- offset. */
24064 if (value >= 0)
24065 newval |= (1 << 9);
24066 else
24067 value = -value;
24068 if (value > 0xff)
24069 {
24070 as_bad_where (fixP->fx_file, fixP->fx_line,
24071 _("offset out of range"));
24072 break;
24073 }
24074 newval &= ~0xff;
24075 }
24076 else if ((newval & 0x00000f00) == 0x00000e00)
24077 {
24078 /* T-instruction: positive 8-bit offset. */
24079 if (value < 0 || value > 0xff)
24080 {
24081 as_bad_where (fixP->fx_file, fixP->fx_line,
24082 _("offset out of range"));
24083 break;
24084 }
24085 newval &= ~0xff;
24086 newval |= value;
24087 }
24088 else
24089 {
24090 /* Positive 12-bit or negative 8-bit offset. */
24091 int limit;
24092 if (value >= 0)
24093 {
24094 newval |= (1 << 23);
24095 limit = 0xfff;
24096 }
24097 else
24098 {
24099 value = -value;
24100 limit = 0xff;
24101 }
24102 if (value > limit)
24103 {
24104 as_bad_where (fixP->fx_file, fixP->fx_line,
24105 _("offset out of range"));
24106 break;
24107 }
24108 newval &= ~limit;
24109 }
24110
24111 newval |= value;
24112 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
24113 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
24114 break;
24115
24116 case BFD_RELOC_ARM_SHIFT_IMM:
24117 newval = md_chars_to_number (buf, INSN_SIZE);
24118 if (((unsigned long) value) > 32
24119 || (value == 32
24120 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
24121 {
24122 as_bad_where (fixP->fx_file, fixP->fx_line,
24123 _("shift expression is too large"));
24124 break;
24125 }
24126
24127 if (value == 0)
24128 /* Shifts of zero must be done as lsl. */
24129 newval &= ~0x60;
24130 else if (value == 32)
24131 value = 0;
24132 newval &= 0xfffff07f;
24133 newval |= (value & 0x1f) << 7;
24134 md_number_to_chars (buf, newval, INSN_SIZE);
24135 break;
24136
24137 case BFD_RELOC_ARM_T32_IMMEDIATE:
24138 case BFD_RELOC_ARM_T32_ADD_IMM:
24139 case BFD_RELOC_ARM_T32_IMM12:
24140 case BFD_RELOC_ARM_T32_ADD_PC12:
24141 /* We claim that this fixup has been processed here,
24142 even if in fact we generate an error because we do
24143 not have a reloc for it, so tc_gen_reloc will reject it. */
24144 fixP->fx_done = 1;
24145
24146 if (fixP->fx_addsy
24147 && ! S_IS_DEFINED (fixP->fx_addsy))
24148 {
24149 as_bad_where (fixP->fx_file, fixP->fx_line,
24150 _("undefined symbol %s used as an immediate value"),
24151 S_GET_NAME (fixP->fx_addsy));
24152 break;
24153 }
24154
24155 newval = md_chars_to_number (buf, THUMB_SIZE);
24156 newval <<= 16;
24157 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
24158
24159 newimm = FAIL;
24160 if ((fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
24161 /* ARMv8-M Baseline MOV will reach here, but it doesn't support
24162 Thumb2 modified immediate encoding (T2). */
24163 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
24164 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
24165 {
24166 newimm = encode_thumb32_immediate (value);
24167 if (newimm == (unsigned int) FAIL)
24168 newimm = thumb32_negate_data_op (&newval, value);
24169 }
24170 if (newimm == (unsigned int) FAIL)
24171 {
24172 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE)
24173 {
24174 /* Turn add/sum into addw/subw. */
24175 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
24176 newval = (newval & 0xfeffffff) | 0x02000000;
24177 /* No flat 12-bit imm encoding for addsw/subsw. */
24178 if ((newval & 0x00100000) == 0)
24179 {
24180 /* 12 bit immediate for addw/subw. */
24181 if (value < 0)
24182 {
24183 value = -value;
24184 newval ^= 0x00a00000;
24185 }
24186 if (value > 0xfff)
24187 newimm = (unsigned int) FAIL;
24188 else
24189 newimm = value;
24190 }
24191 }
24192 else
24193 {
24194 /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
24195 UINT16 (T3 encoding), MOVW only accepts UINT16. When
24196 disassembling, MOV is preferred when there is no encoding
24197 overlap. */
24198 if (((newval >> T2_DATA_OP_SHIFT) & 0xf) == T2_OPCODE_ORR
24199 /* NOTE: MOV uses the ORR opcode in Thumb 2 mode
24200 but with the Rn field [19:16] set to 1111. */
24201 && (((newval >> 16) & 0xf) == 0xf)
24202 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m)
24203 && !((newval >> T2_SBIT_SHIFT) & 0x1)
24204 && value >= 0 && value <= 0xffff)
24205 {
24206 /* Toggle bit[25] to change encoding from T2 to T3. */
24207 newval ^= 1 << 25;
24208 /* Clear bits[19:16]. */
24209 newval &= 0xfff0ffff;
24210 /* Encoding high 4bits imm. Code below will encode the
24211 remaining low 12bits. */
24212 newval |= (value & 0x0000f000) << 4;
24213 newimm = value & 0x00000fff;
24214 }
24215 }
24216 }
24217
24218 if (newimm == (unsigned int)FAIL)
24219 {
24220 as_bad_where (fixP->fx_file, fixP->fx_line,
24221 _("invalid constant (%lx) after fixup"),
24222 (unsigned long) value);
24223 break;
24224 }
24225
24226 newval |= (newimm & 0x800) << 15;
24227 newval |= (newimm & 0x700) << 4;
24228 newval |= (newimm & 0x0ff);
24229
24230 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
24231 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
24232 break;
24233
24234 case BFD_RELOC_ARM_SMC:
24235 if (((unsigned long) value) > 0xffff)
24236 as_bad_where (fixP->fx_file, fixP->fx_line,
24237 _("invalid smc expression"));
24238 newval = md_chars_to_number (buf, INSN_SIZE);
24239 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
24240 md_number_to_chars (buf, newval, INSN_SIZE);
24241 break;
24242
24243 case BFD_RELOC_ARM_HVC:
24244 if (((unsigned long) value) > 0xffff)
24245 as_bad_where (fixP->fx_file, fixP->fx_line,
24246 _("invalid hvc expression"));
24247 newval = md_chars_to_number (buf, INSN_SIZE);
24248 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
24249 md_number_to_chars (buf, newval, INSN_SIZE);
24250 break;
24251
24252 case BFD_RELOC_ARM_SWI:
24253 if (fixP->tc_fix_data != 0)
24254 {
24255 if (((unsigned long) value) > 0xff)
24256 as_bad_where (fixP->fx_file, fixP->fx_line,
24257 _("invalid swi expression"));
24258 newval = md_chars_to_number (buf, THUMB_SIZE);
24259 newval |= value;
24260 md_number_to_chars (buf, newval, THUMB_SIZE);
24261 }
24262 else
24263 {
24264 if (((unsigned long) value) > 0x00ffffff)
24265 as_bad_where (fixP->fx_file, fixP->fx_line,
24266 _("invalid swi expression"));
24267 newval = md_chars_to_number (buf, INSN_SIZE);
24268 newval |= value;
24269 md_number_to_chars (buf, newval, INSN_SIZE);
24270 }
24271 break;
24272
24273 case BFD_RELOC_ARM_MULTI:
24274 if (((unsigned long) value) > 0xffff)
24275 as_bad_where (fixP->fx_file, fixP->fx_line,
24276 _("invalid expression in load/store multiple"));
24277 newval = value | md_chars_to_number (buf, INSN_SIZE);
24278 md_number_to_chars (buf, newval, INSN_SIZE);
24279 break;
24280
24281 #ifdef OBJ_ELF
24282 case BFD_RELOC_ARM_PCREL_CALL:
24283
24284 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
24285 && fixP->fx_addsy
24286 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
24287 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
24288 && THUMB_IS_FUNC (fixP->fx_addsy))
24289 /* Flip the bl to blx. This is a simple flip
24290 bit here because we generate PCREL_CALL for
24291 unconditional bls. */
24292 {
24293 newval = md_chars_to_number (buf, INSN_SIZE);
24294 newval = newval | 0x10000000;
24295 md_number_to_chars (buf, newval, INSN_SIZE);
24296 temp = 1;
24297 fixP->fx_done = 1;
24298 }
24299 else
24300 temp = 3;
24301 goto arm_branch_common;
24302
24303 case BFD_RELOC_ARM_PCREL_JUMP:
24304 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
24305 && fixP->fx_addsy
24306 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
24307 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
24308 && THUMB_IS_FUNC (fixP->fx_addsy))
24309 {
24310 /* This would map to a bl<cond>, b<cond>,
24311 b<always> to a Thumb function. We
24312 need to force a relocation for this particular
24313 case. */
24314 newval = md_chars_to_number (buf, INSN_SIZE);
24315 fixP->fx_done = 0;
24316 }
24317 /* Fall through. */
24318
24319 case BFD_RELOC_ARM_PLT32:
24320 #endif
24321 case BFD_RELOC_ARM_PCREL_BRANCH:
24322 temp = 3;
24323 goto arm_branch_common;
24324
24325 case BFD_RELOC_ARM_PCREL_BLX:
24326
24327 temp = 1;
24328 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
24329 && fixP->fx_addsy
24330 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
24331 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
24332 && ARM_IS_FUNC (fixP->fx_addsy))
24333 {
24334 /* Flip the blx to a bl and warn. */
24335 const char *name = S_GET_NAME (fixP->fx_addsy);
24336 newval = 0xeb000000;
24337 as_warn_where (fixP->fx_file, fixP->fx_line,
24338 _("blx to '%s' an ARM ISA state function changed to bl"),
24339 name);
24340 md_number_to_chars (buf, newval, INSN_SIZE);
24341 temp = 3;
24342 fixP->fx_done = 1;
24343 }
24344
24345 #ifdef OBJ_ELF
24346 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
24347 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
24348 #endif
24349
24350 arm_branch_common:
24351 /* We are going to store value (shifted right by two) in the
24352 instruction, in a 24 bit, signed field. Bits 26 through 32 either
24353 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
24354 also be clear. */
24355 if (value & temp)
24356 as_bad_where (fixP->fx_file, fixP->fx_line,
24357 _("misaligned branch destination"));
24358 if ((value & (offsetT)0xfe000000) != (offsetT)0
24359 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
24360 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
24361
24362 if (fixP->fx_done || !seg->use_rela_p)
24363 {
24364 newval = md_chars_to_number (buf, INSN_SIZE);
24365 newval |= (value >> 2) & 0x00ffffff;
24366 /* Set the H bit on BLX instructions. */
24367 if (temp == 1)
24368 {
24369 if (value & 2)
24370 newval |= 0x01000000;
24371 else
24372 newval &= ~0x01000000;
24373 }
24374 md_number_to_chars (buf, newval, INSN_SIZE);
24375 }
24376 break;
24377
24378 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
24379 /* CBZ can only branch forward. */
24380
24381 /* Attempts to use CBZ to branch to the next instruction
24382 (which, strictly speaking, are prohibited) will be turned into
24383 no-ops.
24384
24385 FIXME: It may be better to remove the instruction completely and
24386 perform relaxation. */
24387 if (value == -2)
24388 {
24389 newval = md_chars_to_number (buf, THUMB_SIZE);
24390 newval = 0xbf00; /* NOP encoding T1 */
24391 md_number_to_chars (buf, newval, THUMB_SIZE);
24392 }
24393 else
24394 {
24395 if (value & ~0x7e)
24396 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
24397
24398 if (fixP->fx_done || !seg->use_rela_p)
24399 {
24400 newval = md_chars_to_number (buf, THUMB_SIZE);
24401 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
24402 md_number_to_chars (buf, newval, THUMB_SIZE);
24403 }
24404 }
24405 break;
24406
24407 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
24408 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
24409 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
24410
24411 if (fixP->fx_done || !seg->use_rela_p)
24412 {
24413 newval = md_chars_to_number (buf, THUMB_SIZE);
24414 newval |= (value & 0x1ff) >> 1;
24415 md_number_to_chars (buf, newval, THUMB_SIZE);
24416 }
24417 break;
24418
24419 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
24420 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
24421 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
24422
24423 if (fixP->fx_done || !seg->use_rela_p)
24424 {
24425 newval = md_chars_to_number (buf, THUMB_SIZE);
24426 newval |= (value & 0xfff) >> 1;
24427 md_number_to_chars (buf, newval, THUMB_SIZE);
24428 }
24429 break;
24430
24431 case BFD_RELOC_THUMB_PCREL_BRANCH20:
24432 if (fixP->fx_addsy
24433 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
24434 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
24435 && ARM_IS_FUNC (fixP->fx_addsy)
24436 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
24437 {
24438 /* Force a relocation for a branch 20 bits wide. */
24439 fixP->fx_done = 0;
24440 }
24441 if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
24442 as_bad_where (fixP->fx_file, fixP->fx_line,
24443 _("conditional branch out of range"));
24444
24445 if (fixP->fx_done || !seg->use_rela_p)
24446 {
24447 offsetT newval2;
24448 addressT S, J1, J2, lo, hi;
24449
24450 S = (value & 0x00100000) >> 20;
24451 J2 = (value & 0x00080000) >> 19;
24452 J1 = (value & 0x00040000) >> 18;
24453 hi = (value & 0x0003f000) >> 12;
24454 lo = (value & 0x00000ffe) >> 1;
24455
24456 newval = md_chars_to_number (buf, THUMB_SIZE);
24457 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
24458 newval |= (S << 10) | hi;
24459 newval2 |= (J1 << 13) | (J2 << 11) | lo;
24460 md_number_to_chars (buf, newval, THUMB_SIZE);
24461 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
24462 }
24463 break;
24464
24465 case BFD_RELOC_THUMB_PCREL_BLX:
24466 /* If there is a blx from a thumb state function to
24467 another thumb function flip this to a bl and warn
24468 about it. */
24469
24470 if (fixP->fx_addsy
24471 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
24472 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
24473 && THUMB_IS_FUNC (fixP->fx_addsy))
24474 {
24475 const char *name = S_GET_NAME (fixP->fx_addsy);
24476 as_warn_where (fixP->fx_file, fixP->fx_line,
24477 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
24478 name);
24479 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
24480 newval = newval | 0x1000;
24481 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
24482 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
24483 fixP->fx_done = 1;
24484 }
24485
24486
24487 goto thumb_bl_common;
24488
24489 case BFD_RELOC_THUMB_PCREL_BRANCH23:
24490 /* A bl from Thumb state ISA to an internal ARM state function
24491 is converted to a blx. */
24492 if (fixP->fx_addsy
24493 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
24494 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
24495 && ARM_IS_FUNC (fixP->fx_addsy)
24496 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
24497 {
24498 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
24499 newval = newval & ~0x1000;
24500 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
24501 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
24502 fixP->fx_done = 1;
24503 }
24504
24505 thumb_bl_common:
24506
24507 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
24508 /* For a BLX instruction, make sure that the relocation is rounded up
24509 to a word boundary. This follows the semantics of the instruction
24510 which specifies that bit 1 of the target address will come from bit
24511 1 of the base address. */
24512 value = (value + 3) & ~ 3;
24513
24514 #ifdef OBJ_ELF
24515 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
24516 && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
24517 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
24518 #endif
24519
24520 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
24521 {
24522 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)))
24523 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
24524 else if ((value & ~0x1ffffff)
24525 && ((value & ~0x1ffffff) != ~0x1ffffff))
24526 as_bad_where (fixP->fx_file, fixP->fx_line,
24527 _("Thumb2 branch out of range"));
24528 }
24529
24530 if (fixP->fx_done || !seg->use_rela_p)
24531 encode_thumb2_b_bl_offset (buf, value);
24532
24533 break;
24534
24535 case BFD_RELOC_THUMB_PCREL_BRANCH25:
24536 if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
24537 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
24538
24539 if (fixP->fx_done || !seg->use_rela_p)
24540 encode_thumb2_b_bl_offset (buf, value);
24541
24542 break;
24543
24544 case BFD_RELOC_8:
24545 if (fixP->fx_done || !seg->use_rela_p)
24546 *buf = value;
24547 break;
24548
24549 case BFD_RELOC_16:
24550 if (fixP->fx_done || !seg->use_rela_p)
24551 md_number_to_chars (buf, value, 2);
24552 break;
24553
24554 #ifdef OBJ_ELF
24555 case BFD_RELOC_ARM_TLS_CALL:
24556 case BFD_RELOC_ARM_THM_TLS_CALL:
24557 case BFD_RELOC_ARM_TLS_DESCSEQ:
24558 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
24559 case BFD_RELOC_ARM_TLS_GOTDESC:
24560 case BFD_RELOC_ARM_TLS_GD32:
24561 case BFD_RELOC_ARM_TLS_LE32:
24562 case BFD_RELOC_ARM_TLS_IE32:
24563 case BFD_RELOC_ARM_TLS_LDM32:
24564 case BFD_RELOC_ARM_TLS_LDO32:
24565 S_SET_THREAD_LOCAL (fixP->fx_addsy);
24566 break;
24567
24568 /* Same handling as above, but with the arm_fdpic guard. */
24569 case BFD_RELOC_ARM_TLS_GD32_FDPIC:
24570 case BFD_RELOC_ARM_TLS_IE32_FDPIC:
24571 case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
24572 if (arm_fdpic)
24573 {
24574 S_SET_THREAD_LOCAL (fixP->fx_addsy);
24575 }
24576 else
24577 {
24578 as_bad_where (fixP->fx_file, fixP->fx_line,
24579 _("Relocation supported only in FDPIC mode"));
24580 }
24581 break;
24582
24583 case BFD_RELOC_ARM_GOT32:
24584 case BFD_RELOC_ARM_GOTOFF:
24585 break;
24586
24587 case BFD_RELOC_ARM_GOT_PREL:
24588 if (fixP->fx_done || !seg->use_rela_p)
24589 md_number_to_chars (buf, value, 4);
24590 break;
24591
24592 case BFD_RELOC_ARM_TARGET2:
24593 /* TARGET2 is not partial-inplace, so we need to write the
24594 addend here for REL targets, because it won't be written out
24595 during reloc processing later. */
24596 if (fixP->fx_done || !seg->use_rela_p)
24597 md_number_to_chars (buf, fixP->fx_offset, 4);
24598 break;
24599
24600 /* Relocations for FDPIC. */
24601 case BFD_RELOC_ARM_GOTFUNCDESC:
24602 case BFD_RELOC_ARM_GOTOFFFUNCDESC:
24603 case BFD_RELOC_ARM_FUNCDESC:
24604 if (arm_fdpic)
24605 {
24606 if (fixP->fx_done || !seg->use_rela_p)
24607 md_number_to_chars (buf, 0, 4);
24608 }
24609 else
24610 {
24611 as_bad_where (fixP->fx_file, fixP->fx_line,
24612 _("Relocation supported only in FDPIC mode"));
24613 }
24614 break;
24615 #endif
24616
24617 case BFD_RELOC_RVA:
24618 case BFD_RELOC_32:
24619 case BFD_RELOC_ARM_TARGET1:
24620 case BFD_RELOC_ARM_ROSEGREL32:
24621 case BFD_RELOC_ARM_SBREL32:
24622 case BFD_RELOC_32_PCREL:
24623 #ifdef TE_PE
24624 case BFD_RELOC_32_SECREL:
24625 #endif
24626 if (fixP->fx_done || !seg->use_rela_p)
24627 #ifdef TE_WINCE
24628 /* For WinCE we only do this for pcrel fixups. */
24629 if (fixP->fx_done || fixP->fx_pcrel)
24630 #endif
24631 md_number_to_chars (buf, value, 4);
24632 break;
24633
24634 #ifdef OBJ_ELF
24635 case BFD_RELOC_ARM_PREL31:
24636 if (fixP->fx_done || !seg->use_rela_p)
24637 {
24638 newval = md_chars_to_number (buf, 4) & 0x80000000;
24639 if ((value ^ (value >> 1)) & 0x40000000)
24640 {
24641 as_bad_where (fixP->fx_file, fixP->fx_line,
24642 _("rel31 relocation overflow"));
24643 }
24644 newval |= value & 0x7fffffff;
24645 md_number_to_chars (buf, newval, 4);
24646 }
24647 break;
24648 #endif
24649
24650 case BFD_RELOC_ARM_CP_OFF_IMM:
24651 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
24652 case BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM:
24653 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM)
24654 newval = md_chars_to_number (buf, INSN_SIZE);
24655 else
24656 newval = get_thumb32_insn (buf);
24657 if ((newval & 0x0f200f00) == 0x0d000900)
24658 {
24659 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
24660 has permitted values that are multiples of 2, in the range 0
24661 to 510. */
24662 if (value < -510 || value > 510 || (value & 1))
24663 as_bad_where (fixP->fx_file, fixP->fx_line,
24664 _("co-processor offset out of range"));
24665 }
24666 else if ((newval & 0xfe001f80) == 0xec000f80)
24667 {
24668 if (value < -511 || value > 512 || (value & 3))
24669 as_bad_where (fixP->fx_file, fixP->fx_line,
24670 _("co-processor offset out of range"));
24671 }
24672 else if (value < -1023 || value > 1023 || (value & 3))
24673 as_bad_where (fixP->fx_file, fixP->fx_line,
24674 _("co-processor offset out of range"));
24675 cp_off_common:
24676 sign = value > 0;
24677 if (value < 0)
24678 value = -value;
24679 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
24680 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
24681 newval = md_chars_to_number (buf, INSN_SIZE);
24682 else
24683 newval = get_thumb32_insn (buf);
24684 if (value == 0)
24685 {
24686 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM)
24687 newval &= 0xffffff80;
24688 else
24689 newval &= 0xffffff00;
24690 }
24691 else
24692 {
24693 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM)
24694 newval &= 0xff7fff80;
24695 else
24696 newval &= 0xff7fff00;
24697 if ((newval & 0x0f200f00) == 0x0d000900)
24698 {
24699 /* This is a fp16 vstr/vldr.
24700
24701 It requires the immediate offset in the instruction is shifted
24702 left by 1 to be a half-word offset.
24703
24704 Here, left shift by 1 first, and later right shift by 2
24705 should get the right offset. */
24706 value <<= 1;
24707 }
24708 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
24709 }
24710 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
24711 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
24712 md_number_to_chars (buf, newval, INSN_SIZE);
24713 else
24714 put_thumb32_insn (buf, newval);
24715 break;
24716
24717 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
24718 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
24719 if (value < -255 || value > 255)
24720 as_bad_where (fixP->fx_file, fixP->fx_line,
24721 _("co-processor offset out of range"));
24722 value *= 4;
24723 goto cp_off_common;
24724
24725 case BFD_RELOC_ARM_THUMB_OFFSET:
24726 newval = md_chars_to_number (buf, THUMB_SIZE);
24727 /* Exactly what ranges, and where the offset is inserted depends
24728 on the type of instruction, we can establish this from the
24729 top 4 bits. */
24730 switch (newval >> 12)
24731 {
24732 case 4: /* PC load. */
24733 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
24734 forced to zero for these loads; md_pcrel_from has already
24735 compensated for this. */
24736 if (value & 3)
24737 as_bad_where (fixP->fx_file, fixP->fx_line,
24738 _("invalid offset, target not word aligned (0x%08lX)"),
24739 (((unsigned long) fixP->fx_frag->fr_address
24740 + (unsigned long) fixP->fx_where) & ~3)
24741 + (unsigned long) value);
24742
24743 if (value & ~0x3fc)
24744 as_bad_where (fixP->fx_file, fixP->fx_line,
24745 _("invalid offset, value too big (0x%08lX)"),
24746 (long) value);
24747
24748 newval |= value >> 2;
24749 break;
24750
24751 case 9: /* SP load/store. */
24752 if (value & ~0x3fc)
24753 as_bad_where (fixP->fx_file, fixP->fx_line,
24754 _("invalid offset, value too big (0x%08lX)"),
24755 (long) value);
24756 newval |= value >> 2;
24757 break;
24758
24759 case 6: /* Word load/store. */
24760 if (value & ~0x7c)
24761 as_bad_where (fixP->fx_file, fixP->fx_line,
24762 _("invalid offset, value too big (0x%08lX)"),
24763 (long) value);
24764 newval |= value << 4; /* 6 - 2. */
24765 break;
24766
24767 case 7: /* Byte load/store. */
24768 if (value & ~0x1f)
24769 as_bad_where (fixP->fx_file, fixP->fx_line,
24770 _("invalid offset, value too big (0x%08lX)"),
24771 (long) value);
24772 newval |= value << 6;
24773 break;
24774
24775 case 8: /* Halfword load/store. */
24776 if (value & ~0x3e)
24777 as_bad_where (fixP->fx_file, fixP->fx_line,
24778 _("invalid offset, value too big (0x%08lX)"),
24779 (long) value);
24780 newval |= value << 5; /* 6 - 1. */
24781 break;
24782
24783 default:
24784 as_bad_where (fixP->fx_file, fixP->fx_line,
24785 "Unable to process relocation for thumb opcode: %lx",
24786 (unsigned long) newval);
24787 break;
24788 }
24789 md_number_to_chars (buf, newval, THUMB_SIZE);
24790 break;
24791
24792 case BFD_RELOC_ARM_THUMB_ADD:
24793 /* This is a complicated relocation, since we use it for all of
24794 the following immediate relocations:
24795
24796 3bit ADD/SUB
24797 8bit ADD/SUB
24798 9bit ADD/SUB SP word-aligned
24799 10bit ADD PC/SP word-aligned
24800
24801 The type of instruction being processed is encoded in the
24802 instruction field:
24803
24804 0x8000 SUB
24805 0x00F0 Rd
24806 0x000F Rs
24807 */
24808 newval = md_chars_to_number (buf, THUMB_SIZE);
24809 {
24810 int rd = (newval >> 4) & 0xf;
24811 int rs = newval & 0xf;
24812 int subtract = !!(newval & 0x8000);
24813
24814 /* Check for HI regs, only very restricted cases allowed:
24815 Adjusting SP, and using PC or SP to get an address. */
24816 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
24817 || (rs > 7 && rs != REG_SP && rs != REG_PC))
24818 as_bad_where (fixP->fx_file, fixP->fx_line,
24819 _("invalid Hi register with immediate"));
24820
24821 /* If value is negative, choose the opposite instruction. */
24822 if (value < 0)
24823 {
24824 value = -value;
24825 subtract = !subtract;
24826 if (value < 0)
24827 as_bad_where (fixP->fx_file, fixP->fx_line,
24828 _("immediate value out of range"));
24829 }
24830
24831 if (rd == REG_SP)
24832 {
24833 if (value & ~0x1fc)
24834 as_bad_where (fixP->fx_file, fixP->fx_line,
24835 _("invalid immediate for stack address calculation"));
24836 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
24837 newval |= value >> 2;
24838 }
24839 else if (rs == REG_PC || rs == REG_SP)
24840 {
24841 /* PR gas/18541. If the addition is for a defined symbol
24842 within range of an ADR instruction then accept it. */
24843 if (subtract
24844 && value == 4
24845 && fixP->fx_addsy != NULL)
24846 {
24847 subtract = 0;
24848
24849 if (! S_IS_DEFINED (fixP->fx_addsy)
24850 || S_GET_SEGMENT (fixP->fx_addsy) != seg
24851 || S_IS_WEAK (fixP->fx_addsy))
24852 {
24853 as_bad_where (fixP->fx_file, fixP->fx_line,
24854 _("address calculation needs a strongly defined nearby symbol"));
24855 }
24856 else
24857 {
24858 offsetT v = fixP->fx_where + fixP->fx_frag->fr_address;
24859
24860 /* Round up to the next 4-byte boundary. */
24861 if (v & 3)
24862 v = (v + 3) & ~ 3;
24863 else
24864 v += 4;
24865 v = S_GET_VALUE (fixP->fx_addsy) - v;
24866
24867 if (v & ~0x3fc)
24868 {
24869 as_bad_where (fixP->fx_file, fixP->fx_line,
24870 _("symbol too far away"));
24871 }
24872 else
24873 {
24874 fixP->fx_done = 1;
24875 value = v;
24876 }
24877 }
24878 }
24879
24880 if (subtract || value & ~0x3fc)
24881 as_bad_where (fixP->fx_file, fixP->fx_line,
24882 _("invalid immediate for address calculation (value = 0x%08lX)"),
24883 (unsigned long) (subtract ? - value : value));
24884 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
24885 newval |= rd << 8;
24886 newval |= value >> 2;
24887 }
24888 else if (rs == rd)
24889 {
24890 if (value & ~0xff)
24891 as_bad_where (fixP->fx_file, fixP->fx_line,
24892 _("immediate value out of range"));
24893 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
24894 newval |= (rd << 8) | value;
24895 }
24896 else
24897 {
24898 if (value & ~0x7)
24899 as_bad_where (fixP->fx_file, fixP->fx_line,
24900 _("immediate value out of range"));
24901 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
24902 newval |= rd | (rs << 3) | (value << 6);
24903 }
24904 }
24905 md_number_to_chars (buf, newval, THUMB_SIZE);
24906 break;
24907
24908 case BFD_RELOC_ARM_THUMB_IMM:
24909 newval = md_chars_to_number (buf, THUMB_SIZE);
24910 if (value < 0 || value > 255)
24911 as_bad_where (fixP->fx_file, fixP->fx_line,
24912 _("invalid immediate: %ld is out of range"),
24913 (long) value);
24914 newval |= value;
24915 md_number_to_chars (buf, newval, THUMB_SIZE);
24916 break;
24917
24918 case BFD_RELOC_ARM_THUMB_SHIFT:
24919 /* 5bit shift value (0..32). LSL cannot take 32. */
24920 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
24921 temp = newval & 0xf800;
24922 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
24923 as_bad_where (fixP->fx_file, fixP->fx_line,
24924 _("invalid shift value: %ld"), (long) value);
24925 /* Shifts of zero must be encoded as LSL. */
24926 if (value == 0)
24927 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
24928 /* Shifts of 32 are encoded as zero. */
24929 else if (value == 32)
24930 value = 0;
24931 newval |= value << 6;
24932 md_number_to_chars (buf, newval, THUMB_SIZE);
24933 break;
24934
24935 case BFD_RELOC_VTABLE_INHERIT:
24936 case BFD_RELOC_VTABLE_ENTRY:
24937 fixP->fx_done = 0;
24938 return;
24939
24940 case BFD_RELOC_ARM_MOVW:
24941 case BFD_RELOC_ARM_MOVT:
24942 case BFD_RELOC_ARM_THUMB_MOVW:
24943 case BFD_RELOC_ARM_THUMB_MOVT:
24944 if (fixP->fx_done || !seg->use_rela_p)
24945 {
24946 /* REL format relocations are limited to a 16-bit addend. */
24947 if (!fixP->fx_done)
24948 {
24949 if (value < -0x8000 || value > 0x7fff)
24950 as_bad_where (fixP->fx_file, fixP->fx_line,
24951 _("offset out of range"));
24952 }
24953 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
24954 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
24955 {
24956 value >>= 16;
24957 }
24958
24959 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
24960 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
24961 {
24962 newval = get_thumb32_insn (buf);
24963 newval &= 0xfbf08f00;
24964 newval |= (value & 0xf000) << 4;
24965 newval |= (value & 0x0800) << 15;
24966 newval |= (value & 0x0700) << 4;
24967 newval |= (value & 0x00ff);
24968 put_thumb32_insn (buf, newval);
24969 }
24970 else
24971 {
24972 newval = md_chars_to_number (buf, 4);
24973 newval &= 0xfff0f000;
24974 newval |= value & 0x0fff;
24975 newval |= (value & 0xf000) << 4;
24976 md_number_to_chars (buf, newval, 4);
24977 }
24978 }
24979 return;
24980
24981 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
24982 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
24983 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
24984 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
24985 gas_assert (!fixP->fx_done);
24986 {
24987 bfd_vma insn;
24988 bfd_boolean is_mov;
24989 bfd_vma encoded_addend = value;
24990
24991 /* Check that addend can be encoded in instruction. */
24992 if (!seg->use_rela_p && (value < 0 || value > 255))
24993 as_bad_where (fixP->fx_file, fixP->fx_line,
24994 _("the offset 0x%08lX is not representable"),
24995 (unsigned long) encoded_addend);
24996
24997 /* Extract the instruction. */
24998 insn = md_chars_to_number (buf, THUMB_SIZE);
24999 is_mov = (insn & 0xf800) == 0x2000;
25000
25001 /* Encode insn. */
25002 if (is_mov)
25003 {
25004 if (!seg->use_rela_p)
25005 insn |= encoded_addend;
25006 }
25007 else
25008 {
25009 int rd, rs;
25010
25011 /* Extract the instruction. */
25012 /* Encoding is the following
25013 0x8000 SUB
25014 0x00F0 Rd
25015 0x000F Rs
25016 */
25017 /* The following conditions must be true :
25018 - ADD
25019 - Rd == Rs
25020 - Rd <= 7
25021 */
25022 rd = (insn >> 4) & 0xf;
25023 rs = insn & 0xf;
25024 if ((insn & 0x8000) || (rd != rs) || rd > 7)
25025 as_bad_where (fixP->fx_file, fixP->fx_line,
25026 _("Unable to process relocation for thumb opcode: %lx"),
25027 (unsigned long) insn);
25028
25029 /* Encode as ADD immediate8 thumb 1 code. */
25030 insn = 0x3000 | (rd << 8);
25031
25032 /* Place the encoded addend into the first 8 bits of the
25033 instruction. */
25034 if (!seg->use_rela_p)
25035 insn |= encoded_addend;
25036 }
25037
25038 /* Update the instruction. */
25039 md_number_to_chars (buf, insn, THUMB_SIZE);
25040 }
25041 break;
25042
25043 case BFD_RELOC_ARM_ALU_PC_G0_NC:
25044 case BFD_RELOC_ARM_ALU_PC_G0:
25045 case BFD_RELOC_ARM_ALU_PC_G1_NC:
25046 case BFD_RELOC_ARM_ALU_PC_G1:
25047 case BFD_RELOC_ARM_ALU_PC_G2:
25048 case BFD_RELOC_ARM_ALU_SB_G0_NC:
25049 case BFD_RELOC_ARM_ALU_SB_G0:
25050 case BFD_RELOC_ARM_ALU_SB_G1_NC:
25051 case BFD_RELOC_ARM_ALU_SB_G1:
25052 case BFD_RELOC_ARM_ALU_SB_G2:
25053 gas_assert (!fixP->fx_done);
25054 if (!seg->use_rela_p)
25055 {
25056 bfd_vma insn;
25057 bfd_vma encoded_addend;
25058 bfd_vma addend_abs = llabs (value);
25059
25060 /* Check that the absolute value of the addend can be
25061 expressed as an 8-bit constant plus a rotation. */
25062 encoded_addend = encode_arm_immediate (addend_abs);
25063 if (encoded_addend == (unsigned int) FAIL)
25064 as_bad_where (fixP->fx_file, fixP->fx_line,
25065 _("the offset 0x%08lX is not representable"),
25066 (unsigned long) addend_abs);
25067
25068 /* Extract the instruction. */
25069 insn = md_chars_to_number (buf, INSN_SIZE);
25070
25071 /* If the addend is positive, use an ADD instruction.
25072 Otherwise use a SUB. Take care not to destroy the S bit. */
25073 insn &= 0xff1fffff;
25074 if (value < 0)
25075 insn |= 1 << 22;
25076 else
25077 insn |= 1 << 23;
25078
25079 /* Place the encoded addend into the first 12 bits of the
25080 instruction. */
25081 insn &= 0xfffff000;
25082 insn |= encoded_addend;
25083
25084 /* Update the instruction. */
25085 md_number_to_chars (buf, insn, INSN_SIZE);
25086 }
25087 break;
25088
25089 case BFD_RELOC_ARM_LDR_PC_G0:
25090 case BFD_RELOC_ARM_LDR_PC_G1:
25091 case BFD_RELOC_ARM_LDR_PC_G2:
25092 case BFD_RELOC_ARM_LDR_SB_G0:
25093 case BFD_RELOC_ARM_LDR_SB_G1:
25094 case BFD_RELOC_ARM_LDR_SB_G2:
25095 gas_assert (!fixP->fx_done);
25096 if (!seg->use_rela_p)
25097 {
25098 bfd_vma insn;
25099 bfd_vma addend_abs = llabs (value);
25100
25101 /* Check that the absolute value of the addend can be
25102 encoded in 12 bits. */
25103 if (addend_abs >= 0x1000)
25104 as_bad_where (fixP->fx_file, fixP->fx_line,
25105 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
25106 (unsigned long) addend_abs);
25107
25108 /* Extract the instruction. */
25109 insn = md_chars_to_number (buf, INSN_SIZE);
25110
25111 /* If the addend is negative, clear bit 23 of the instruction.
25112 Otherwise set it. */
25113 if (value < 0)
25114 insn &= ~(1 << 23);
25115 else
25116 insn |= 1 << 23;
25117
25118 /* Place the absolute value of the addend into the first 12 bits
25119 of the instruction. */
25120 insn &= 0xfffff000;
25121 insn |= addend_abs;
25122
25123 /* Update the instruction. */
25124 md_number_to_chars (buf, insn, INSN_SIZE);
25125 }
25126 break;
25127
25128 case BFD_RELOC_ARM_LDRS_PC_G0:
25129 case BFD_RELOC_ARM_LDRS_PC_G1:
25130 case BFD_RELOC_ARM_LDRS_PC_G2:
25131 case BFD_RELOC_ARM_LDRS_SB_G0:
25132 case BFD_RELOC_ARM_LDRS_SB_G1:
25133 case BFD_RELOC_ARM_LDRS_SB_G2:
25134 gas_assert (!fixP->fx_done);
25135 if (!seg->use_rela_p)
25136 {
25137 bfd_vma insn;
25138 bfd_vma addend_abs = llabs (value);
25139
25140 /* Check that the absolute value of the addend can be
25141 encoded in 8 bits. */
25142 if (addend_abs >= 0x100)
25143 as_bad_where (fixP->fx_file, fixP->fx_line,
25144 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
25145 (unsigned long) addend_abs);
25146
25147 /* Extract the instruction. */
25148 insn = md_chars_to_number (buf, INSN_SIZE);
25149
25150 /* If the addend is negative, clear bit 23 of the instruction.
25151 Otherwise set it. */
25152 if (value < 0)
25153 insn &= ~(1 << 23);
25154 else
25155 insn |= 1 << 23;
25156
25157 /* Place the first four bits of the absolute value of the addend
25158 into the first 4 bits of the instruction, and the remaining
25159 four into bits 8 .. 11. */
25160 insn &= 0xfffff0f0;
25161 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
25162
25163 /* Update the instruction. */
25164 md_number_to_chars (buf, insn, INSN_SIZE);
25165 }
25166 break;
25167
25168 case BFD_RELOC_ARM_LDC_PC_G0:
25169 case BFD_RELOC_ARM_LDC_PC_G1:
25170 case BFD_RELOC_ARM_LDC_PC_G2:
25171 case BFD_RELOC_ARM_LDC_SB_G0:
25172 case BFD_RELOC_ARM_LDC_SB_G1:
25173 case BFD_RELOC_ARM_LDC_SB_G2:
25174 gas_assert (!fixP->fx_done);
25175 if (!seg->use_rela_p)
25176 {
25177 bfd_vma insn;
25178 bfd_vma addend_abs = llabs (value);
25179
25180 /* Check that the absolute value of the addend is a multiple of
25181 four and, when divided by four, fits in 8 bits. */
25182 if (addend_abs & 0x3)
25183 as_bad_where (fixP->fx_file, fixP->fx_line,
25184 _("bad offset 0x%08lX (must be word-aligned)"),
25185 (unsigned long) addend_abs);
25186
25187 if ((addend_abs >> 2) > 0xff)
25188 as_bad_where (fixP->fx_file, fixP->fx_line,
25189 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
25190 (unsigned long) addend_abs);
25191
25192 /* Extract the instruction. */
25193 insn = md_chars_to_number (buf, INSN_SIZE);
25194
25195 /* If the addend is negative, clear bit 23 of the instruction.
25196 Otherwise set it. */
25197 if (value < 0)
25198 insn &= ~(1 << 23);
25199 else
25200 insn |= 1 << 23;
25201
25202 /* Place the addend (divided by four) into the first eight
25203 bits of the instruction. */
25204 insn &= 0xfffffff0;
25205 insn |= addend_abs >> 2;
25206
25207 /* Update the instruction. */
25208 md_number_to_chars (buf, insn, INSN_SIZE);
25209 }
25210 break;
25211
25212 case BFD_RELOC_THUMB_PCREL_BRANCH5:
25213 if (fixP->fx_addsy
25214 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25215 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25216 && ARM_IS_FUNC (fixP->fx_addsy)
25217 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
25218 {
25219 /* Force a relocation for a branch 5 bits wide. */
25220 fixP->fx_done = 0;
25221 }
25222 if (v8_1_branch_value_check (value, 5, FALSE) == FAIL)
25223 as_bad_where (fixP->fx_file, fixP->fx_line,
25224 BAD_BRANCH_OFF);
25225
25226 if (fixP->fx_done || !seg->use_rela_p)
25227 {
25228 addressT boff = value >> 1;
25229
25230 newval = md_chars_to_number (buf, THUMB_SIZE);
25231 newval |= (boff << 7);
25232 md_number_to_chars (buf, newval, THUMB_SIZE);
25233 }
25234 break;
25235
25236 case BFD_RELOC_THUMB_PCREL_BFCSEL:
25237 if (fixP->fx_addsy
25238 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25239 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25240 && ARM_IS_FUNC (fixP->fx_addsy)
25241 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
25242 {
25243 fixP->fx_done = 0;
25244 }
25245 if ((value & ~0x7f) && ((value & ~0x3f) != ~0x3f))
25246 as_bad_where (fixP->fx_file, fixP->fx_line,
25247 _("branch out of range"));
25248
25249 if (fixP->fx_done || !seg->use_rela_p)
25250 {
25251 newval = md_chars_to_number (buf, THUMB_SIZE);
25252
25253 addressT boff = ((newval & 0x0780) >> 7) << 1;
25254 addressT diff = value - boff;
25255
25256 if (diff == 4)
25257 {
25258 newval |= 1 << 1; /* T bit. */
25259 }
25260 else if (diff != 2)
25261 {
25262 as_bad_where (fixP->fx_file, fixP->fx_line,
25263 _("out of range label-relative fixup value"));
25264 }
25265 md_number_to_chars (buf, newval, THUMB_SIZE);
25266 }
25267 break;
25268
25269 case BFD_RELOC_ARM_THUMB_BF17:
25270 if (fixP->fx_addsy
25271 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25272 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25273 && ARM_IS_FUNC (fixP->fx_addsy)
25274 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
25275 {
25276 /* Force a relocation for a branch 17 bits wide. */
25277 fixP->fx_done = 0;
25278 }
25279
25280 if (v8_1_branch_value_check (value, 17, TRUE) == FAIL)
25281 as_bad_where (fixP->fx_file, fixP->fx_line,
25282 BAD_BRANCH_OFF);
25283
25284 if (fixP->fx_done || !seg->use_rela_p)
25285 {
25286 offsetT newval2;
25287 addressT immA, immB, immC;
25288
25289 immA = (value & 0x0001f000) >> 12;
25290 immB = (value & 0x00000ffc) >> 2;
25291 immC = (value & 0x00000002) >> 1;
25292
25293 newval = md_chars_to_number (buf, THUMB_SIZE);
25294 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
25295 newval |= immA;
25296 newval2 |= (immC << 11) | (immB << 1);
25297 md_number_to_chars (buf, newval, THUMB_SIZE);
25298 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
25299 }
25300 break;
25301
25302 case BFD_RELOC_ARM_THUMB_BF19:
25303 if (fixP->fx_addsy
25304 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25305 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25306 && ARM_IS_FUNC (fixP->fx_addsy)
25307 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
25308 {
25309 /* Force a relocation for a branch 19 bits wide. */
25310 fixP->fx_done = 0;
25311 }
25312
25313 if (v8_1_branch_value_check (value, 19, TRUE) == FAIL)
25314 as_bad_where (fixP->fx_file, fixP->fx_line,
25315 BAD_BRANCH_OFF);
25316
25317 if (fixP->fx_done || !seg->use_rela_p)
25318 {
25319 offsetT newval2;
25320 addressT immA, immB, immC;
25321
25322 immA = (value & 0x0007f000) >> 12;
25323 immB = (value & 0x00000ffc) >> 2;
25324 immC = (value & 0x00000002) >> 1;
25325
25326 newval = md_chars_to_number (buf, THUMB_SIZE);
25327 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
25328 newval |= immA;
25329 newval2 |= (immC << 11) | (immB << 1);
25330 md_number_to_chars (buf, newval, THUMB_SIZE);
25331 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
25332 }
25333 break;
25334
25335 case BFD_RELOC_ARM_THUMB_BF13:
25336 if (fixP->fx_addsy
25337 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25338 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25339 && ARM_IS_FUNC (fixP->fx_addsy)
25340 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
25341 {
25342 /* Force a relocation for a branch 13 bits wide. */
25343 fixP->fx_done = 0;
25344 }
25345
25346 if (v8_1_branch_value_check (value, 13, TRUE) == FAIL)
25347 as_bad_where (fixP->fx_file, fixP->fx_line,
25348 BAD_BRANCH_OFF);
25349
25350 if (fixP->fx_done || !seg->use_rela_p)
25351 {
25352 offsetT newval2;
25353 addressT immA, immB, immC;
25354
25355 immA = (value & 0x00001000) >> 12;
25356 immB = (value & 0x00000ffc) >> 2;
25357 immC = (value & 0x00000002) >> 1;
25358
25359 newval = md_chars_to_number (buf, THUMB_SIZE);
25360 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
25361 newval |= immA;
25362 newval2 |= (immC << 11) | (immB << 1);
25363 md_number_to_chars (buf, newval, THUMB_SIZE);
25364 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
25365 }
25366 break;
25367
25368 case BFD_RELOC_ARM_THUMB_LOOP12:
25369 if (fixP->fx_addsy
25370 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
25371 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
25372 && ARM_IS_FUNC (fixP->fx_addsy)
25373 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
25374 {
25375 /* Force a relocation for a branch 12 bits wide. */
25376 fixP->fx_done = 0;
25377 }
25378
25379 bfd_vma insn = get_thumb32_insn (buf);
25380 /* le lr, <label> or le <label> */
25381 if (((insn & 0xffffffff) == 0xf00fc001)
25382 || ((insn & 0xffffffff) == 0xf02fc001))
25383 value = -value;
25384
25385 if (v8_1_branch_value_check (value, 12, FALSE) == FAIL)
25386 as_bad_where (fixP->fx_file, fixP->fx_line,
25387 BAD_BRANCH_OFF);
25388 if (fixP->fx_done || !seg->use_rela_p)
25389 {
25390 addressT imml, immh;
25391
25392 immh = (value & 0x00000ffc) >> 2;
25393 imml = (value & 0x00000002) >> 1;
25394
25395 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
25396 newval |= (imml << 11) | (immh << 1);
25397 md_number_to_chars (buf + THUMB_SIZE, newval, THUMB_SIZE);
25398 }
25399 break;
25400
25401 case BFD_RELOC_ARM_V4BX:
25402 /* This will need to go in the object file. */
25403 fixP->fx_done = 0;
25404 break;
25405
25406 case BFD_RELOC_UNUSED:
25407 default:
25408 as_bad_where (fixP->fx_file, fixP->fx_line,
25409 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
25410 }
25411 }
25412
/* Translate internal representation of relocation info to BFD target
   format.  Returns a freshly-allocated arelent describing the fixup,
   or NULL (after reporting an error with as_bad_where) when the fixup
   cannot be represented in the output object format.  */

arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  reloc = XNEW (arelent);

  reloc->sym_ptr_ptr = XNEW (asymbol *);
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (fixp->fx_pcrel)
    {
      /* For RELA targets keep the addend PC-relative; for REL targets
	 the addend lives in the section contents and the reloc's own
	 address is recorded instead.  */
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  switch (fixp->fx_r_type)
    {
    /* Plain data and MOVW/MOVT relocations: switch to the PC-relative
       variant when the fixup is PC-relative, otherwise fall through to
       the shared "pass the type straight through" case below.  */
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    /* These relocation types map one-to-one onto BFD codes.  */
    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_THUMB_PCREL_BLX:
#ifdef OBJ_ELF
      /* EABI v4 onwards folds Thumb BLX into the BRANCH23 reloc; the
	 linker is responsible for any needed mode change.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	code = BFD_RELOC_THUMB_PCREL_BLX;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then the a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_GOT_PREL:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
    case BFD_RELOC_ARM_V4BX:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
    case BFD_RELOC_ARM_GOTFUNCDESC:
    case BFD_RELOC_ARM_GOTOFFFUNCDESC:
    case BFD_RELOC_ARM_FUNCDESC:
    case BFD_RELOC_ARM_THUMB_BF17:
    case BFD_RELOC_ARM_THUMB_BF19:
    case BFD_RELOC_ARM_THUMB_BF13:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_GD32_FDPIC:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_IE32_FDPIC:
    case BFD_RELOC_ARM_TLS_LDM32:
    case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

    /* The remaining cases are internal relocation types which should
       have been fully resolved by md_apply_fix; reaching them here
       means the operand referenced a symbol we could not resolve.  */
    case BFD_RELOC_ARM_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_THUMB_PCREL_BRANCH5:
    case BFD_RELOC_THUMB_PCREL_BFCSEL:
    case BFD_RELOC_ARM_THUMB_LOOP12:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("%s used for a symbol not defined in the same file"),
		    bfd_get_reloc_code_name (fixp->fx_r_type));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	const char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
	  default:			   type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* A 32-bit reference to the GOT symbol itself becomes a GOTPC reloc,
     with the reloc's own address as the addend.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
25699
25700 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
25701
25702 void
25703 cons_fix_new_arm (fragS * frag,
25704 int where,
25705 int size,
25706 expressionS * exp,
25707 bfd_reloc_code_real_type reloc)
25708 {
25709 int pcrel = 0;
25710
25711 /* Pick a reloc.
25712 FIXME: @@ Should look at CPU word size. */
25713 switch (size)
25714 {
25715 case 1:
25716 reloc = BFD_RELOC_8;
25717 break;
25718 case 2:
25719 reloc = BFD_RELOC_16;
25720 break;
25721 case 4:
25722 default:
25723 reloc = BFD_RELOC_32;
25724 break;
25725 case 8:
25726 reloc = BFD_RELOC_64;
25727 break;
25728 }
25729
25730 #ifdef TE_PE
25731 if (exp->X_op == O_secrel)
25732 {
25733 exp->X_op = O_symbol;
25734 reloc = BFD_RELOC_32_SECREL;
25735 }
25736 #endif
25737
25738 fix_new_exp (frag, where, size, exp, pcrel, reloc);
25739 }
25740
25741 #if defined (OBJ_COFF)
25742 void
25743 arm_validate_fix (fixS * fixP)
25744 {
25745 /* If the destination of the branch is a defined symbol which does not have
25746 the THUMB_FUNC attribute, then we must be calling a function which has
25747 the (interfacearm) attribute. We look for the Thumb entry point to that
25748 function and change the branch to refer to that function instead. */
25749 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
25750 && fixP->fx_addsy != NULL
25751 && S_IS_DEFINED (fixP->fx_addsy)
25752 && ! THUMB_IS_FUNC (fixP->fx_addsy))
25753 {
25754 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
25755 }
25756 }
25757 #endif
25758
25759
25760 int
25761 arm_force_relocation (struct fix * fixp)
25762 {
25763 #if defined (OBJ_COFF) && defined (TE_PE)
25764 if (fixp->fx_r_type == BFD_RELOC_RVA)
25765 return 1;
25766 #endif
25767
25768 /* In case we have a call or a branch to a function in ARM ISA mode from
25769 a thumb function or vice-versa force the relocation. These relocations
25770 are cleared off for some cores that might have blx and simple transformations
25771 are possible. */
25772
25773 #ifdef OBJ_ELF
25774 switch (fixp->fx_r_type)
25775 {
25776 case BFD_RELOC_ARM_PCREL_JUMP:
25777 case BFD_RELOC_ARM_PCREL_CALL:
25778 case BFD_RELOC_THUMB_PCREL_BLX:
25779 if (THUMB_IS_FUNC (fixp->fx_addsy))
25780 return 1;
25781 break;
25782
25783 case BFD_RELOC_ARM_PCREL_BLX:
25784 case BFD_RELOC_THUMB_PCREL_BRANCH25:
25785 case BFD_RELOC_THUMB_PCREL_BRANCH20:
25786 case BFD_RELOC_THUMB_PCREL_BRANCH23:
25787 if (ARM_IS_FUNC (fixp->fx_addsy))
25788 return 1;
25789 break;
25790
25791 default:
25792 break;
25793 }
25794 #endif
25795
25796 /* Resolve these relocations even if the symbol is extern or weak.
25797 Technically this is probably wrong due to symbol preemption.
25798 In practice these relocations do not have enough range to be useful
25799 at dynamic link time, and some code (e.g. in the Linux kernel)
25800 expects these references to be resolved. */
25801 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
25802 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
25803 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
25804 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
25805 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
25806 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
25807 || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
25808 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
25809 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
25810 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
25811 || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
25812 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
25813 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
25814 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
25815 return 0;
25816
25817 /* Always leave these relocations for the linker. */
25818 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
25819 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
25820 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
25821 return 1;
25822
25823 /* Always generate relocations against function symbols. */
25824 if (fixp->fx_r_type == BFD_RELOC_32
25825 && fixp->fx_addsy
25826 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
25827 return 1;
25828
25829 return generic_force_reloc (fixp);
25830 }
25831
25832 #if defined (OBJ_ELF) || defined (OBJ_COFF)
25833 /* Relocations against function names must be left unadjusted,
25834 so that the linker can use this information to generate interworking
25835 stubs. The MIPS version of this function
25836 also prevents relocations that are mips-16 specific, but I do not
25837 know why it does this.
25838
25839 FIXME:
25840 There is one other problem that ought to be addressed here, but
25841 which currently is not: Taking the address of a label (rather
25842 than a function) and then later jumping to that address. Such
25843 addresses also ought to have their bottom bit set (assuming that
25844 they reside in Thumb code), but at the moment they will not. */
25845
25846 bfd_boolean
25847 arm_fix_adjustable (fixS * fixP)
25848 {
25849 if (fixP->fx_addsy == NULL)
25850 return 1;
25851
25852 /* Preserve relocations against symbols with function type. */
25853 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
25854 return FALSE;
25855
25856 if (THUMB_IS_FUNC (fixP->fx_addsy)
25857 && fixP->fx_subsy == NULL)
25858 return FALSE;
25859
25860 /* We need the symbol name for the VTABLE entries. */
25861 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
25862 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
25863 return FALSE;
25864
25865 /* Don't allow symbols to be discarded on GOT related relocs. */
25866 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
25867 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
25868 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
25869 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
25870 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32_FDPIC
25871 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
25872 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
25873 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32_FDPIC
25874 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
25875 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32_FDPIC
25876 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
25877 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
25878 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
25879 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
25880 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
25881 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
25882 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
25883 return FALSE;
25884
25885 /* Similarly for group relocations. */
25886 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
25887 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
25888 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
25889 return FALSE;
25890
25891 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
25892 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
25893 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
25894 || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
25895 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
25896 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
25897 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
25898 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
25899 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
25900 return FALSE;
25901
25902 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
25903 offsets, so keep these symbols. */
25904 if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
25905 && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
25906 return FALSE;
25907
25908 return TRUE;
25909 }
25910 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
25911
25912 #ifdef OBJ_ELF
25913 const char *
25914 elf32_arm_target_format (void)
25915 {
25916 #ifdef TE_SYMBIAN
25917 return (target_big_endian
25918 ? "elf32-bigarm-symbian"
25919 : "elf32-littlearm-symbian");
25920 #elif defined (TE_VXWORKS)
25921 return (target_big_endian
25922 ? "elf32-bigarm-vxworks"
25923 : "elf32-littlearm-vxworks");
25924 #elif defined (TE_NACL)
25925 return (target_big_endian
25926 ? "elf32-bigarm-nacl"
25927 : "elf32-littlearm-nacl");
25928 #else
25929 if (arm_fdpic)
25930 {
25931 if (target_big_endian)
25932 return "elf32-bigarm-fdpic";
25933 else
25934 return "elf32-littlearm-fdpic";
25935 }
25936 else
25937 {
25938 if (target_big_endian)
25939 return "elf32-bigarm";
25940 else
25941 return "elf32-littlearm";
25942 }
25943 #endif
25944 }
25945
/* Per-symbol hook run at the end of assembly; simply defers to the
   generic ELF symbol frobbing.  PUNTP is set non-zero by the callee if
   the symbol should be punted (not emitted).  */
void
armelf_frob_symbol (symbolS * symp,
		    int *     puntp)
{
  elf_frob_symbol (symp, puntp);
}
25952 #endif
25953
25954 /* MD interface: Finalization. */
25955
25956 void
25957 arm_cleanup (void)
25958 {
25959 literal_pool * pool;
25960
25961 /* Ensure that all the IT blocks are properly closed. */
25962 check_it_blocks_finished ();
25963
25964 for (pool = list_of_pools; pool; pool = pool->next)
25965 {
25966 /* Put it at the end of the relevant section. */
25967 subseg_set (pool->section, pool->sub_section);
25968 #ifdef OBJ_ELF
25969 arm_elf_change_section ();
25970 #endif
25971 s_ltorg (0);
25972 }
25973 }
25974
25975 #ifdef OBJ_ELF
25976 /* Remove any excess mapping symbols generated for alignment frags in
25977 SEC. We may have created a mapping symbol before a zero byte
25978 alignment; remove it if there's a mapping symbol after the
25979 alignment. */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  /* Walk every frag, looking at the last mapping symbol recorded for
     each one.  */
  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on the boundary with the following frag(s);
	 decide whether it is redundant by scanning forward over any
	 empty frags.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
26040 #endif
26041
26042 /* Adjust the symbol table. This marks Thumb symbols as distinct from
26043 ARM ones. */
26044
void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  /* COFF: rewrite the storage class of every Thumb symbol so that the
     Thumb-ness survives into the object file.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      /* Interworking symbols get all flag bits set.  */
      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  /* ELF: tag Thumb symbols via st_target_internal / st_info, skipping
     the $a/$t/$d mapping symbols.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
						BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal,
					 ST_BRANCH_TO_THUMB);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
26123
26124 /* MD interface: Initialization. */
26125
26126 static void
26127 set_constant_flonums (void)
26128 {
26129 int i;
26130
26131 for (i = 0; i < NUM_FLOAT_VALS; i++)
26132 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
26133 abort ();
26134 }
26135
26136 /* Auto-select Thumb mode if it's the only available instruction set for the
26137 given architecture. */
26138
static void
autoselect_thumb_from_cpu_variant (void)
{
  /* arm_ext_v1 is the baseline ARM (32-bit) instruction set; when it is
     absent only Thumb is available, so start in 16-bit opcode mode.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    opcode_select (16);
}
26145
/* MD interface hook: one-time target initialisation.  Builds the
   opcode/operand hash tables, resolves the CPU/FPU selection from the
   command-line options, sets the object file private flags and finally
   records the BFD machine type.  */

void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create the hash tables used for looking up mnemonics, condition
     codes, shift names, PSR names, register names, relocation names
     and barrier options.  */
  if (	 (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
		 (void *) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
		 (void *) (barrier_opt_names + i));
#ifdef OBJ_ELF
  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
    {
      struct reloc_entry * entry = reloc_names + i;

      if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
	/* This makes encode_branch() use the EABI versions of this relocation.  */
	entry->reloc = BFD_RELOC_UNUSED;

      hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
    }
#endif

  set_constant_flonums ();

  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options with new options are faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      selected_arch = *legacy_cpu;
    }
  else if (mcpu_cpu_opt)
    {
      selected_arch = *mcpu_cpu_opt;
      selected_ext = *mcpu_ext_opt;
    }
  else if (march_cpu_opt)
    {
      selected_arch = *march_cpu_opt;
      selected_ext = *march_ext_opt;
    }
  ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      selected_fpu = *legacy_fpu;
    }
  else if (mfpu_opt)
    selected_fpu = *mfpu_opt;
  else
    {
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	selected_fpu = *mcpu_fpu_opt;
      else if (march_fpu_opt)
	selected_fpu = *march_fpu_opt;
#else
      selected_fpu = fpu_default;
#endif
    }

  if (ARM_FEATURE_ZERO (selected_fpu))
    {
      if (!no_cpu_selected ())
	selected_fpu = fpu_default;
      else
	selected_fpu = fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (ARM_FEATURE_ZERO (selected_arch))
    {
      selected_arch = cpu_default;
      selected_cpu = selected_arch;
    }
  ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
#else
  /* Autodetection of feature mode: allow all features in cpu_variant but
     leave selected_cpu unset.  It will be set in
     aeabi_set_public_attributes () after all instructions have been
     processed and we can decide what CPU should be selected.  */
  if (ARM_FEATURE_ZERO (selected_arch))
    ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
  else
    ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
#endif

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;

#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).  */
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	  flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.	*/
	break;

      default:
	abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out of flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.	*/
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags
	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
	    bfd_set_section_size (stdoutput, sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  Checks run from the most to the least
     specific feature, so the first match wins.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
26382
26383 /* Command line processing. */
26384
26385 /* md_parse_option
26386 Invocation line includes a switch not recognized by the base assembler.
26387 See if it's a processor-specific option.
26388
26389 This routine is somewhat complicated by the need for backwards
26390 compatibility (since older releases of gcc can't be changed).
26391 The new options try to make the interface as compatible as
26392 possible with GCC.
26393
26394 New options (supported) are:
26395
26396 -mcpu=<cpu name> Assemble for selected processor
26397 -march=<architecture name> Assemble for selected architecture
26398 -mfpu=<fpu architecture> Assemble for selected FPU.
26399 -EB/-mbig-endian Big-endian
26400 -EL/-mlittle-endian Little-endian
26401 -k Generate PIC code
26402 -mthumb Start in Thumb mode
26403 -mthumb-interwork Code supports ARM/Thumb interworking
26404
26405 -m[no-]warn-deprecated Warn about deprecated features
26406 -m[no-]warn-syms Warn when symbols match instructions
26407
26408 For now we will also provide support for:
26409
26410 -mapcs-32 32-bit Program counter
26411 -mapcs-26 26-bit Program counter
   -mapcs-float		Floats passed in FP registers
26413 -mapcs-reentrant Reentrant code
26414 -matpcs
26415 (sometime these will probably be replaced with -mapcs=<list of options>
26416 and -matpcs=<list of options>)
26417
   The remaining options are only supported for backwards compatibility.
26419 Cpu variants, the arm part is optional:
26420 -m[arm]1 Currently not supported.
26421 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
26422 -m[arm]3 Arm 3 processor
26423 -m[arm]6[xx], Arm 6 processors
26424 -m[arm]7[xx][t][[d]m] Arm 7 processors
26425 -m[arm]8[10] Arm 8 processors
26426 -m[arm]9[20][tdmi] Arm 9 processors
26427 -mstrongarm[110[0]] StrongARM processors
26428 -mxscale XScale processors
26429 -m[arm]v[2345[t[e]]] Arm architectures
26430 -mall All (except the ARM1)
26431 FP variants:
26432 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
26433 -mfpe-old (No float load/store multiples)
26434 -mvfpxd VFP Single precision
26435 -mvfp All VFP
26436 -mno-fpu Disable all floating point instructions
26437
26438 The following CPU names are recognized:
26439 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
26440 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
26441 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
26442 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
26443 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
26444 arm10t arm10e, arm1020t, arm1020e, arm10200e,
26445 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
26446
26447 */
26448
/* Short options recognized by this back end: -m<arg> (machine selection,
   takes an argument) and -k (generate PIC code).  */
const char * md_shortopts = "m:k";
26450
/* Identifiers for target-specific long options, allocated upwards from
   OPTION_MD_BASE.  OPTION_EB/OPTION_EL select big/little endian output;
   only the variants meaningful for this configuration are defined.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
/* --fix-v4bx option identifier.  */
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
/* --fdpic option identifier (ELF only, see md_longopts below).  */
#define OPTION_FDPIC (OPTION_MD_BASE + 3)
26463
/* Target-specific long options.  Options beginning with -m are instead
   matched against the arm_opts/arm_legacy_opts tables below.  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
#ifdef OBJ_ELF
  {"fdpic", no_argument, NULL, OPTION_FDPIC},
#endif
  {NULL, no_argument, NULL, 0}
};
26478
/* Size in bytes of md_longopts, required by the generic option parser.  */
size_t md_longopts_size = sizeof (md_longopts);
26480
/* Describes a simple command-line option which, when matched, stores the
   fixed integer VALUE into *VAR (or is accepted and ignored when VAR is
   NULL), optionally printing a deprecation message.  */
struct arm_option_table
{
  const char * option;		/* Option name to match.  */
  const char * help;		/* Help information.  */
  int * var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  const char * deprecated;	/* If non-null, print this message.  */
};
26489
26490 struct arm_option_table arm_opts[] =
26491 {
26492 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
26493 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
26494 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
26495 &support_interwork, 1, NULL},
26496 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
26497 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
26498 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
26499 1, NULL},
26500 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
26501 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
26502 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
26503 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
26504 NULL},
26505
26506 /* These are recognized by the assembler, but have no affect on code. */
26507 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
26508 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
26509
26510 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
26511 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
26512 &warn_on_deprecated, 0, NULL},
26513 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL},
26514 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL},
26515 {NULL, NULL, NULL, 0, NULL}
26516 };
26517
/* Describes a deprecated legacy command-line option that maps directly
   onto an arm_feature_set; DEPRECATED names the modern -mcpu=/-march=/
   -mfpu= replacement suggested to the user.  */
struct arm_legacy_option_table
{
  const char * option;			/* Option name to match.  */
  const arm_feature_set ** var;		/* Variable to change.  */
  const arm_feature_set	value;		/* What to change it to.  */
  const char * deprecated;		/* If non-null, print this message.  */
};
26525
/* Legacy single-option spellings of CPU, architecture and FPU selection.
   Each entry sets legacy_cpu or legacy_fpu and carries the message naming
   the modern replacement option.  */
const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
  {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
  {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
  {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
  {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
  {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
  {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
  {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
  {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
  {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
  {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
  {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
  {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
  {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
  {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
  {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
  {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
  {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
  {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
  {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
  {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
  {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
  {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
  {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
  {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
  {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
  {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
  {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
  {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
  {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
  {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
  {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
  {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
  {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
  {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
  {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
  {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
  {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
  {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
  {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
  {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
  {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
  {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
  {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
  {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
  {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
  {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
  {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.  */
  {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
26638
/* Entry in the -mcpu= table: maps a processor name onto its base
   architecture feature set, mandatory extension features, and the FPU
   assumed when the user does not give -mfpu=.  */
struct arm_cpu_option_table
{
  const char * name;		/* Name accepted on the command line.  */
  size_t name_len;		/* strlen (NAME), precomputed by ARM_CPU_OPT.  */
  const arm_feature_set value;	/* Base architecture features.  */
  const arm_feature_set ext;	/* Extension features always present.  */
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...	*/
  const arm_feature_set default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char * canonical_name;
};
26652
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
/* ARM_CPU_OPT (command-line name, canonical name or NULL, base arch,
   mandatory extensions, default FPU) -- see struct arm_cpu_option_table.  */
#define ARM_CPU_OPT(N, CN, V, E, DF) { N, sizeof (N) - 1, V, E, DF, CN }

static const struct arm_cpu_option_table arm_cpus[] =
{
  ARM_CPU_OPT ("all", NULL, ARM_ANY,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm1", NULL, ARM_ARCH_V1,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm2", NULL, ARM_ARCH_V2,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm250", NULL, ARM_ARCH_V2S,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm3", NULL, ARM_ARCH_V2S,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm6", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm60", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm600", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm610", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm620", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7m", NULL, ARM_ARCH_V3M,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7d", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7dm", NULL, ARM_ARCH_V3M,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7di", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7dmi", NULL, ARM_ARCH_V3M,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm70", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm700", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm700i", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710t", NULL, ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm720", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm720t", NULL, ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm740t", NULL, ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710c", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7100", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7500", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7500fe", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7t", NULL, ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7tdmi", NULL, ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7tdmi-s", NULL, ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm8", NULL, ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm810", NULL, ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm", NULL, ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1", NULL, ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm110", NULL, ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1100", NULL, ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1110", NULL, ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm9", NULL, ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm920", "ARM920T", ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm920t", NULL, ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm922t", NULL, ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm940t", NULL, ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm9tdmi", NULL, ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("fa526", NULL, ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("fa626", NULL, ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),

  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.	 */
  ARM_CPU_OPT ("arm9e-r0", NULL, ARM_ARCH_V5TExP,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm9e", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ej", "ARM926EJ-S", ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ejs", "ARM926EJ-S", ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ej-s", NULL, ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e-r0", NULL, ARM_ARCH_V5TExP,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e", "ARM946E-S", ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e-s", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e-r0", NULL, ARM_ARCH_V5TExP,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e", "ARM966E-S", ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e-s", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm968e-s", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm10t", NULL, ARM_ARCH_V5T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm10tdmi", NULL, ARM_ARCH_V5T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm10e", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1020", "ARM1020E", ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1020t", NULL, ARM_ARCH_V5T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm1020e", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1022e", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1026ejs", "ARM1026EJ-S", ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1026ej-s", NULL, ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa606te", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa616te", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa626te", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fmp626", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa726te", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1136js", "ARM1136J-S", ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1136j-s", NULL, ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1136jfs", "ARM1136JF-S", ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1136jf-s", NULL, ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("mpcore", "MPCore", ARM_ARCH_V6K,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("mpcorenovfp", "MPCore", ARM_ARCH_V6K,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1156t2-s", NULL, ARM_ARCH_V6T2,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1156t2f-s", NULL, ARM_ARCH_V6T2,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1176jz-s", NULL, ARM_ARCH_V6KZ,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1176jzf-s", NULL, ARM_ARCH_V6KZ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("cortex-a5", "Cortex-A5", ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-a7", "Cortex-A7", ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a8", "Cortex-A8", ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
	       ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
  ARM_CPU_OPT ("cortex-a9", "Cortex-A9", ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
  ARM_CPU_OPT ("cortex-a12", "Cortex-A12", ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a15", "Cortex-A15", ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a17", "Cortex-A17", ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a32", "Cortex-A32", ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a35", "Cortex-A35", ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a53", "Cortex-A53", ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a55", "Cortex-A55", ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-a57", "Cortex-A57", ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a72", "Cortex-A72", ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a73", "Cortex-A73", ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a75", "Cortex-A75", ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-a76", "Cortex-A76", ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("ares", "Ares", ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-r4", "Cortex-R4", ARM_ARCH_V7R,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-r4f", "Cortex-R4F", ARM_ARCH_V7R,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r5", "Cortex-R5", ARM_ARCH_V7R,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-r7", "Cortex-R7", ARM_ARCH_V7R,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r8", "Cortex-R8", ARM_ARCH_V7R,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r52", "Cortex-R52", ARM_ARCH_V8R,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-m33", "Cortex-M33", ARM_ARCH_V8M_MAIN,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m23", "Cortex-M23", ARM_ARCH_V8M_BASE,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m7", "Cortex-M7", ARM_ARCH_V7EM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m4", "Cortex-M4", ARM_ARCH_V7EM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m3", "Cortex-M3", ARM_ARCH_V7M,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m1", "Cortex-M1", ARM_ARCH_V6SM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m0", "Cortex-M0", ARM_ARCH_V6SM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m0plus", "Cortex-M0+", ARM_ARCH_V6SM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("exynos-m1", "Samsung Exynos M1", ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("neoverse-n1", "Neoverse N1", ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  /* ??? XSCALE is really an architecture.  */
  ARM_CPU_OPT ("xscale", NULL, ARM_ARCH_XSCALE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),

  /* ??? iwmmxt is not a processor.  */
  ARM_CPU_OPT ("iwmmxt", NULL, ARM_ARCH_IWMMXT,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("iwmmxt2", NULL, ARM_ARCH_IWMMXT2,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("i80200", NULL, ARM_ARCH_XSCALE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),

  /* Maverick.  */
  ARM_CPU_OPT ("ep9312", "ARM920T",
	       ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
	       ARM_ARCH_NONE, FPU_ARCH_MAVERICK),

  /* Marvell processors.  */
  ARM_CPU_OPT ("marvell-pj4", NULL, ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("marvell-whitney", NULL, ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       FPU_ARCH_NEON_VFP_V4),

  /* APM X-Gene family.  */
  ARM_CPU_OPT ("xgene1", "APM X-Gene 1", ARM_ARCH_V8A,
	       ARM_ARCH_NONE,
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("xgene2", "APM X-Gene 2", ARM_ARCH_V8A,
	       ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),

  /* Sentinel.  */
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
};
#undef ARM_CPU_OPT
27047
/* Architecture extension entry: per the ARM_EXT/ARM_ADD/ARM_REMOVE
   helpers below, "+NAME" merges MERGE into the enabled feature set and
   "+noNAME" clears CLEAR from it.  */
struct arm_ext_table
{
  const char * name;			/* Extension name (without the '+').  */
  size_t name_len;			/* strlen (NAME), precomputed.  */
  const arm_feature_set merge;		/* Features enabled by +NAME.  */
  const arm_feature_set clear;		/* Features disabled by +noNAME.  */
};
27055
/* Entry in the -march= table: maps an architecture name onto its feature
   set, the FPU assumed when -mfpu= is not given, and a table of the
   +extensions it supports.  */
struct arm_arch_option_table
{
  const char * name;			/* Name accepted on the command line.  */
  size_t name_len;			/* strlen (NAME), precomputed.  */
  const arm_feature_set value;		/* Features the architecture provides.  */
  const arm_feature_set default_fpu;	/* FPU assumed without -mfpu=.  */
  const struct arm_ext_table * ext_table;	/* Supported +extensions.  */
};
27064
/* Used to add support for +E and +noE extension.  */
#define ARM_EXT(E, M, C) { E, sizeof (E) - 1, M, C }
/* Used to add support for a +E extension.  */
#define ARM_ADD(E, M) { E, sizeof(E) - 1, M, ARM_ARCH_NONE }
/* Used to add support for a +noE extension.  */
#define ARM_REMOVE(E, C) { E, sizeof(E) -1, ARM_ARCH_NONE, C }

/* Every floating-point feature: both FP16 core extensions plus all
   coprocessor bits except FPU_ENDIAN_PURE.  Used as the CLEAR set of
   "+nofp"-style entries so that +nofp strips FP completely.  */
#define ALL_FP ARM_FEATURE (0, ARM_EXT2_FP16_INST | ARM_EXT2_FP16_FML, \
			    ~0 & ~FPU_ENDIAN_PURE)
27074
/* Extensions for the v5TE-class architectures that reference this table:
   +fp enables VFPv2, +nofp removes all floating point.  */
static const struct arm_ext_table armv5te_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V2, ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27080
/* Extensions for -march=armv7: +fp enables VFPv3-D16, +nofp removes all
   floating point.  */
static const struct arm_ext_table armv7_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27086
/* Extensions for -march=armv7ve.  +fp defaults to VFPv4-D16; the named
   vfpv3/vfpv4 variants select specific register-bank/FP16 combinations.  */
static const struct arm_ext_table armv7ve_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V4D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16),
  ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16),
  ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16),	/* Alias for +fp.  */
  ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4),

  /* +simd enables Neon with VFPv4; +nosimd removes the Neon feature bits.  */
  ARM_EXT ("simd", FPU_ARCH_NEON_VFP_V4,
	   ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_NEON_EXT_FMA)),

  /* Aliases for +simd.  */
  ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4),

  ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16),

  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27109
/* Extensions for -march=armv7-a.  +fp defaults to VFPv3-D16; +mp and
   +sec add the multiprocessing and security core extensions.  */
static const struct arm_ext_table armv7a_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16),	/* Alias for +fp.  */
  ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16),
  ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16),
  ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4),

  /* +simd enables Neon with VFPv3; +nosimd removes the Neon feature bits.  */
  ARM_EXT ("simd", FPU_ARCH_VFP_V3_PLUS_NEON_V1,
	   ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_NEON_EXT_FMA)),

  /* Aliases for +simd.  */
  ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1),

  ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16),
  ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4),

  ARM_ADD ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP)),
  ARM_ADD ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27134
/* Extensions for -march=armv7-r.  +fp.sp selects the single-precision-only
   VFPv3xD variant, +fp the full VFPv3-D16; +idiv adds the hardware divide
   instructions in both instruction sets.  */
static const struct arm_ext_table armv7r_ext_table[] =
{
  ARM_ADD ("fp.sp", FPU_ARCH_VFP_V3xD),
  ARM_ADD ("vfpv3xd", FPU_ARCH_VFP_V3xD),	/* Alias for +fp.sp.  */
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16),	/* Alias for +fp.  */
  ARM_ADD ("vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_EXT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
	   ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27147
/* Extensions for -march=armv7e-m.  +fp defaults to the single-precision
   FPv4; +fpv5 and +fp.dp select the FPv5 single- and double-precision
   variants.  */
static const struct arm_ext_table armv7em_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V4_SP_D16, ALL_FP),
  /* Alias for +fp, used to be known as fpv4-sp-d16.  */
  ARM_ADD ("vfpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16),
  ARM_ADD ("fpv5", FPU_ARCH_VFP_V5_SP_D16),
  ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16),
  ARM_ADD ("fpv5-d16", FPU_ARCH_VFP_V5D16),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27158
/* Extensions for -march=armv8-a: +crc, +simd (Neon), +crypto, +sb and
   +predres; +nofp removes floating point entirely.  */
static const struct arm_ext_table armv8a_ext_table[] =
{
  ARM_ADD ("crc", ARCH_CRC_ARMV8),
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27173
27174
/* Extensions for -march=armv8.1-a.  Like armv8-a but the SIMD variants
   include the v8.1 additions.  */
static const struct arm_ext_table armv81a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27188
/* Architectural extensions accepted after -march=armv8.2-a (also reused for
   armv8.3-a, see ARM_ARCHS below).  */
static const struct arm_ext_table armv82a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_2_FP16),
  ARM_ADD ("fp16fml", FPU_ARCH_NEON_VFP_ARMV8_2_FP16FML),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
  ARM_ADD ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27205
/* Architectural extensions accepted after -march=armv8.4-a.  */
static const struct arm_ext_table armv84a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27220
/* Architectural extensions accepted after -march=armv8.5-a.  Unlike the
   armv8.4-a table there are no +sb / +predres entries here — presumably
   because those features are already part of the ARM_ARCH_V8_5A base; see
   cpu_arch_ver and the architecture definitions (TODO: confirm against
   include/opcode/arm.h).  */
static const struct arm_ext_table armv85a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27233
/* Architectural extensions accepted after -march=armv8-m.main.  */
static const struct arm_ext_table armv8m_main_ext_table[] =
{
  ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	   ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP)),
  ARM_EXT ("fp", FPU_ARCH_VFP_V5_SP_D16, ALL_FP),
  ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27242
/* Architectural extensions accepted after -march=armv8.1-m.main.  The FP
   entries enable the scalar FP16 instructions (ARM_EXT2_FP16_INST) along
   with single- or double-precision VFPv5 and FMA.  */
static const struct arm_ext_table armv8_1m_main_ext_table[] =
{
  ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	   ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP)),
  ARM_EXT ("fp",
	   ARM_FEATURE (0, ARM_EXT2_FP16_INST,
			FPU_VFP_V5_SP_D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA),
	   ALL_FP),
  ARM_ADD ("fp.dp",
	   ARM_FEATURE (0, ARM_EXT2_FP16_INST,
			FPU_VFP_V5D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27256
/* Architectural extensions accepted after -march=armv8-r.  */
static const struct arm_ext_table armv8r_ext_table[] =
{
  ARM_ADD ("crc", ARCH_CRC_ARMV8),
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("fp.sp", FPU_ARCH_VFP_V5_SP_D16),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
27267
27268 /* This list should, at a minimum, contain all the architecture names
27269 recognized by GCC. */
27270 #define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF, NULL }
27271 #define ARM_ARCH_OPT2(N, V, DF, ext) \
27272 { N, sizeof (N) - 1, V, DF, ext##_ext_table }
27273
27274 static const struct arm_arch_option_table arm_archs[] =
27275 {
27276 ARM_ARCH_OPT ("all", ARM_ANY, FPU_ARCH_FPA),
27277 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1, FPU_ARCH_FPA),
27278 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2, FPU_ARCH_FPA),
27279 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA),
27280 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA),
27281 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3, FPU_ARCH_FPA),
27282 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA),
27283 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4, FPU_ARCH_FPA),
27284 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA),
27285 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA),
27286 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA),
27287 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5, FPU_ARCH_VFP),
27288 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP),
27289 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP),
27290 ARM_ARCH_OPT2 ("armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP, armv5te),
27291 ARM_ARCH_OPT2 ("armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP, armv5te),
27292 ARM_ARCH_OPT2 ("armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP, armv5te),
27293 ARM_ARCH_OPT2 ("armv6", ARM_ARCH_V6, FPU_ARCH_VFP, armv5te),
27294 ARM_ARCH_OPT2 ("armv6j", ARM_ARCH_V6, FPU_ARCH_VFP, armv5te),
27295 ARM_ARCH_OPT2 ("armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP, armv5te),
27296 ARM_ARCH_OPT2 ("armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP, armv5te),
27297 /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
27298 kept to preserve existing behaviour. */
27299 ARM_ARCH_OPT2 ("armv6kz", ARM_ARCH_V6KZ, FPU_ARCH_VFP, armv5te),
27300 ARM_ARCH_OPT2 ("armv6zk", ARM_ARCH_V6KZ, FPU_ARCH_VFP, armv5te),
27301 ARM_ARCH_OPT2 ("armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP, armv5te),
27302 ARM_ARCH_OPT2 ("armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP, armv5te),
27303 ARM_ARCH_OPT2 ("armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP, armv5te),
27304 /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
27305 kept to preserve existing behaviour. */
27306 ARM_ARCH_OPT2 ("armv6kzt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP, armv5te),
27307 ARM_ARCH_OPT2 ("armv6zkt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP, armv5te),
27308 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M, FPU_ARCH_VFP),
27309 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM, FPU_ARCH_VFP),
27310 ARM_ARCH_OPT2 ("armv7", ARM_ARCH_V7, FPU_ARCH_VFP, armv7),
27311 /* The official spelling of the ARMv7 profile variants is the dashed form.
27312 Accept the non-dashed form for compatibility with old toolchains. */
27313 ARM_ARCH_OPT2 ("armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP, armv7a),
27314 ARM_ARCH_OPT2 ("armv7ve", ARM_ARCH_V7VE, FPU_ARCH_VFP, armv7ve),
27315 ARM_ARCH_OPT2 ("armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP, armv7r),
27316 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP),
27317 ARM_ARCH_OPT2 ("armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP, armv7a),
27318 ARM_ARCH_OPT2 ("armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP, armv7r),
27319 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP),
27320 ARM_ARCH_OPT2 ("armv7e-m", ARM_ARCH_V7EM, FPU_ARCH_VFP, armv7em),
27321 ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE, FPU_ARCH_VFP),
27322 ARM_ARCH_OPT2 ("armv8-m.main", ARM_ARCH_V8M_MAIN, FPU_ARCH_VFP,
27323 armv8m_main),
27324 ARM_ARCH_OPT2 ("armv8.1-m.main", ARM_ARCH_V8_1M_MAIN, FPU_ARCH_VFP,
27325 armv8_1m_main),
27326 ARM_ARCH_OPT2 ("armv8-a", ARM_ARCH_V8A, FPU_ARCH_VFP, armv8a),
27327 ARM_ARCH_OPT2 ("armv8.1-a", ARM_ARCH_V8_1A, FPU_ARCH_VFP, armv81a),
27328 ARM_ARCH_OPT2 ("armv8.2-a", ARM_ARCH_V8_2A, FPU_ARCH_VFP, armv82a),
27329 ARM_ARCH_OPT2 ("armv8.3-a", ARM_ARCH_V8_3A, FPU_ARCH_VFP, armv82a),
27330 ARM_ARCH_OPT2 ("armv8-r", ARM_ARCH_V8R, FPU_ARCH_VFP, armv8r),
27331 ARM_ARCH_OPT2 ("armv8.4-a", ARM_ARCH_V8_4A, FPU_ARCH_VFP, armv84a),
27332 ARM_ARCH_OPT2 ("armv8.5-a", ARM_ARCH_V8_5A, FPU_ARCH_VFP, armv85a),
27333 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP),
27334 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
27335 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2, FPU_ARCH_VFP),
27336 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
27337 };
27338 #undef ARM_ARCH_OPT
27339
/* ISA extensions in the co-processor and main instruction set space.  */

/* One entry of the legacy (architecture-independent) extension table
   ARM_EXTENSIONS below.  */
struct arm_option_extension_value_table
{
  const char * name;			/* Extension name, without '+'.  */
  size_t name_len;			/* Precomputed strlen (name).  */
  const arm_feature_set merge_value;	/* Features enabled by +<name>.  */
  const arm_feature_set clear_value;	/* Features removed by +no<name>.  */
  /* List of architectures for which an extension is available.  ARM_ARCH_NONE
     indicates that an extension is available for all architectures while
     ARM_ANY marks an empty entry.  */
  const arm_feature_set allowed_archs[2];
};
27353
27354 /* The following table must be in alphabetical order with a NULL last entry. */
27355
27356 #define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
27357 #define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }
27358
27359 /* DEPRECATED: Refrain from using this table to add any new extensions, instead
27360 use the context sensitive approach using arm_ext_table's. */
27361 static const struct arm_option_extension_value_table arm_extensions[] =
27362 {
27363 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
27364 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
27365 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
27366 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
27367 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
27368 ARM_EXT_OPT ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8,
27369 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD),
27370 ARM_ARCH_V8_2A),
27371 ARM_EXT_OPT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
27372 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
27373 ARM_FEATURE_CORE (ARM_EXT_V7M, ARM_EXT2_V8M)),
27374 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
27375 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
27376 ARM_EXT_OPT ("fp16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
27377 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
27378 ARM_ARCH_V8_2A),
27379 ARM_EXT_OPT ("fp16fml", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
27380 | ARM_EXT2_FP16_FML),
27381 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
27382 | ARM_EXT2_FP16_FML),
27383 ARM_ARCH_V8_2A),
27384 ARM_EXT_OPT2 ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
27385 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
27386 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
27387 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
27388 /* Duplicate entry for the purpose of allowing ARMv7 to match in presence of
27389 Thumb divide instruction. Due to this having the same name as the
27390 previous entry, this will be ignored when doing command-line parsing and
27391 only considered by build attribute selection code. */
27392 ARM_EXT_OPT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
27393 ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
27394 ARM_FEATURE_CORE_LOW (ARM_EXT_V7)),
27395 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
27396 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ARCH_NONE),
27397 ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
27398 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ARCH_NONE),
27399 ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
27400 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ARCH_NONE),
27401 ARM_EXT_OPT2 ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
27402 ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
27403 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
27404 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
27405 ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
27406 ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
27407 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
27408 ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
27409 ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
27410 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
27411 ARM_EXT_OPT ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES),
27412 ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES),
27413 ARM_ARCH_V8A),
27414 ARM_EXT_OPT ("ras", ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS),
27415 ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_RAS, 0),
27416 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
27417 ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8_1,
27418 ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA),
27419 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
27420 ARM_EXT_OPT ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB),
27421 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB),
27422 ARM_ARCH_V8A),
27423 ARM_EXT_OPT2 ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
27424 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
27425 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
27426 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
27427 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8,
27428 ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
27429 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
27430 ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
27431 | ARM_EXT_DIV),
27432 ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
27433 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
27434 ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
27435 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ARCH_NONE),
27436 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, { ARM_ARCH_NONE, ARM_ARCH_NONE } }
27437 };
27438 #undef ARM_EXT_OPT
27439
/* ISA floating-point and Advanced SIMD extensions.  */

/* One entry of the -mfpu= name table ARM_FPUS below.  */
struct arm_option_fpu_value_table
{
  const char * name;		/* FPU name as given on the command line.  */
  const arm_feature_set value;	/* Feature set the name selects.  */
};
27446
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  Several entries are historical aliases kept for
   compatibility (see the per-entry comments).  */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* Undocumented, use vfpv3.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2}, /* Undocumented, use arm1136jf-s.  */
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-vfpv3",	FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {"vfpv4",		FPU_ARCH_VFP_V4},
  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
  {"fpv5-d16",		FPU_ARCH_VFP_V5D16},
  {"fpv5-sp-d16",	FPU_ARCH_VFP_V5_SP_D16},
  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
  {"crypto-neon-fp-armv8",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
  {"neon-fp-armv8.1",	FPU_ARCH_NEON_VFP_ARMV8_1},
  {"crypto-neon-fp-armv8.1",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
  {NULL,		ARM_ARCH_NONE}
};
27497
/* Generic name -> integer mapping used by the -mfloat-abi= and -meabi=
   tables below.  */
struct arm_option_value_table
{
  const char *name;	/* Name accepted on the command line.  */
  long value;		/* Corresponding option value.  */
};
27503
/* Values accepted by -mfloat-abi=.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}
};
27511
#ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  Values
   accepted by -meabi=.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}
};
#endif
27522
/* One entry of ARM_LONG_OPTS: a multi-character option that takes a
   sub-option argument parsed by FUNC.  */
struct arm_long_option_table
{
  const char * option;		/* Substring to match.  */
  const char * help;		/* Help information.  */
  int (* func) (const char * subopt);	/* Function to decode sub-option.  */
  const char * deprecated;	/* If non-null, print this message.  */
};
27530
/* Parse STR, a sequence of "+<ext>" / "+no<ext>" architectural extension
   suffixes from a -mcpu= or -march= option, and apply each one to *EXT_SET.
   OPT_SET is the feature set of the base CPU/architecture, used only to
   check that an extension is permitted.  EXT_TABLE, when non-NULL, is the
   architecture-specific extension table consulted before the legacy global
   ARM_EXTENSIONS table.  Returns TRUE on success, FALSE (after as_bad)
   on any parse or validation error.  */
static bfd_boolean
arm_parse_extension (const char *str, const arm_feature_set *opt_set,
		     arm_feature_set *ext_set,
		     const struct arm_ext_table *ext_table)
{
  /* We insist on extensions being specified in alphabetical order, and with
     extensions being added before being removed.  We achieve this by having
     the global ARM_EXTENSIONS table in alphabetical order, and using the
     ADDING_VALUE variable to indicate whether we are adding an extension (1)
     or removing it (0) and only allowing it to change in the order
     -1 -> 1 -> 0.  */
  const struct arm_option_extension_value_table * opt = NULL;
  /* Sentinel used to recognise empty allowed_archs slots.  */
  const arm_feature_set arm_any = ARM_ANY;
  int adding_value = -1;

  while (str != NULL && *str != 0)
    {
      const char *ext;
      size_t len;

      /* Each extension must be introduced by '+'.  */
      if (*str != '+')
	{
	  as_bad (_("invalid architectural extension"));
	  return FALSE;
	}

      str++;
      ext = strchr (str, '+');

      /* LEN is the length of this extension name only.  */
      if (ext != NULL)
	len = ext - str;
      else
	len = strlen (str);

      /* A "no" prefix means the extension is being removed; once we start
	 removing we may not go back to adding (alphabetical-order rule).  */
      if (len >= 2 && strncmp (str, "no", 2) == 0)
	{
	  if (adding_value != 0)
	    {
	      adding_value = 0;
	      /* Restart the alphabetical scan for the "no" group.  */
	      opt = arm_extensions;
	    }

	  len -= 2;
	  str += 2;
	}
      else if (len > 0)
	{
	  if (adding_value == -1)
	    {
	      adding_value = 1;
	      opt = arm_extensions;
	    }
	  else if (adding_value != 1)
	    {
	      as_bad (_("must specify extensions to add before specifying "
			"those to remove"));
	      return FALSE;
	    }
	}

      if (len == 0)
	{
	  as_bad (_("missing architectural extension"));
	  return FALSE;
	}

      gas_assert (adding_value != -1);
      gas_assert (opt != NULL);

      /* Prefer the architecture-specific table when one was supplied; it is
	 searched in full for every extension (no ordering constraint).  */
      if (ext_table != NULL)
	{
	  const struct arm_ext_table * ext_opt = ext_table;
	  bfd_boolean found = FALSE;
	  for (; ext_opt->name != NULL; ext_opt++)
	    if (ext_opt->name_len == len
		&& strncmp (ext_opt->name, str, len) == 0)
	      {
		if (adding_value)
		  {
		    if (ARM_FEATURE_ZERO (ext_opt->merge))
		      /* TODO: Option not supported.  When we remove the
			 legacy table this case should error out.  */
		      continue;

		    ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, ext_opt->merge);
		  }
		else
		  {
		    if (ARM_FEATURE_ZERO (ext_opt->clear))
		      /* TODO: Option not supported.  When we remove the
			 legacy table this case should error out.  */
		      continue;
		    ARM_CLEAR_FEATURE (*ext_set, *ext_set, ext_opt->clear);
		  }
		found = TRUE;
		break;
	      }
	  if (found)
	    {
	      /* Handled; move on to the next "+<ext>" component.  */
	      str = ext;
	      continue;
	    }
	}

      /* Scan over the options table trying to find an exact match.  OPT
	 deliberately resumes from the previous position to enforce
	 alphabetical ordering.  */
      for (; opt->name != NULL; opt++)
	if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	  {
	    int i, nb_allowed_archs =
	      sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
	    /* Check we can apply the extension to this architecture.  */
	    for (i = 0; i < nb_allowed_archs; i++)
	      {
		/* Empty entry.  */
		if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
		  continue;
		if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *opt_set))
		  break;
	      }
	    if (i == nb_allowed_archs)
	      {
		as_bad (_("extension does not apply to the base architecture"));
		return FALSE;
	      }

	    /* Add or remove the extension.  */
	    if (adding_value)
	      ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
	    else
	      ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);

	    /* Allowing Thumb division instructions for ARMv7 in autodetection
	       rely on this break so that duplicate extensions (extensions
	       with the same name as a previous extension in the list) are not
	       considered for command-line parsing.  */
	    break;
	  }

      if (opt->name == NULL)
	{
	  /* Did we fail to find an extension because it wasn't specified in
	     alphabetical order, or because it does not exist?  */

	  for (opt = arm_extensions; opt->name != NULL; opt++)
	    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	      break;

	  if (opt->name == NULL)
	    as_bad (_("unknown architectural extension `%s'"), str);
	  else
	    as_bad (_("architectural extensions must be specified in "
		      "alphabetical order"));

	  return FALSE;
	}
      else
	{
	  /* We should skip the extension we've just matched the next time
	     round.  */
	  opt++;
	}

      str = ext;
    };

  return TRUE;
}
27698
27699 static bfd_boolean
27700 arm_parse_cpu (const char *str)
27701 {
27702 const struct arm_cpu_option_table *opt;
27703 const char *ext = strchr (str, '+');
27704 size_t len;
27705
27706 if (ext != NULL)
27707 len = ext - str;
27708 else
27709 len = strlen (str);
27710
27711 if (len == 0)
27712 {
27713 as_bad (_("missing cpu name `%s'"), str);
27714 return FALSE;
27715 }
27716
27717 for (opt = arm_cpus; opt->name != NULL; opt++)
27718 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
27719 {
27720 mcpu_cpu_opt = &opt->value;
27721 if (mcpu_ext_opt == NULL)
27722 mcpu_ext_opt = XNEW (arm_feature_set);
27723 *mcpu_ext_opt = opt->ext;
27724 mcpu_fpu_opt = &opt->default_fpu;
27725 if (opt->canonical_name)
27726 {
27727 gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name));
27728 strcpy (selected_cpu_name, opt->canonical_name);
27729 }
27730 else
27731 {
27732 size_t i;
27733
27734 if (len >= sizeof selected_cpu_name)
27735 len = (sizeof selected_cpu_name) - 1;
27736
27737 for (i = 0; i < len; i++)
27738 selected_cpu_name[i] = TOUPPER (opt->name[i]);
27739 selected_cpu_name[i] = 0;
27740 }
27741
27742 if (ext != NULL)
27743 return arm_parse_extension (ext, mcpu_cpu_opt, mcpu_ext_opt, NULL);
27744
27745 return TRUE;
27746 }
27747
27748 as_bad (_("unknown cpu `%s'"), str);
27749 return FALSE;
27750 }
27751
27752 static bfd_boolean
27753 arm_parse_arch (const char *str)
27754 {
27755 const struct arm_arch_option_table *opt;
27756 const char *ext = strchr (str, '+');
27757 size_t len;
27758
27759 if (ext != NULL)
27760 len = ext - str;
27761 else
27762 len = strlen (str);
27763
27764 if (len == 0)
27765 {
27766 as_bad (_("missing architecture name `%s'"), str);
27767 return FALSE;
27768 }
27769
27770 for (opt = arm_archs; opt->name != NULL; opt++)
27771 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
27772 {
27773 march_cpu_opt = &opt->value;
27774 if (march_ext_opt == NULL)
27775 march_ext_opt = XNEW (arm_feature_set);
27776 *march_ext_opt = arm_arch_none;
27777 march_fpu_opt = &opt->default_fpu;
27778 strcpy (selected_cpu_name, opt->name);
27779
27780 if (ext != NULL)
27781 return arm_parse_extension (ext, march_cpu_opt, march_ext_opt,
27782 opt->ext_table);
27783
27784 return TRUE;
27785 }
27786
27787 as_bad (_("unknown architecture `%s'\n"), str);
27788 return FALSE;
27789 }
27790
27791 static bfd_boolean
27792 arm_parse_fpu (const char * str)
27793 {
27794 const struct arm_option_fpu_value_table * opt;
27795
27796 for (opt = arm_fpus; opt->name != NULL; opt++)
27797 if (streq (opt->name, str))
27798 {
27799 mfpu_opt = &opt->value;
27800 return TRUE;
27801 }
27802
27803 as_bad (_("unknown floating point format `%s'\n"), str);
27804 return FALSE;
27805 }
27806
27807 static bfd_boolean
27808 arm_parse_float_abi (const char * str)
27809 {
27810 const struct arm_option_value_table * opt;
27811
27812 for (opt = arm_float_abis; opt->name != NULL; opt++)
27813 if (streq (opt->name, str))
27814 {
27815 mfloat_abi_opt = opt->value;
27816 return TRUE;
27817 }
27818
27819 as_bad (_("unknown floating point abi `%s'\n"), str);
27820 return FALSE;
27821 }
27822
#ifdef OBJ_ELF
/* Handle the -meabi=<ver> option: record the matching EF_ARM_EABI_* value
   from ARM_EABIS in MEABI_FLAGS.  Returns TRUE on success.  */
static bfd_boolean
arm_parse_eabi (const char * str)
{
  const struct arm_option_value_table *ver;

  for (ver = arm_eabis; ver->name != NULL; ver++)
    {
      if (streq (ver->name, str))
	{
	  meabi_flags = ver->value;
	  return TRUE;
	}
    }

  as_bad (_("unknown EABI `%s'\n"), str);
  return FALSE;
}
#endif
27839
27840 static bfd_boolean
27841 arm_parse_it_mode (const char * str)
27842 {
27843 bfd_boolean ret = TRUE;
27844
27845 if (streq ("arm", str))
27846 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
27847 else if (streq ("thumb", str))
27848 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
27849 else if (streq ("always", str))
27850 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
27851 else if (streq ("never", str))
27852 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
27853 else
27854 {
27855 as_bad (_("unknown implicit IT mode `%s', should be "\
27856 "arm, thumb, always, or never."), str);
27857 ret = FALSE;
27858 }
27859
27860 return ret;
27861 }
27862
27863 static bfd_boolean
27864 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED)
27865 {
27866 codecomposer_syntax = TRUE;
27867 arm_comment_chars[0] = ';';
27868 arm_line_separator_chars[0] = 0;
27869 return TRUE;
27870 }
27871
/* Long (argument-taking) ARM options; dispatched by md_parse_option via the
   arm_parse_* handlers above.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {"mccs", N_("\t\t\t  TI CodeComposer Studio syntax compatibility mode"),
   arm_ccs_mode, NULL},
  {NULL, NULL, 0, NULL}
};
27892
/* Handle command-line option character C with argument ARG (may be NULL).
   Dedicated cases are tried first; anything else is searched for in the
   short-option, legacy-option and long-option tables in turn.  Returns
   non-zero if the option was recognised, 0 otherwise.  */
int
md_parse_option (int c, const char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

#ifdef OBJ_ELF
    case OPTION_FDPIC:
      arm_fdpic = TRUE;
      break;
#endif /* OBJ_ELF */

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* Simple flag options: C is the first character of the stored option
	 name and ARG must match the remainder exactly.  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  ARG starts after C, so skipping
		 strlen (option) - 1 characters lands just past the stored
		 option prefix (e.g. past "cpu=" for "mcpu=").  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
27989
/* Print the ARM-specific portion of the assembler's --help output to FP,
   covering the short-option and long-option tables plus the options with
   dedicated cases in md_parse_option.  */
void
md_show_usage (FILE * fp)
{
  struct arm_option_table *opt;
  struct arm_long_option_table *lopt;

  fprintf (fp, _(" ARM-specific assembler options:\n"));

  for (opt = arm_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));

  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));
#endif

  fprintf (fp, _("\
  --fix-v4bx              Allow BX in ARMv4 code\n"));

#ifdef OBJ_ELF
  fprintf (fp, _("\
  --fdpic                 generate an FDPIC object file\n"));
#endif /* OBJ_ELF */
}
28024
28025 #ifdef OBJ_ELF
28026
/* Associates a Tag_CPU_arch build attribute value (VAL) with the feature
   set (FLAGS) of the corresponding architecture.  */
typedef struct
{
  int val;
  arm_feature_set flags;
} cpu_arch_ver_table;
28032
/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   chronologically for architectures, with an exception for ARMv6-M and
   ARMv6S-M due to legacy reasons.  No new architecture should have a
   special case.  This allows for build attribute selection results to be
   stable when new architectures are added.  Terminated by a VAL of -1.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V1},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V2},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V2S},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V3},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V3M},
    {TAG_CPU_ARCH_V4,	      ARM_ARCH_V4xM},
    {TAG_CPU_ARCH_V4,	      ARM_ARCH_V4},
    {TAG_CPU_ARCH_V4T,	      ARM_ARCH_V4TxM},
    {TAG_CPU_ARCH_V4T,	      ARM_ARCH_V4T},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5xM},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5TxM},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5T},
    {TAG_CPU_ARCH_V5TE,	      ARM_ARCH_V5TExP},
    {TAG_CPU_ARCH_V5TE,	      ARM_ARCH_V5TE},
    {TAG_CPU_ARCH_V5TEJ,      ARM_ARCH_V5TEJ},
    {TAG_CPU_ARCH_V6,	      ARM_ARCH_V6},
    {TAG_CPU_ARCH_V6KZ,	      ARM_ARCH_V6Z},
    {TAG_CPU_ARCH_V6KZ,	      ARM_ARCH_V6KZ},
    {TAG_CPU_ARCH_V6K,	      ARM_ARCH_V6K},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6T2},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6KT2},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6ZT2},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6KZT2},

    /* When assembling a file with only ARMv6-M or ARMv6S-M instruction, GNU as
       always selected build attributes to match those of ARMv6-M
       (resp. ARMv6S-M).  However, due to these architectures being a strict
       subset of ARMv7-M in terms of instructions available, ARMv7-M attributes
       would be selected when fully respecting chronology of architectures.
       It is thus necessary to make a special case of ARMv6-M and ARMv6S-M and
       move them before ARMv7 architectures.  */
    {TAG_CPU_ARCH_V6_M,	      ARM_ARCH_V6M},
    {TAG_CPU_ARCH_V6S_M,      ARM_ARCH_V6SM},

    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7A},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7R},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7M},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7VE},
    {TAG_CPU_ARCH_V7E_M,      ARM_ARCH_V7EM},
    /* Note: all Armv8.x-A variants map to the single TAG_CPU_ARCH_V8 value;
       only new profiles (M baseline/mainline, R) get their own tags.  */
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_1A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_2A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_3A},
    {TAG_CPU_ARCH_V8M_BASE,   ARM_ARCH_V8M_BASE},
    {TAG_CPU_ARCH_V8M_MAIN,   ARM_ARCH_V8M_MAIN},
    {TAG_CPU_ARCH_V8R,	      ARM_ARCH_V8R},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_4A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_5A},
    {TAG_CPU_ARCH_V8_1M_MAIN, ARM_ARCH_V8_1M_MAIN},
    {-1,		      ARM_ARCH_NONE}
};
28093
28094 /* Set an attribute if it has not already been set by the user. */
28095
28096 static void
28097 aeabi_set_attribute_int (int tag, int value)
28098 {
28099 if (tag < 1
28100 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
28101 || !attributes_set_explicitly[tag])
28102 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
28103 }
28104
28105 static void
28106 aeabi_set_attribute_string (int tag, const char *value)
28107 {
28108 if (tag < 1
28109 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
28110 || !attributes_set_explicitly[tag])
28111 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
28112 }
28113
28114 /* Return whether features in the *NEEDED feature set are available via
28115 extensions for the architecture whose feature set is *ARCH_FSET. */
28116
28117 static bfd_boolean
28118 have_ext_for_needed_feat_p (const arm_feature_set *arch_fset,
28119 const arm_feature_set *needed)
28120 {
28121 int i, nb_allowed_archs;
28122 arm_feature_set ext_fset;
28123 const struct arm_option_extension_value_table *opt;
28124
28125 ext_fset = arm_arch_none;
28126 for (opt = arm_extensions; opt->name != NULL; opt++)
28127 {
28128 /* Extension does not provide any feature we need. */
28129 if (!ARM_CPU_HAS_FEATURE (*needed, opt->merge_value))
28130 continue;
28131
28132 nb_allowed_archs =
28133 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
28134 for (i = 0; i < nb_allowed_archs; i++)
28135 {
28136 /* Empty entry. */
28137 if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_arch_any))
28138 break;
28139
28140 /* Extension is available, add it. */
28141 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *arch_fset))
28142 ARM_MERGE_FEATURE_SETS (ext_fset, ext_fset, opt->merge_value);
28143 }
28144 }
28145
28146 /* Can we enable all features in *needed? */
28147 return ARM_FSET_CPU_SUBSET (*needed, ext_fset);
28148 }
28149
/* Select value for Tag_CPU_arch and Tag_CPU_arch_profile build attributes for
   a given architecture feature set *ARCH_EXT_FSET including extension feature
   set *EXT_FSET.  Selection logic used depend on EXACT_MATCH:
   - if true, check for an exact match of the architecture modulo extensions;
   - otherwise, select build attribute value of the first superset
     architecture released so that results remains stable when new architectures
     are added.
   For -march/-mcpu=all the build attribute value of the most featureful
   architecture is returned.  Tag_CPU_arch_profile result is returned in
   PROFILE.  Returns -1 when no architecture in cpu_arch_ver matches.  */

static int
get_aeabi_cpu_arch_from_fset (const arm_feature_set *arch_ext_fset,
			      const arm_feature_set *ext_fset,
			      char *profile, int exact_match)
{
  arm_feature_set arch_fset;
  const cpu_arch_ver_table *p_ver, *p_ver_ret = NULL;

  /* Select most featureful architecture with all its extensions if building
     for -march=all as the feature sets used to set build attributes.  */
  if (ARM_FEATURE_EQUAL (*arch_ext_fset, arm_arch_any))
    {
      /* Force revisiting of decision for each new architecture.  */
      gas_assert (MAX_TAG_CPU_ARCH <= TAG_CPU_ARCH_V8_1M_MAIN);
      *profile = 'A';
      return TAG_CPU_ARCH_V8;
    }

  /* Architecture feature set without the requested extensions; used for
     the "architecture only" comparisons below.  */
  ARM_CLEAR_FEATURE (arch_fset, *arch_ext_fset, *ext_fset);

  /* Walk cpu_arch_ver in table order; the table is ordered so that the
     first superset match gives a stable answer (see table comment).  */
  for (p_ver = cpu_arch_ver; p_ver->val != -1; p_ver++)
    {
      arm_feature_set known_arch_fset;

      /* Strip FPU bits: only core architecture features are compared.  */
      ARM_CLEAR_FEATURE (known_arch_fset, p_ver->flags, fpu_any);
      if (exact_match)
	{
	  /* Base architecture match user-specified architecture and
	     extensions, eg. ARMv6S-M matching -march=armv6-m+os.  */
	  if (ARM_FEATURE_EQUAL (*arch_ext_fset, known_arch_fset))
	    {
	      p_ver_ret = p_ver;
	      goto found;
	    }
	  /* Base architecture match user-specified architecture only
	     (eg. ARMv6-M in the same case as above).  Record it in case we
	     find a match with above condition.  */
	  else if (p_ver_ret == NULL
		   && ARM_FEATURE_EQUAL (arch_fset, known_arch_fset))
	    p_ver_ret = p_ver;
	}
      else
	{

	  /* Architecture has all features wanted.  */
	  if (ARM_FSET_CPU_SUBSET (arch_fset, known_arch_fset))
	    {
	      arm_feature_set added_fset;

	      /* Compute features added by this architecture over the one
		 recorded in p_ver_ret.  */
	      if (p_ver_ret != NULL)
		ARM_CLEAR_FEATURE (added_fset, known_arch_fset,
				   p_ver_ret->flags);
	      /* First architecture that match incl. with extensions, or the
		 only difference in features over the recorded match is
		 features that were optional and are now mandatory.  */
	      if (p_ver_ret == NULL
		  || ARM_FSET_CPU_SUBSET (added_fset, arch_fset))
		{
		  p_ver_ret = p_ver;
		  goto found;
		}
	    }
	  else if (p_ver_ret == NULL)
	    {
	      arm_feature_set needed_ext_fset;

	      /* Features in ARCH_FSET not provided by this architecture.  */
	      ARM_CLEAR_FEATURE (needed_ext_fset, arch_fset, known_arch_fset);

	      /* Architecture has all features needed when using some
		 extensions.  Record it and continue searching in case there
		 exist an architecture providing all needed features without
		 the need for extensions (eg. ARMv6S-M Vs ARMv6-M with
		 OS extension).  */
	      if (have_ext_for_needed_feat_p (&known_arch_fset,
					      &needed_ext_fset))
		p_ver_ret = p_ver;
	    }
	}
    }

  if (p_ver_ret == NULL)
    return -1;

found:
  /* Tag_CPU_arch_profile.  Profile is 'A' for v7-A/v8 class (but not
     v8-M-only), 'R' for v7-R, 'M' for M-profile, '\0' when no profile
     applies.  */
  if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7a)
      || ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8)
      || (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_atomics)
	  && !ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8m_m_only)))
    *profile = 'A';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7r))
    *profile = 'R';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_m))
    *profile = 'M';
  else
    *profile = '\0';
  return p_ver_ret->val;
}
28261
/* Set the public EABI object attributes.  Called from arm_md_end and again
   from arm_md_post_relax (see below), so the computation must be repeatable
   after relaxation.  */

static void
aeabi_set_public_attributes (void)
{
  char profile = '\0';
  int arch = -1;
  int virt_sec = 0;
  int fp16_optional = 0;
  int skip_exact_match = 0;
  arm_feature_set flags, flags_arch, flags_ext;

  /* Autodetection mode, choose the architecture based the instructions
     actually used.  */
  if (no_cpu_selected ())
    {
      ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);

      /* Any ARM instruction at all implies at least v1.  */
      if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);

      /* Any Thumb instruction at all implies at least v4T.  */
      if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);

      /* Code run during relaxation relies on selected_cpu being set.  */
      ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
      flags_ext = arm_arch_none;
      ARM_CLEAR_FEATURE (selected_arch, flags_arch, flags_ext);
      selected_ext = flags_ext;
      selected_cpu = flags;
    }
  /* Otherwise, choose the architecture based on the capabilities of the
     requested cpu.  */
  else
    {
      ARM_MERGE_FEATURE_SETS (flags_arch, selected_arch, selected_ext);
      ARM_CLEAR_FEATURE (flags_arch, flags_arch, fpu_any);
      flags_ext = selected_ext;
      flags = selected_cpu;
    }
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_fpu);

  /* Allow the user to override the reported architecture.  */
  if (!ARM_FEATURE_ZERO (selected_object_arch))
    {
      ARM_CLEAR_FEATURE (flags_arch, selected_object_arch, fpu_any);
      flags_ext = arm_arch_none;
    }
  else
    skip_exact_match = ARM_FEATURE_EQUAL (selected_cpu, arm_arch_any);

  /* When this function is run again after relaxation has happened there is no
     way to determine whether an architecture or CPU was specified by the user:
     - selected_cpu is set above for relaxation to work;
     - march_cpu_opt is not set if only -mcpu or .cpu is used;
     - mcpu_cpu_opt is set to arm_arch_any for autodetection.
     Therefore, if not in -march=all case we first try an exact match and fall
     back to autodetection.  */
  if (!skip_exact_match)
    arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 1);
  if (arch == -1)
    arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 0);
  if (arch == -1)
    as_bad (_("no architecture contains all the instructions used\n"));

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      /* For "armvN..." names, report only the upper-cased suffix,
	 eg. "armv7-a" is recorded as "7-A".  */
      if (strncmp (q, "armv", 4) == 0)
	{
	  int i;

	  q += 4;
	  for (i = 0; q[i]; i++)
	    q[i] = TOUPPER (q[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  */
  if (profile != '\0')
    aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);

  /* Tag_DSP_extension.  */
  if (ARM_CPU_HAS_FEATURE (selected_ext, arm_ext_dsp))
    aeabi_set_attribute_int (Tag_DSP_extension, 1);

  ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
  /* Tag_ARM_ISA_use.  Also set when no core feature is present at all
     (FLAGS_ARCH zero), so an empty file still records ARM ISA use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || ARM_FEATURE_ZERO (flags_arch))
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || ARM_FEATURE_ZERO (flags_arch))
    {
      int thumb_isa_use;

      /* 3: M-profile-only without v8 (v8-M); 2: Thumb-2; 1: Thumb-1.  */
      if (!ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
	  && ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only))
	thumb_isa_use = 3;
      else if (ARM_CPU_HAS_FEATURE (flags, arm_arch_t2))
	thumb_isa_use = 2;
      else
	thumb_isa_use = 1;
      aeabi_set_attribute_int (Tag_THUMB_ISA_use, thumb_isa_use);
    }

  /* Tag_VFP_arch.  Checked from the most featureful FP extension down.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 7 : 8);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    {
      fp16_optional = 1;
      aeabi_set_attribute_int (Tag_VFP_arch, 3);
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    {
      aeabi_set_attribute_int (Tag_VFP_arch, 4);
      fp16_optional = 1;
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_ABI_HardFP_use.  Set only for single-precision-only VFP
     (v1xd without full v1).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v8_1))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 4);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    {
      if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
	}
      else
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
	  fp16_optional = 1;
	}
    }

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  Only emitted when
     FP16 is an optional extra on top of the FP/SIMD level chosen above.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);

  /* Tag_DIV_use.

     We set Tag_DIV_use to two when integer divide instructions have been used
     in ARM state, or when Thumb integer divide instructions have been used,
     but we have no architecture profile set, nor have we any ARM instructions.

     For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
     by the base architecture.

     For new architectures we will have to check these tests.  */
  gas_assert (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
    aeabi_set_attribute_int (Tag_DIV_use, 0);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
	   || (profile == '\0'
	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
    aeabi_set_attribute_int (Tag_DIV_use, 2);

  /* Tag_MP_extension_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
    aeabi_set_attribute_int (Tag_MPextension_use, 1);

  /* Tag Virtualization_use.  Bit 0: security (TrustZone); bit 1:
     virtualization extensions.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
    virt_sec |= 1;
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
    virt_sec |= 2;
  if (virt_sec != 0)
    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
}
28467
28468 /* Post relaxation hook. Recompute ARM attributes now that relaxation is
28469 finished and free extension feature bits which will not be used anymore. */
28470
28471 void
28472 arm_md_post_relax (void)
28473 {
28474 aeabi_set_public_attributes ();
28475 XDELETE (mcpu_ext_opt);
28476 mcpu_ext_opt = NULL;
28477 XDELETE (march_ext_opt);
28478 march_ext_opt = NULL;
28479 }
28480
28481 /* Add the default contents for the .ARM.attributes section. */
28482
28483 void
28484 arm_md_end (void)
28485 {
28486 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
28487 return;
28488
28489 aeabi_set_public_attributes ();
28490 }
28491 #endif /* OBJ_ELF */
28492
28493 /* Parse a .cpu directive. */
28494
28495 static void
28496 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
28497 {
28498 const struct arm_cpu_option_table *opt;
28499 char *name;
28500 char saved_char;
28501
28502 name = input_line_pointer;
28503 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
28504 input_line_pointer++;
28505 saved_char = *input_line_pointer;
28506 *input_line_pointer = 0;
28507
28508 /* Skip the first "all" entry. */
28509 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
28510 if (streq (opt->name, name))
28511 {
28512 selected_arch = opt->value;
28513 selected_ext = opt->ext;
28514 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
28515 if (opt->canonical_name)
28516 strcpy (selected_cpu_name, opt->canonical_name);
28517 else
28518 {
28519 int i;
28520 for (i = 0; opt->name[i]; i++)
28521 selected_cpu_name[i] = TOUPPER (opt->name[i]);
28522
28523 selected_cpu_name[i] = 0;
28524 }
28525 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
28526
28527 *input_line_pointer = saved_char;
28528 demand_empty_rest_of_line ();
28529 return;
28530 }
28531 as_bad (_("unknown cpu `%s'"), name);
28532 *input_line_pointer = saved_char;
28533 ignore_rest_of_line ();
28534 }
28535
28536 /* Parse a .arch directive. */
28537
28538 static void
28539 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
28540 {
28541 const struct arm_arch_option_table *opt;
28542 char saved_char;
28543 char *name;
28544
28545 name = input_line_pointer;
28546 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
28547 input_line_pointer++;
28548 saved_char = *input_line_pointer;
28549 *input_line_pointer = 0;
28550
28551 /* Skip the first "all" entry. */
28552 for (opt = arm_archs + 1; opt->name != NULL; opt++)
28553 if (streq (opt->name, name))
28554 {
28555 selected_arch = opt->value;
28556 selected_ext = arm_arch_none;
28557 selected_cpu = selected_arch;
28558 strcpy (selected_cpu_name, opt->name);
28559 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
28560 *input_line_pointer = saved_char;
28561 demand_empty_rest_of_line ();
28562 return;
28563 }
28564
28565 as_bad (_("unknown architecture `%s'\n"), name);
28566 *input_line_pointer = saved_char;
28567 ignore_rest_of_line ();
28568 }
28569
28570 /* Parse a .object_arch directive. */
28571
28572 static void
28573 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
28574 {
28575 const struct arm_arch_option_table *opt;
28576 char saved_char;
28577 char *name;
28578
28579 name = input_line_pointer;
28580 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
28581 input_line_pointer++;
28582 saved_char = *input_line_pointer;
28583 *input_line_pointer = 0;
28584
28585 /* Skip the first "all" entry. */
28586 for (opt = arm_archs + 1; opt->name != NULL; opt++)
28587 if (streq (opt->name, name))
28588 {
28589 selected_object_arch = opt->value;
28590 *input_line_pointer = saved_char;
28591 demand_empty_rest_of_line ();
28592 return;
28593 }
28594
28595 as_bad (_("unknown architecture `%s'\n"), name);
28596 *input_line_pointer = saved_char;
28597 ignore_rest_of_line ();
28598 }
28599
28600 /* Parse a .arch_extension directive. */
28601
28602 static void
28603 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
28604 {
28605 const struct arm_option_extension_value_table *opt;
28606 char saved_char;
28607 char *name;
28608 int adding_value = 1;
28609
28610 name = input_line_pointer;
28611 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
28612 input_line_pointer++;
28613 saved_char = *input_line_pointer;
28614 *input_line_pointer = 0;
28615
28616 if (strlen (name) >= 2
28617 && strncmp (name, "no", 2) == 0)
28618 {
28619 adding_value = 0;
28620 name += 2;
28621 }
28622
28623 for (opt = arm_extensions; opt->name != NULL; opt++)
28624 if (streq (opt->name, name))
28625 {
28626 int i, nb_allowed_archs =
28627 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[i]);
28628 for (i = 0; i < nb_allowed_archs; i++)
28629 {
28630 /* Empty entry. */
28631 if (ARM_CPU_IS_ANY (opt->allowed_archs[i]))
28632 continue;
28633 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], selected_arch))
28634 break;
28635 }
28636
28637 if (i == nb_allowed_archs)
28638 {
28639 as_bad (_("architectural extension `%s' is not allowed for the "
28640 "current base architecture"), name);
28641 break;
28642 }
28643
28644 if (adding_value)
28645 ARM_MERGE_FEATURE_SETS (selected_ext, selected_ext,
28646 opt->merge_value);
28647 else
28648 ARM_CLEAR_FEATURE (selected_ext, selected_ext, opt->clear_value);
28649
28650 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
28651 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
28652 *input_line_pointer = saved_char;
28653 demand_empty_rest_of_line ();
28654 /* Allowing Thumb division instructions for ARMv7 in autodetection rely
28655 on this return so that duplicate extensions (extensions with the
28656 same name as a previous extension in the list) are not considered
28657 for command-line parsing. */
28658 return;
28659 }
28660
28661 if (opt->name == NULL)
28662 as_bad (_("unknown architecture extension `%s'\n"), name);
28663
28664 *input_line_pointer = saved_char;
28665 ignore_rest_of_line ();
28666 }
28667
28668 /* Parse a .fpu directive. */
28669
28670 static void
28671 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
28672 {
28673 const struct arm_option_fpu_value_table *opt;
28674 char saved_char;
28675 char *name;
28676
28677 name = input_line_pointer;
28678 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
28679 input_line_pointer++;
28680 saved_char = *input_line_pointer;
28681 *input_line_pointer = 0;
28682
28683 for (opt = arm_fpus; opt->name != NULL; opt++)
28684 if (streq (opt->name, name))
28685 {
28686 selected_fpu = opt->value;
28687 #ifndef CPU_DEFAULT
28688 if (no_cpu_selected ())
28689 ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
28690 else
28691 #endif
28692 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
28693 *input_line_pointer = saved_char;
28694 demand_empty_rest_of_line ();
28695 return;
28696 }
28697
28698 as_bad (_("unknown floating point format `%s'\n"), name);
28699 *input_line_pointer = saved_char;
28700 ignore_rest_of_line ();
28701 }
28702
/* Copy symbol information.  Propagates the ARM-specific per-symbol flag
   word (as accessed via ARM_GET_FLAG — presumably the Thumb/interwork
   state bits; confirm against the ARM_* flag definitions) from SRC to
   DEST.  */

void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
28710
28711 #ifdef OBJ_ELF
28712 /* Given a symbolic attribute NAME, return the proper integer value.
28713 Returns -1 if the attribute is not known. */
28714
28715 int
28716 arm_convert_symbolic_attribute (const char *name)
28717 {
28718 static const struct
28719 {
28720 const char * name;
28721 const int tag;
28722 }
28723 attribute_table[] =
28724 {
28725 /* When you modify this table you should
28726 also modify the list in doc/c-arm.texi. */
28727 #define T(tag) {#tag, tag}
28728 T (Tag_CPU_raw_name),
28729 T (Tag_CPU_name),
28730 T (Tag_CPU_arch),
28731 T (Tag_CPU_arch_profile),
28732 T (Tag_ARM_ISA_use),
28733 T (Tag_THUMB_ISA_use),
28734 T (Tag_FP_arch),
28735 T (Tag_VFP_arch),
28736 T (Tag_WMMX_arch),
28737 T (Tag_Advanced_SIMD_arch),
28738 T (Tag_PCS_config),
28739 T (Tag_ABI_PCS_R9_use),
28740 T (Tag_ABI_PCS_RW_data),
28741 T (Tag_ABI_PCS_RO_data),
28742 T (Tag_ABI_PCS_GOT_use),
28743 T (Tag_ABI_PCS_wchar_t),
28744 T (Tag_ABI_FP_rounding),
28745 T (Tag_ABI_FP_denormal),
28746 T (Tag_ABI_FP_exceptions),
28747 T (Tag_ABI_FP_user_exceptions),
28748 T (Tag_ABI_FP_number_model),
28749 T (Tag_ABI_align_needed),
28750 T (Tag_ABI_align8_needed),
28751 T (Tag_ABI_align_preserved),
28752 T (Tag_ABI_align8_preserved),
28753 T (Tag_ABI_enum_size),
28754 T (Tag_ABI_HardFP_use),
28755 T (Tag_ABI_VFP_args),
28756 T (Tag_ABI_WMMX_args),
28757 T (Tag_ABI_optimization_goals),
28758 T (Tag_ABI_FP_optimization_goals),
28759 T (Tag_compatibility),
28760 T (Tag_CPU_unaligned_access),
28761 T (Tag_FP_HP_extension),
28762 T (Tag_VFP_HP_extension),
28763 T (Tag_ABI_FP_16bit_format),
28764 T (Tag_MPextension_use),
28765 T (Tag_DIV_use),
28766 T (Tag_nodefaults),
28767 T (Tag_also_compatible_with),
28768 T (Tag_conformance),
28769 T (Tag_T2EE_use),
28770 T (Tag_Virtualization_use),
28771 T (Tag_DSP_extension),
28772 /* We deliberately do not include Tag_MPextension_use_legacy. */
28773 #undef T
28774 };
28775 unsigned int i;
28776
28777 if (name == NULL)
28778 return -1;
28779
28780 for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
28781 if (streq (name, attribute_table[i].name))
28782 return attribute_table[i].tag;
28783
28784 return -1;
28785 }
28786
28787 /* Apply sym value for relocations only in the case that they are for
28788 local symbols in the same segment as the fixup and you have the
28789 respective architectural feature for blx and simple switches. */
28790
28791 int
28792 arm_apply_sym_value (struct fix * fixP, segT this_seg)
28793 {
28794 if (fixP->fx_addsy
28795 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
28796 /* PR 17444: If the local symbol is in a different section then a reloc
28797 will always be generated for it, so applying the symbol value now
28798 will result in a double offset being stored in the relocation. */
28799 && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
28800 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
28801 {
28802 switch (fixP->fx_r_type)
28803 {
28804 case BFD_RELOC_ARM_PCREL_BLX:
28805 case BFD_RELOC_THUMB_PCREL_BRANCH23:
28806 if (ARM_IS_FUNC (fixP->fx_addsy))
28807 return 1;
28808 break;
28809
28810 case BFD_RELOC_ARM_PCREL_CALL:
28811 case BFD_RELOC_THUMB_PCREL_BLX:
28812 if (THUMB_IS_FUNC (fixP->fx_addsy))
28813 return 1;
28814 break;
28815
28816 default:
28817 break;
28818 }
28819
28820 }
28821 return 0;
28822 }
28823 #endif /* OBJ_ELF */