[PATCH, BINUTILS, AARCH64, 5/9] Add DC CVADP instruction
Source: binutils-gdb.git — file gas/config/tc-arm.c
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2018 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8
9 This file is part of GAS, the GNU Assembler.
10
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
14 any later version.
15
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24 02110-1301, USA. */
25
26 #include "as.h"
27 #include <limits.h>
28 #include <stdarg.h>
29 #define NO_RELOC 0
30 #include "safe-ctype.h"
31 #include "subsegs.h"
32 #include "obstack.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
35
36 #ifdef OBJ_ELF
37 #include "elf/arm.h"
38 #include "dw2gencfi.h"
39 #endif
40
41 #include "dwarf2dbg.h"
42
43 #ifdef OBJ_ELF
/* Must be at least the size of the largest unwind opcode (currently two).  */
#define ARM_OPCODE_CHUNK_SIZE 8

/* This structure holds the unwinding state.  It accumulates information
   between unwinding directives.  NOTE(review): the fields are consumed by
   the unwind_* directive handlers defined later in this file (outside this
   chunk) — confirm individual field semantics there.  */

static struct
{
  symbolS *	  proc_start;
  symbolS *	  table_entry;
  symbolS *	  personality_routine;
  int		  personality_index;
  /* The segment containing the function.  */
  segT		  saved_seg;
  subsegT	  saved_subseg;
  /* Opcodes generated from this function.  */
  unsigned char * opcodes;
  int		  opcode_count;
  int		  opcode_alloc;
  /* The number of bytes pushed to the stack.  */
  offsetT	  frame_size;
  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT	  pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.  */
  offsetT	  fp_offset;
  int		  fp_reg;
  /* Nonzero if an unwind_setfp directive has been seen.  */
  unsigned	  fp_used:1;
  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned	  sp_restored:1;
} unwind;

/* Whether --fdpic was given.  */
static int arm_fdpic;
80
81 #endif /* OBJ_ELF */
82
/* Results from operand parsing worker functions.  */

typedef enum
{
  PARSE_OPERAND_SUCCESS,
  PARSE_OPERAND_FAIL,
  /* Parse failed, and no backtracking to an alternative interpretation of
     the operand should be attempted.  */
  PARSE_OPERAND_FAIL_NO_BACKTRACK
} parse_operand_result;
91
/* Float ABI variants.  NOTE(review): presumably selected via -mfloat-abi
   and stored in mfloat_abi_opt below — confirm in the option parser.  */
enum arm_float_abi
{
  ARM_FLOAT_ABI_HARD,
  ARM_FLOAT_ABI_SOFTFP,
  ARM_FLOAT_ABI_SOFT
};
98
/* Types of processor to assemble for.  */
#ifndef CPU_DEFAULT
/* The code that was here used to select a default CPU depending on compiler
   pre-defines which were only present when doing native builds, thus
   changing gas' default behaviour depending upon the build host.

   If you have a target that requires a default CPU option then you
   should define CPU_DEFAULT here.  */
#endif
108
/* Pick a default FPU architecture for targets that did not define one.  */
#ifndef FPU_DEFAULT
# ifdef TE_LINUX
#  define FPU_DEFAULT FPU_ARCH_FPA
# elif defined (TE_NetBSD)
#  ifdef OBJ_ELF
#   define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, but VFP order.  */
#  else
    /* Legacy a.out format.  */
#   define FPU_DEFAULT FPU_ARCH_FPA	/* Soft-float, but FPA order.  */
#  endif
# elif defined (TE_VXWORKS)
#  define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, VFP order.  */
# else
   /* For backwards compatibility, default to FPA.  */
#  define FPU_DEFAULT FPU_ARCH_FPA
# endif
#endif /* ifndef FPU_DEFAULT */
126
/* Convenience macro: nonzero when strings A and B compare equal.  */
#define streq(a, b) (strcmp (a, b) == 0)

/* Current set of feature bits available (CPU+FPU).  Different from
   selected_cpu + selected_fpu in case of autodetection since the CPU
   feature bits are then all set.  */
static arm_feature_set cpu_variant;
/* Feature bits used in each execution state.  Used to set build attribute
   (in particular Tag_*_ISA_use) in CPU autodetection mode.  */
static arm_feature_set arm_arch_used;
static arm_feature_set thumb_arch_used;

/* Flags stored in private area of BFD structure.  */
static int uses_apcs_26 = FALSE;
static int atpcs = FALSE;
static int support_interwork = FALSE;
static int uses_apcs_float = FALSE;
static int pic_code = FALSE;
static int fix_v4bx = FALSE;
/* Warn on using deprecated features.  */
static int warn_on_deprecated = TRUE;

/* Understand CodeComposer Studio assembly syntax.  */
bfd_boolean codecomposer_syntax = FALSE;
150
/* Variables that we set while parsing command-line options.  Once all
   options have been read we re-process these values to set the real
   assembly flags.  */

/* CPU and FPU feature bits set for legacy CPU and FPU options (eg. -marm1
   instead of -mcpu=arm1).  */
static const arm_feature_set *legacy_cpu = NULL;
static const arm_feature_set *legacy_fpu = NULL;

/* CPU, extension and FPU feature bits selected by -mcpu.  The *_ext_opt
   pointers are non-const.  NOTE(review): presumably because extension bits
   are accumulated into them — confirm in the option parsing code.  */
static const arm_feature_set *mcpu_cpu_opt = NULL;
static arm_feature_set *mcpu_ext_opt = NULL;
static const arm_feature_set *mcpu_fpu_opt = NULL;

/* CPU, extension and FPU feature bits selected by -march.  */
static const arm_feature_set *march_cpu_opt = NULL;
static arm_feature_set *march_ext_opt = NULL;
static const arm_feature_set *march_fpu_opt = NULL;

/* Feature bits selected by -mfpu.  */
static const arm_feature_set *mfpu_opt = NULL;

/* Constants for known architecture features.  */
static const arm_feature_set fpu_default = FPU_DEFAULT;
static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V1;
static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V3;
static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED = FPU_ARCH_NEON_V1;
static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
#ifdef OBJ_ELF
static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
#endif
static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
185
#ifdef CPU_DEFAULT
static const arm_feature_set cpu_default = CPU_DEFAULT;
#endif

/* Feature sets naming individual architecture versions and extensions.
   These are used throughout the file to gate the availability of
   instructions and directives.  */
static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V2);
static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
static const arm_feature_set arm_ext_v4t_5 =
  ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
static const arm_feature_set arm_ext_v6_notm =
  ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
static const arm_feature_set arm_ext_v6_dsp =
  ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
static const arm_feature_set arm_ext_barrier =
  ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
static const arm_feature_set arm_ext_msr =
  ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
#ifdef OBJ_ELF
static const arm_feature_set ATTRIBUTE_UNUSED arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
#endif
static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
static const arm_feature_set arm_ext_m =
  ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_V7M,
		    ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
static const arm_feature_set arm_ext_v8m_main =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN);
/* Instructions in ARMv8-M only found in M profile architectures.  */
static const arm_feature_set arm_ext_v8m_m_only =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
static const arm_feature_set arm_ext_v6t2_v8m =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
/* Instructions shared between ARMv8-A and ARMv8-M.  */
static const arm_feature_set arm_ext_atomics =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
#ifdef OBJ_ELF
/* DSP instructions Tag_DSP_extension refers to.  */
static const arm_feature_set arm_ext_dsp =
  ARM_FEATURE_CORE_LOW (ARM_EXT_V5E | ARM_EXT_V5ExP | ARM_EXT_V6_DSP);
#endif
static const arm_feature_set arm_ext_ras =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS);
/* FP16 instructions.  */
static const arm_feature_set arm_ext_fp16 =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
static const arm_feature_set arm_ext_fp16_fml =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_FML);
static const arm_feature_set arm_ext_v8_2 =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A);
static const arm_feature_set arm_ext_v8_3 =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A);
static const arm_feature_set arm_ext_sb =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB);
static const arm_feature_set arm_ext_predres =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES);

/* Catch-all feature sets.  */
static const arm_feature_set arm_arch_any = ARM_ANY;
#ifdef OBJ_ELF
static const arm_feature_set fpu_any = FPU_ANY;
#endif
static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED = ARM_FEATURE (-1, -1, -1);
static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
271
/* Feature sets for the coprocessor extensions (iWMMXt, XScale, Maverick)
   and the FPA/VFP/Neon floating-point architectures.  */
static const arm_feature_set arm_cext_iwmmxt2 =
  ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
static const arm_feature_set arm_cext_iwmmxt =
  ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
static const arm_feature_set arm_cext_xscale =
  ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
static const arm_feature_set arm_cext_maverick =
  ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
static const arm_feature_set fpu_fpa_ext_v1 =
  ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
static const arm_feature_set fpu_fpa_ext_v2 =
  ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v1xd =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
static const arm_feature_set fpu_vfp_ext_v1 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
static const arm_feature_set fpu_vfp_ext_v2 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v3xd =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
static const arm_feature_set fpu_vfp_ext_v3 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
static const arm_feature_set fpu_vfp_ext_d32 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
static const arm_feature_set fpu_neon_ext_v1 =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
static const arm_feature_set fpu_vfp_v3_or_neon_ext =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
#ifdef OBJ_ELF
static const arm_feature_set fpu_vfp_fp16 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
static const arm_feature_set fpu_neon_ext_fma =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
#endif
static const arm_feature_set fpu_vfp_ext_fma =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
static const arm_feature_set fpu_vfp_ext_armv8 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
static const arm_feature_set fpu_vfp_ext_armv8xd =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
static const arm_feature_set fpu_neon_ext_armv8 =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
static const arm_feature_set fpu_crypto_ext_armv8 =
  ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
static const arm_feature_set crc_ext_armv8 =
  ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
static const arm_feature_set fpu_neon_ext_v8_1 =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA);
static const arm_feature_set fpu_neon_ext_dotprod =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD);
322
/* Float ABI chosen on the command line (an enum arm_float_abi value);
   initialised to -1, i.e. not yet set.  */
static int mfloat_abi_opt = -1;
/* Architecture feature bits selected by the last -mcpu/-march or .cpu/.arch
   directive.  */
static arm_feature_set selected_arch = ARM_ARCH_NONE;
/* Extension feature bits selected by the last -mcpu/-march or .arch_extension
   directive.  */
static arm_feature_set selected_ext = ARM_ARCH_NONE;
/* Feature bits selected by the last -mcpu/-march or by the combination of the
   last .cpu/.arch directive .arch_extension directives since that
   directive.  */
static arm_feature_set selected_cpu = ARM_ARCH_NONE;
/* FPU feature bits selected by the last -mfpu or .fpu directive.  */
static arm_feature_set selected_fpu = FPU_NONE;
/* Feature bits selected by the last .object_arch directive.  */
static arm_feature_set selected_object_arch = ARM_ARCH_NONE;
/* Must be long enough to hold any of the names in arm_cpus.  */
static char selected_cpu_name[20];

/* Floating-point constant being parsed; shared with the generic gas code.  */
extern FLONUM_TYPE generic_floating_point_number;
342
343 /* Return if no cpu was selected on command-line. */
344 static bfd_boolean
345 no_cpu_selected (void)
346 {
347 return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
348 }
349
#ifdef OBJ_ELF
/* EABI version flags to record in the object file: the target-provided
   EABI_DEFAULT when available, otherwise "unknown".  */
# ifdef EABI_DEFAULT
static int meabi_flags = EABI_DEFAULT;
# else
static int meabi_flags = EF_ARM_EABI_UNKNOWN;
# endif

/* One entry per known build attribute; nonzero when that attribute was set
   explicitly rather than deduced.  */
static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
358
359 bfd_boolean
360 arm_is_eabi (void)
361 {
362 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
363 }
364 #endif
365
#ifdef OBJ_ELF
/* Pre-defined "_GLOBAL_OFFSET_TABLE_"	*/
symbolS * GOT_symbol;
#endif

/* 0: assemble for ARM,
   1: assemble for Thumb,
   2: assemble for Thumb even though target CPU does not support thumb
      instructions.  */
static int thumb_mode = 0;
/* A value distinct from the possible values for thumb_mode that we
   can use to record whether thumb_mode has been copied into the
   tc_frag_data field of a frag.  */
#define MODE_RECORDED (1 << 4)

/* Specifies the intrinsic IT insn behavior mode.  The ARM and THUMB bits
   can be combined, as in IMPLICIT_IT_MODE_ALWAYS.  */
enum implicit_it_mode
{
  IMPLICIT_IT_MODE_NEVER  = 0x00,
  IMPLICIT_IT_MODE_ARM    = 0x01,
  IMPLICIT_IT_MODE_THUMB  = 0x02,
  IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
};
/* Current implicit-IT mode; defaults to ARM-only.  */
static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
390
391 /* If unified_syntax is true, we are processing the new unified
392 ARM/Thumb syntax. Important differences from the old ARM mode:
393
394 - Immediate operands do not require a # prefix.
395 - Conditional affixes always appear at the end of the
396 instruction. (For backward compatibility, those instructions
397 that formerly had them in the middle, continue to accept them
398 there.)
399 - The IT instruction may appear, and if it does is validated
400 against subsequent conditional affixes. It does not generate
401 machine code.
402
403 Important differences from the old Thumb mode:
404
405 - Immediate operands do not require a # prefix.
406 - Most of the V6T2 instructions are only available in unified mode.
407 - The .N and .W suffixes are recognized and honored (it is an error
408 if they cannot be honored).
409 - All instructions set the flags if and only if they have an 's' affix.
410 - Conditional affixes may be used. They are validated against
411 preceding IT instructions. Unlike ARM mode, you cannot use a
412 conditional affix except in the scope of an IT instruction. */
413
/* TRUE while processing unified ARM/Thumb syntax; see the block comment
   above for the differences from the divided modes.  NOTE(review): set
   elsewhere in this file, presumably by a .syntax directive — confirm.  */
static bfd_boolean unified_syntax = FALSE;

/* An immediate operand can start with #, and ld*, st*, pld operands
   can contain [ and ].  We need to tell APP not to elide whitespace
   before a [, which can appear as the first operand for pld.
   Likewise, a { can appear as the first operand for push, pop, vld*, etc.  */
const char arm_symbol_chars[] = "#[]{}";
421
/* Kinds of element that can appear in a Neon-style type suffix.  */
enum neon_el_type
{
  NT_invtype,
  NT_untyped,
  NT_integer,
  NT_float,
  NT_poly,
  NT_signed,
  NT_unsigned
};

/* A single type-suffix element: its kind and its element size.  */
struct neon_type_el
{
  enum neon_el_type type;
  unsigned size;
};

/* Maximum number of elements in one parsed Neon type suffix.  */
#define NEON_MAX_TYPE_ELS 4

/* A fully parsed Neon type suffix: ELEMS of the EL entries are valid.  */
struct neon_type
{
  struct neon_type_el el[NEON_MAX_TYPE_ELS];
  unsigned elems;
};
446
/* Where an instruction may legitimately sit relative to an IT (If-Then)
   block; used by the IT-state machine (handle_it_state below).  */
enum it_instruction_type
{
  OUTSIDE_IT_INSN,
  INSIDE_IT_INSN,
  INSIDE_IT_LAST_INSN,
  IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
			     if inside, should be the last one.  */
  NEUTRAL_IT_INSN,	  /* This could be either inside or outside,
			     i.e. BKPT and NOP.  */
  IT_INSN		  /* The IT insn has been parsed.  */
};

/* The maximum number of operands we need.  */
#define ARM_IT_MAX_OPERANDS 6
461
/* Everything known about the instruction currently being assembled.  */
struct arm_it
{
  /* Diagnostic for the current instruction, or NULL.  */
  const char *	error;
  unsigned long instruction;
  int		size;
  /* NOTE(review): presumably the encoding size forced by a .n/.w suffix —
     confirm against the assembly entry points later in the file.  */
  int		size_req;
  int		cond;
  /* "uncond_value" is set to the value in place of the conditional field in
     unconditional versions of the instruction, or -1 if nothing is
     appropriate.  */
  int		uncond_value;
  struct neon_type vectype;
  /* This does not indicate an actual NEON instruction, only that
     the mnemonic accepts neon-style type suffixes.  */
  int		is_neon;
  /* Set to the opcode if the instruction needs relaxation.
     Zero if the instruction is not relaxed.  */
  unsigned long relax;
  /* Relocation information attached to this instruction, if any.  */
  struct
  {
    bfd_reloc_code_real_type type;
    expressionS		     exp;
    int			     pc_rel;
  } reloc;

  enum it_instruction_type it_insn_type;

  /* One record per parsed operand.  */
  struct
  {
    unsigned reg;
    signed int imm;
    struct neon_type_el vectype;
    unsigned present	: 1;  /* Operand present.  */
    unsigned isreg	: 1;  /* Operand was a register.  */
    unsigned immisreg	: 1;  /* .imm field is a second register.  */
    unsigned isscalar	: 1;  /* Operand is a (Neon) scalar.  */
    unsigned immisalign : 1;  /* Immediate is an alignment specifier.  */
    unsigned immisfloat : 1;  /* Immediate was parsed as a float.  */
    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
       instructions.  This allows us to disambiguate ARM <-> vector insns.  */
    unsigned regisimm	: 1;  /* 64-bit immediate, reg forms high 32 bits.  */
    unsigned isvec	: 1;  /* Is a single, double or quad VFP/Neon reg.  */
    unsigned isquad	: 1;  /* Operand is Neon quad-precision register.  */
    unsigned issingle	: 1;  /* Operand is VFP single-precision register.  */
    unsigned hasreloc	: 1;  /* Operand has relocation suffix.  */
    unsigned writeback	: 1;  /* Operand has trailing !  */
    unsigned preind	: 1;  /* Preindexed address.  */
    unsigned postind	: 1;  /* Postindexed address.  */
    unsigned negative	: 1;  /* Index register was negated.  */
    unsigned shifted	: 1;  /* Shift applied to operation.  */
    unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */
  } operands[ARM_IT_MAX_OPERANDS];
};

/* The single instruction currently being assembled.  */
static struct arm_it inst;
517
/* Number of entries in fp_const / fp_values below.  */
#define NUM_FLOAT_VALS 8

/* Recognised floating-point constants, NULL-terminated.  NOTE(review):
   presumably the set encodable as FPA immediates — confirm against the
   FPA encoders later in the file.  */
const char * fp_const[] =
{
  "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
};

/* Number of littlenums required to hold an extended precision number.  */
#define MAX_LITTLENUMS 6

/* Binary (littlenum) forms of the strings in fp_const; filled in
   elsewhere in this file.  */
LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];

/* Generic failure/success return codes used throughout this file.  */
#define FAIL	(-1)
#define SUCCESS (0)

/* Precision suffix codes.  NOTE(review): presumably FPA
   single/double/extended/packed — confirm at their uses.  */
#define SUFF_S 1
#define SUFF_D 2
#define SUFF_E 3
#define SUFF_P 4

#define CP_T_X	 0x00008000
#define CP_T_Y	 0x00400000

#define CONDS_BIT	 0x00100000
#define LOAD_BIT	 0x00100000

#define DOUBLE_LOAD_FLAG 0x00000001
545
/* Maps a condition-code name onto its encoding value.  */
struct asm_cond
{
  const char *	 template_name;
  unsigned long  value;
};

/* Encoding of the AL (always) condition.  */
#define COND_ALWAYS 0xE

/* Maps a PSR name onto its field mask.  */
struct asm_psr
{
  const char *   template_name;
  unsigned long  field;
};

/* Maps a barrier option name onto its value; ARCH gates which
   architectures accept it.  */
struct asm_barrier_opt
{
  const char *	  template_name;
  unsigned long   value;
  const arm_feature_set arch;
};

/* The bit that distinguishes CPSR and SPSR.  */
#define SPSR_BIT   (1 << 22)

/* The individual PSR flag bits.  */
#define PSR_c	(1 << 16)
#define PSR_x	(1 << 17)
#define PSR_s	(1 << 18)
#define PSR_f	(1 << 19)
575
/* Maps a relocation-suffix name onto a BFD relocation code.  */
struct reloc_entry
{
  const char *		    name;
  bfd_reloc_code_real_type  reloc;
};

/* Positions a VFP register number can occupy within an encoding.  */
enum vfp_reg_pos
{
  VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
  VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
};

/* Addressing variants of the VFP load/store-multiple instructions.  */
enum vfp_ldstm_type
{
  VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
};

/* Bits for DEFINED field in neon_typed_alias.  */
#define NTA_HASTYPE  1
#define NTA_HASINDEX 2

/* Extra type/index information attached to a register alias created with
   .dn or .qn (see struct reg_entry below).  */
struct neon_typed_alias
{
  unsigned char	       defined;
  unsigned char	       index;
  struct neon_type_el  eltype;
};
603
/* ARM register categories.  This includes coprocessor numbers and various
   architecture extensions' registers.  Each entry should have an error message
   in reg_expected_msgs below.  */
enum arm_reg_type
{
  REG_TYPE_RN,
  REG_TYPE_CP,
  REG_TYPE_CN,
  REG_TYPE_FN,
  REG_TYPE_VFS,
  REG_TYPE_VFD,
  REG_TYPE_NQ,
  REG_TYPE_VFSD,
  REG_TYPE_NDQ,
  REG_TYPE_NSD,
  REG_TYPE_NSDQ,
  REG_TYPE_VFC,
  REG_TYPE_MVF,
  REG_TYPE_MVD,
  REG_TYPE_MVFX,
  REG_TYPE_MVDX,
  REG_TYPE_MVAX,
  REG_TYPE_DSPSC,
  REG_TYPE_MMXWR,
  REG_TYPE_MMXWC,
  REG_TYPE_MMXWCG,
  REG_TYPE_XSCALE,
  REG_TYPE_RNB
};
633
/* Structure for a hash table entry for a register.
   If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
   information which states whether a vector type or index is specified (for a
   register alias created with .dn or .qn).  Otherwise NEON should be NULL.  */
struct reg_entry
{
  const char *		     name;
  unsigned int		     number;
  unsigned char		     type;	/* An enum arm_reg_type value.  */
  /* NOTE(review): presumably distinguishes built-in register names from
     user-created aliases — confirm at the .req/.unreq handlers.  */
  unsigned char		     builtin;
  struct neon_typed_alias *  neon;
};
646
/* Diagnostics used when we don't get a register of the expected type.
   Indexed by enum arm_reg_type via designated initializers.  */
const char * const reg_expected_msgs[] =
{
  [REG_TYPE_RN]	    = N_("ARM register expected"),
  [REG_TYPE_CP]	    = N_("bad or missing co-processor number"),
  [REG_TYPE_CN]	    = N_("co-processor register expected"),
  [REG_TYPE_FN]	    = N_("FPA register expected"),
  [REG_TYPE_VFS]    = N_("VFP single precision register expected"),
  [REG_TYPE_VFD]    = N_("VFP/Neon double precision register expected"),
  [REG_TYPE_NQ]	    = N_("Neon quad precision register expected"),
  [REG_TYPE_VFSD]   = N_("VFP single or double precision register expected"),
  [REG_TYPE_NDQ]    = N_("Neon double or quad precision register expected"),
  [REG_TYPE_NSD]    = N_("Neon single or double precision register expected"),
  [REG_TYPE_NSDQ]   = N_("VFP single, double or Neon quad precision register"
			 " expected"),
  [REG_TYPE_VFC]    = N_("VFP system register expected"),
  [REG_TYPE_MVF]    = N_("Maverick MVF register expected"),
  [REG_TYPE_MVD]    = N_("Maverick MVD register expected"),
  [REG_TYPE_MVFX]   = N_("Maverick MVFX register expected"),
  [REG_TYPE_MVDX]   = N_("Maverick MVDX register expected"),
  [REG_TYPE_MVAX]   = N_("Maverick MVAX register expected"),
  [REG_TYPE_DSPSC]  = N_("Maverick DSPSC register expected"),
  [REG_TYPE_MMXWR]  = N_("iWMMXt data register expected"),
  [REG_TYPE_MMXWC]  = N_("iWMMXt control register expected"),
  [REG_TYPE_MMXWCG] = N_("iWMMXt scalar register expected"),
  [REG_TYPE_XSCALE] = N_("XScale accumulator register expected"),
  [REG_TYPE_RNB]    = N_("")
};
675
/* Some well known registers that we refer to directly elsewhere.  */
#define REG_R12	12
#define REG_SP	13
#define REG_LR	14
#define REG_PC	15

/* ARM instructions take 4 bytes in the object file, Thumb instructions
   take 2:  */
#define INSN_SIZE	4
685
/* One entry in the main opcode table.  */
struct asm_opcode
{
  /* Basic string to match.  */
  const char * template_name;

  /* Parameters to instruction.  */
  unsigned int operands[8];

  /* Conditional tag - see opcode_lookup.  */
  unsigned int tag : 4;

  /* Basic instruction code.  */
  unsigned int avalue : 28;

  /* Thumb-format instruction code.  */
  unsigned int tvalue;

  /* Which architecture variant provides this instruction.  */
  const arm_feature_set * avariant;
  const arm_feature_set * tvariant;

  /* Function to call to encode instruction in ARM format.  */
  void (* aencode) (void);

  /* Function to call to encode instruction in Thumb format.  */
  void (* tencode) (void);
};
713
/* Defines for various bits that we will want to toggle.  */
#define INST_IMMEDIATE	0x02000000
#define OFFSET_REG	0x02000000
#define HWOFFSET_IMM	0x00400000
#define SHIFT_BY_REG	0x00000010
#define PRE_INDEX	0x01000000
#define INDEX_UP	0x00800000
#define WRITE_BACK	0x00200000
#define LDM_TYPE_2_OR_3	0x00400000
#define CPSI_MMOD	0x00020000

#define LITERAL_MASK	0xf000f000
#define OPCODE_MASK	0xfe1fffff
#define V4_STR_BIT	0x00000020
#define VLDR_VMOV_SAME	0x0040f000

#define T2_SUBS_PC_LR	0xf3de8f00

/* Bit positions of the opcode and S fields in data-processing
   instructions, for ARM and Thumb-2 encodings respectively.  */
#define DATA_OP_SHIFT	21
#define SBIT_SHIFT	20

#define T2_OPCODE_MASK	0xfe1fffff
#define T2_DATA_OP_SHIFT 21
#define T2_SBIT_SHIFT	 20

#define A_COND_MASK         0xf0000000
#define A_PUSH_POP_OP_MASK  0x0fff0000
741
/* Opcodes for pushing/popping registers to/from the stack.  */
#define A1_OPCODE_PUSH    0x092d0000
#define A2_OPCODE_PUSH    0x052d0004
#define A2_OPCODE_POP     0x049d0004

/* Codes to distinguish the arithmetic instructions.  */
#define OPCODE_AND	0
#define OPCODE_EOR	1
#define OPCODE_SUB	2
#define OPCODE_RSB	3
#define OPCODE_ADD	4
#define OPCODE_ADC	5
#define OPCODE_SBC	6
#define OPCODE_RSC	7
#define OPCODE_TST	8
#define OPCODE_TEQ	9
#define OPCODE_CMP	10
#define OPCODE_CMN	11
#define OPCODE_ORR	12
#define OPCODE_MOV	13
#define OPCODE_BIC	14
#define OPCODE_MVN	15

/* Thumb-2 data-processing opcode field values.  */
#define T2_OPCODE_AND	0
#define T2_OPCODE_BIC	1
#define T2_OPCODE_ORR	2
#define T2_OPCODE_ORN	3
#define T2_OPCODE_EOR	4
#define T2_OPCODE_ADD	8
#define T2_OPCODE_ADC	10
#define T2_OPCODE_SBC	11
#define T2_OPCODE_SUB	13
#define T2_OPCODE_RSB	14
775
/* 16-bit Thumb instruction opcodes.  */

/* Register-operand ALU operations.  */
#define T_OPCODE_MUL 0x4340
#define T_OPCODE_TST 0x4200
#define T_OPCODE_CMN 0x42c0
#define T_OPCODE_NEG 0x4240
#define T_OPCODE_MVN 0x43c0

/* Add/subtract variants.  */
#define T_OPCODE_ADD_R3	0x1800
#define T_OPCODE_SUB_R3 0x1a00
#define T_OPCODE_ADD_HI 0x4400
#define T_OPCODE_ADD_ST 0xb000
#define T_OPCODE_SUB_ST 0xb080
#define T_OPCODE_ADD_SP 0xa800
#define T_OPCODE_ADD_PC 0xa000
#define T_OPCODE_ADD_I8 0x3000
#define T_OPCODE_SUB_I8 0x3800
#define T_OPCODE_ADD_I3 0x1c00
#define T_OPCODE_SUB_I3 0x1e00

/* Shifts and rotates, register and immediate forms.  */
#define T_OPCODE_ASR_R	0x4100
#define T_OPCODE_LSL_R	0x4080
#define T_OPCODE_LSR_R	0x40c0
#define T_OPCODE_ROR_R	0x41c0
#define T_OPCODE_ASR_I	0x1000
#define T_OPCODE_LSL_I	0x0000
#define T_OPCODE_LSR_I	0x0800

/* Moves and compares.  */
#define T_OPCODE_MOV_I8	0x2000
#define T_OPCODE_CMP_I8 0x2800
#define T_OPCODE_CMP_LR 0x4280
#define T_OPCODE_MOV_HR 0x4600
#define T_OPCODE_CMP_HR 0x4500

/* Loads and stores.  */
#define T_OPCODE_LDR_PC 0x4800
#define T_OPCODE_LDR_SP 0x9800
#define T_OPCODE_STR_SP 0x9000
#define T_OPCODE_LDR_IW 0x6800
#define T_OPCODE_STR_IW 0x6000
#define T_OPCODE_LDR_IH 0x8800
#define T_OPCODE_STR_IH 0x8000
#define T_OPCODE_LDR_IB 0x7800
#define T_OPCODE_STR_IB 0x7000
#define T_OPCODE_LDR_RW 0x5800
#define T_OPCODE_STR_RW 0x5000
#define T_OPCODE_LDR_RH 0x5a00
#define T_OPCODE_STR_RH 0x5200
#define T_OPCODE_LDR_RB 0x5c00
#define T_OPCODE_STR_RB 0x5400

/* Push/pop and branch.  */
#define T_OPCODE_PUSH	0xb400
#define T_OPCODE_POP	0xbc00

#define T_OPCODE_BRANCH 0xe000

#define THUMB_SIZE	2	/* Size of thumb instruction.  */
#define THUMB_PP_PC_LR 0x0100
#define THUMB_LOAD_BIT 0x0800
#define THUMB2_LOAD_BIT 0x00100000
833
/* Common diagnostic messages.  */
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_SP          _("r13 not allowed here")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
/* Bug fix: the original definition had a stray trailing semicolon inside
   the #define, which makes the macro expand to an extra empty statement
   and breaks any use in expression context (e.g. a conditional
   expression or function argument).  */
#define BAD_ADDR_MODE   _("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
#define BAD_OUT_IT 	_("thumb conditional instruction should be in IT block")
#define BAD_IT_COND	_("incorrect condition in IT block")
#define BAD_IT_IT 	_("IT falling in the range of a previous IT block")
#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE	_("branch out of range")
#define BAD_FP16	_("selected processor does not support fp16 instruction")
#define UNPRED_REG(R)	_("using " R " results in unpredictable behaviour")
#define THUMB1_RELOC_ONLY	_("relocation valid in thumb1 code only")
857
/* Hash tables for the namespaces used during assembly, keyed by name.  */
static struct hash_control * arm_ops_hsh;	   /* Instruction mnemonics.  */
static struct hash_control * arm_cond_hsh;	   /* Condition codes.  */
static struct hash_control * arm_shift_hsh;	   /* Shift names.  */
static struct hash_control * arm_psr_hsh;	   /* PSR names.  */
static struct hash_control * arm_v7m_psr_hsh;	   /* v7-M PSR names.  */
static struct hash_control * arm_reg_hsh;	   /* Register names.  */
static struct hash_control * arm_reloc_hsh;	   /* Relocation suffixes.  */
static struct hash_control * arm_barrier_opt_hsh;  /* Barrier option names.  */
866
/* Stuff needed to resolve the label ambiguity
   As:
     ...
     label:   <insn>
   may differ from:
     ...
     label:
	      <insn>  */

symbolS *  last_label_seen;
static int label_is_thumb_function_name = FALSE;

/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_pool
{
  expressionS	 literals [MAX_LITERAL_POOL_SIZE];
  unsigned int	 next_free_entry;
  unsigned int	 id;
  symbolS *	 symbol;
  segT		 section;
  subsegT	 sub_section;
#ifdef OBJ_ELF
  /* Source locations of the literals, for DWARF2 line information.  */
  struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
#endif
  struct literal_pool * next;
  unsigned int	 alignment;
} literal_pool;
897
898 /* Pointer to a linked list of literal pools. */
899 literal_pool * list_of_pools = NULL;
900
901 typedef enum asmfunc_states
902 {
903 OUTSIDE_ASMFUNC,
904 WAITING_ASMFUNC_NAME,
905 WAITING_ENDASMFUNC
906 } asmfunc_states;
907
908 static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
909
910 #ifdef OBJ_ELF
911 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
912 #else
913 static struct current_it now_it;
914 #endif
915
916 static inline int
917 now_it_compatible (int cond)
918 {
919 return (cond & ~1) == (now_it.cc & ~1);
920 }
921
922 static inline int
923 conditional_insn (void)
924 {
925 return inst.cond != COND_ALWAYS;
926 }
927
928 static int in_it_block (void);
929
930 static int handle_it_state (void);
931
932 static void force_automatic_it_block_close (void);
933
934 static void it_fsm_post_encode (void);
935
936 #define set_it_insn_type(type) \
937 do \
938 { \
939 inst.it_insn_type = type; \
940 if (handle_it_state () == FAIL) \
941 return; \
942 } \
943 while (0)
944
945 #define set_it_insn_type_nonvoid(type, failret) \
946 do \
947 { \
948 inst.it_insn_type = type; \
949 if (handle_it_state () == FAIL) \
950 return failret; \
951 } \
952 while(0)
953
954 #define set_it_insn_type_last() \
955 do \
956 { \
957 if (inst.cond == COND_ALWAYS) \
958 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
959 else \
960 set_it_insn_type (INSIDE_IT_LAST_INSN); \
961 } \
962 while (0)
963
964 /* Pure syntax. */
965
966 /* This array holds the chars that always start a comment. If the
967 pre-processor is disabled, these aren't very useful. */
968 char arm_comment_chars[] = "@";
969
970 /* This array holds the chars that only start a comment at the beginning of
971 a line. If the line seems to have the form '# 123 filename'
972 .line and .file directives will appear in the pre-processed output. */
973 /* Note that input_file.c hand checks for '#' at the beginning of the
974 first line of the input file. This is because the compiler outputs
975 #NO_APP at the beginning of its output. */
976 /* Also note that comments like this one will always work. */
977 const char line_comment_chars[] = "#";
978
979 char arm_line_separator_chars[] = ";";
980
981 /* Chars that can be used to separate mant
982 from exp in floating point numbers. */
983 const char EXP_CHARS[] = "eE";
984
985 /* Chars that mean this number is a floating point constant. */
986 /* As in 0f12.456 */
987 /* or 0d1.2345e12 */
988
989 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
990
991 /* Prefix characters that indicate the start of an immediate
992 value. */
993 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
994
995 /* Separator character handling. */
996
997 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
998
999 static inline int
1000 skip_past_char (char ** str, char c)
1001 {
1002 /* PR gas/14987: Allow for whitespace before the expected character. */
1003 skip_whitespace (*str);
1004
1005 if (**str == c)
1006 {
1007 (*str)++;
1008 return SUCCESS;
1009 }
1010 else
1011 return FAIL;
1012 }
1013
1014 #define skip_past_comma(str) skip_past_char (str, ',')
1015
1016 /* Arithmetic expressions (possibly involving symbols). */
1017
1018 /* Return TRUE if anything in the expression is a bignum. */
1019
1020 static bfd_boolean
1021 walk_no_bignums (symbolS * sp)
1022 {
1023 if (symbol_get_value_expression (sp)->X_op == O_big)
1024 return TRUE;
1025
1026 if (symbol_get_value_expression (sp)->X_add_symbol)
1027 {
1028 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
1029 || (symbol_get_value_expression (sp)->X_op_symbol
1030 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
1031 }
1032
1033 return FALSE;
1034 }
1035
1036 static bfd_boolean in_my_get_expression = FALSE;
1037
1038 /* Third argument to my_get_expression. */
1039 #define GE_NO_PREFIX 0
1040 #define GE_IMM_PREFIX 1
1041 #define GE_OPT_PREFIX 2
1042 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
1043 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
1044 #define GE_OPT_PREFIX_BIG 3
1045
/* Parse an expression from *STR into *EP, applying the immediate-prefix
   rule selected by PREFIX_MODE (one of the GE_* values above).  On
   success, advance *STR past the parsed text and return SUCCESS (0).
   On failure, set inst.error (if not already set) and return non-zero.
   Bignums are rejected unless PREFIX_MODE is GE_OPT_PREFIX_BIG.  */
static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
		  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* expression () reads from input_line_pointer, so temporarily point it
     at our operand string.  in_my_get_expression tells md_operand () to
     mark unparsable operands as O_illegal rather than erroring out.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = TRUE;
  expression (ep);
  in_my_get_expression = FALSE;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.  Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return SUCCESS;
}
1115
1116 /* Turn a string in input_line_pointer into a floating point constant
1117 of type TYPE, and store the appropriate bytes in *LITP. The number
1118 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1119 returned, or NULL on OK.
1120
1121 Note that fp constants aren't represent in the normal way on the ARM.
1122 In big endian mode, things are as expected. However, in little endian
1123 mode fp constants are big-endian word-wise, and little-endian byte-wise
1124 within the words. For example, (double) 1.1 in big endian mode is
1125 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1126 the byte sequence 99 99 f1 3f 9a 99 99 99.
1127
1128 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1129
1130 const char *
1131 md_atof (int type, char * litP, int * sizeP)
1132 {
1133 int prec;
1134 LITTLENUM_TYPE words[MAX_LITTLENUMS];
1135 char *t;
1136 int i;
1137
1138 switch (type)
1139 {
1140 case 'f':
1141 case 'F':
1142 case 's':
1143 case 'S':
1144 prec = 2;
1145 break;
1146
1147 case 'd':
1148 case 'D':
1149 case 'r':
1150 case 'R':
1151 prec = 4;
1152 break;
1153
1154 case 'x':
1155 case 'X':
1156 prec = 5;
1157 break;
1158
1159 case 'p':
1160 case 'P':
1161 prec = 5;
1162 break;
1163
1164 default:
1165 *sizeP = 0;
1166 return _("Unrecognized or unsupported floating point constant");
1167 }
1168
1169 t = atof_ieee (input_line_pointer, type, words);
1170 if (t)
1171 input_line_pointer = t;
1172 *sizeP = prec * sizeof (LITTLENUM_TYPE);
1173
1174 if (target_big_endian)
1175 {
1176 for (i = 0; i < prec; i++)
1177 {
1178 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1179 litP += sizeof (LITTLENUM_TYPE);
1180 }
1181 }
1182 else
1183 {
1184 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
1185 for (i = prec - 1; i >= 0; i--)
1186 {
1187 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1188 litP += sizeof (LITTLENUM_TYPE);
1189 }
1190 else
1191 /* For a 4 byte float the order of elements in `words' is 1 0.
1192 For an 8 byte float the order is 1 0 3 2. */
1193 for (i = 0; i < prec; i += 2)
1194 {
1195 md_number_to_chars (litP, (valueT) words[i + 1],
1196 sizeof (LITTLENUM_TYPE));
1197 md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
1198 (valueT) words[i], sizeof (LITTLENUM_TYPE));
1199 litP += 2 * sizeof (LITTLENUM_TYPE);
1200 }
1201 }
1202
1203 return NULL;
1204 }
1205
1206 /* We handle all bad expressions here, so that we can report the faulty
1207 instruction in the error message. */
1208
1209 void
1210 md_operand (expressionS * exp)
1211 {
1212 if (in_my_get_expression)
1213 exp->X_op = O_illegal;
1214 }
1215
1216 /* Immediate values. */
1217
1218 #ifdef OBJ_ELF
1219 /* Generic immediate-value read function for use in directives.
1220 Accepts anything that 'expression' can fold to a constant.
1221 *val receives the number. */
1222
1223 static int
1224 immediate_for_directive (int *val)
1225 {
1226 expressionS exp;
1227 exp.X_op = O_illegal;
1228
1229 if (is_immediate_prefix (*input_line_pointer))
1230 {
1231 input_line_pointer++;
1232 expression (&exp);
1233 }
1234
1235 if (exp.X_op != O_constant)
1236 {
1237 as_bad (_("expected #constant"));
1238 ignore_rest_of_line ();
1239 return FAIL;
1240 }
1241 *val = exp.X_add_number;
1242 return SUCCESS;
1243 }
1244 #endif
1245
1246 /* Register parsing. */
1247
1248 /* Generic register parser. CCP points to what should be the
1249 beginning of a register name. If it is indeed a valid register
1250 name, advance CCP over it and return the reg_entry structure;
1251 otherwise return NULL. Does not issue diagnostics. */
1252
1253 static struct reg_entry *
1254 arm_reg_parse_multi (char **ccp)
1255 {
1256 char *start = *ccp;
1257 char *p;
1258 struct reg_entry *reg;
1259
1260 skip_whitespace (start);
1261
1262 #ifdef REGISTER_PREFIX
1263 if (*start != REGISTER_PREFIX)
1264 return NULL;
1265 start++;
1266 #endif
1267 #ifdef OPTIONAL_REGISTER_PREFIX
1268 if (*start == OPTIONAL_REGISTER_PREFIX)
1269 start++;
1270 #endif
1271
1272 p = start;
1273 if (!ISALPHA (*p) || !is_name_beginner (*p))
1274 return NULL;
1275
1276 do
1277 p++;
1278 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1279
1280 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1281
1282 if (!reg)
1283 return NULL;
1284
1285 *ccp = p;
1286 return reg;
1287 }
1288
1289 static int
1290 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1291 enum arm_reg_type type)
1292 {
1293 /* Alternative syntaxes are accepted for a few register classes. */
1294 switch (type)
1295 {
1296 case REG_TYPE_MVF:
1297 case REG_TYPE_MVD:
1298 case REG_TYPE_MVFX:
1299 case REG_TYPE_MVDX:
1300 /* Generic coprocessor register names are allowed for these. */
1301 if (reg && reg->type == REG_TYPE_CN)
1302 return reg->number;
1303 break;
1304
1305 case REG_TYPE_CP:
1306 /* For backward compatibility, a bare number is valid here. */
1307 {
1308 unsigned long processor = strtoul (start, ccp, 10);
1309 if (*ccp != start && processor <= 15)
1310 return processor;
1311 }
1312 /* Fall through. */
1313
1314 case REG_TYPE_MMXWC:
1315 /* WC includes WCG. ??? I'm not sure this is true for all
1316 instructions that take WC registers. */
1317 if (reg && reg->type == REG_TYPE_MMXWCG)
1318 return reg->number;
1319 break;
1320
1321 default:
1322 break;
1323 }
1324
1325 return FAIL;
1326 }
1327
1328 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1329 return value is the register number or FAIL. */
1330
1331 static int
1332 arm_reg_parse (char **ccp, enum arm_reg_type type)
1333 {
1334 char *start = *ccp;
1335 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1336 int ret;
1337
1338 /* Do not allow a scalar (reg+index) to parse as a register. */
1339 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1340 return FAIL;
1341
1342 if (reg && reg->type == type)
1343 return reg->number;
1344
1345 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1346 return ret;
1347
1348 *ccp = start;
1349 return FAIL;
1350 }
1351
1352 /* Parse a Neon type specifier. *STR should point at the leading '.'
1353 character. Does no verification at this stage that the type fits the opcode
1354 properly. E.g.,
1355
1356 .i32.i32.s16
1357 .s32.f32
1358 .u16
1359
1360 Can all be legally parsed by this function.
1361
1362 Fills in neon_type struct pointer with parsed information, and updates STR
1363 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1364 type, FAIL if not. */
1365
1366 static int
1367 parse_neon_type (struct neon_type *type, char **str)
1368 {
1369 char *ptr = *str;
1370
1371 if (type)
1372 type->elems = 0;
1373
1374 while (type->elems < NEON_MAX_TYPE_ELS)
1375 {
1376 enum neon_el_type thistype = NT_untyped;
1377 unsigned thissize = -1u;
1378
1379 if (*ptr != '.')
1380 break;
1381
1382 ptr++;
1383
1384 /* Just a size without an explicit type. */
1385 if (ISDIGIT (*ptr))
1386 goto parsesize;
1387
1388 switch (TOLOWER (*ptr))
1389 {
1390 case 'i': thistype = NT_integer; break;
1391 case 'f': thistype = NT_float; break;
1392 case 'p': thistype = NT_poly; break;
1393 case 's': thistype = NT_signed; break;
1394 case 'u': thistype = NT_unsigned; break;
1395 case 'd':
1396 thistype = NT_float;
1397 thissize = 64;
1398 ptr++;
1399 goto done;
1400 default:
1401 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1402 return FAIL;
1403 }
1404
1405 ptr++;
1406
1407 /* .f is an abbreviation for .f32. */
1408 if (thistype == NT_float && !ISDIGIT (*ptr))
1409 thissize = 32;
1410 else
1411 {
1412 parsesize:
1413 thissize = strtoul (ptr, &ptr, 10);
1414
1415 if (thissize != 8 && thissize != 16 && thissize != 32
1416 && thissize != 64)
1417 {
1418 as_bad (_("bad size %d in type specifier"), thissize);
1419 return FAIL;
1420 }
1421 }
1422
1423 done:
1424 if (type)
1425 {
1426 type->el[type->elems].type = thistype;
1427 type->el[type->elems].size = thissize;
1428 type->elems++;
1429 }
1430 }
1431
1432 /* Empty/missing type is not a successful parse. */
1433 if (type->elems == 0)
1434 return FAIL;
1435
1436 *str = ptr;
1437
1438 return SUCCESS;
1439 }
1440
1441 /* Errors may be set multiple times during parsing or bit encoding
1442 (particularly in the Neon bits), but usually the earliest error which is set
1443 will be the most meaningful. Avoid overwriting it with later (cascading)
1444 errors by calling this function. */
1445
1446 static void
1447 first_error (const char *err)
1448 {
1449 if (!inst.error)
1450 inst.error = err;
1451 }
1452
1453 /* Parse a single type, e.g. ".s32", leading period included. */
1454 static int
1455 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1456 {
1457 char *str = *ccp;
1458 struct neon_type optype;
1459
1460 if (*str == '.')
1461 {
1462 if (parse_neon_type (&optype, &str) == SUCCESS)
1463 {
1464 if (optype.elems == 1)
1465 *vectype = optype.el[0];
1466 else
1467 {
1468 first_error (_("only one type should be specified for operand"));
1469 return FAIL;
1470 }
1471 }
1472 else
1473 {
1474 first_error (_("vector type expected"));
1475 return FAIL;
1476 }
1477 }
1478 else
1479 return FAIL;
1480
1481 *ccp = str;
1482
1483 return SUCCESS;
1484 }
1485
1486 /* Special meanings for indices (which have a range of 0-7), which will fit into
1487 a 4-bit integer. */
1488
1489 #define NEON_ALL_LANES 15
1490 #define NEON_INTERLEAVE_LANES 14
1491
1492 /* Parse either a register or a scalar, with an optional type. Return the
1493 register number, and optionally fill in the actual type of the register
1494 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1495 type/index information in *TYPEINFO. */
1496
static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with an empty type/index annotation.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register.  Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_NSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  if (type != reg->type)
    return FAIL;

  /* A register alias may carry pre-attached type/index information
     (presumably set up by alias directives elsewhere in this file).  */
  if (reg->neon)
    atype = *reg->neon;

  /* An explicit ".type" suffix on the register name.  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Scalar index syntax: "[n]" selects a lane, "[]" means all lanes.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD
	  && !(type == REG_TYPE_VFS
	       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_2)))
	{
	  first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1604
/* Like arm_reg_parse, but allow the following extra features:
1606 - If RTYPE is non-zero, return the (possibly restricted) type of the
1607 register (e.g. Neon double or quad reg when either has been requested).
1608 - If this is a Neon vector type with additional type information, fill
1609 in the struct pointed to by VECTYPE (if non-NULL).
1610 This function will fault on encountering a scalar. */
1611
1612 static int
1613 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1614 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1615 {
1616 struct neon_typed_alias atype;
1617 char *str = *ccp;
1618 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1619
1620 if (reg == FAIL)
1621 return FAIL;
1622
1623 /* Do not allow regname(... to parse as a register. */
1624 if (*str == '(')
1625 return FAIL;
1626
1627 /* Do not allow a scalar (reg+index) to parse as a register. */
1628 if ((atype.defined & NTA_HASINDEX) != 0)
1629 {
1630 first_error (_("register operand expected, but got scalar"));
1631 return FAIL;
1632 }
1633
1634 if (vectype)
1635 *vectype = atype.eltype;
1636
1637 *ccp = str;
1638
1639 return reg;
1640 }
1641
1642 #define NEON_SCALAR_REG(X) ((X) >> 4)
1643 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1644
1645 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1646 have enough information to be able to do a good job bounds-checking. So, we
1647 just do easy checks here, and do further checks later. */
1648
1649 static int
1650 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1651 {
1652 int reg;
1653 char *str = *ccp;
1654 struct neon_typed_alias atype;
1655 enum arm_reg_type reg_type = REG_TYPE_VFD;
1656
1657 if (elsize == 4)
1658 reg_type = REG_TYPE_VFS;
1659
1660 reg = parse_typed_reg_or_scalar (&str, reg_type, NULL, &atype);
1661
1662 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1663 return FAIL;
1664
1665 if (atype.index == NEON_ALL_LANES)
1666 {
1667 first_error (_("scalar must have an index"));
1668 return FAIL;
1669 }
1670 else if (atype.index >= 64 / elsize)
1671 {
1672 first_error (_("scalar index out of range"));
1673 return FAIL;
1674 }
1675
1676 if (type)
1677 *type = atype.eltype;
1678
1679 *ccp = str;
1680
1681 return reg * 16 + atype.index;
1682 }
1683
1684 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1685
1686 static long
1687 parse_reg_list (char ** strp)
1688 {
1689 char * str = * strp;
1690 long range = 0;
1691 int another_range;
1692
1693 /* We come back here if we get ranges concatenated by '+' or '|'. */
1694 do
1695 {
1696 skip_whitespace (str);
1697
1698 another_range = 0;
1699
1700 if (*str == '{')
1701 {
1702 int in_range = 0;
1703 int cur_reg = -1;
1704
1705 str++;
1706 do
1707 {
1708 int reg;
1709
1710 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1711 {
1712 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1713 return FAIL;
1714 }
1715
1716 if (in_range)
1717 {
1718 int i;
1719
1720 if (reg <= cur_reg)
1721 {
1722 first_error (_("bad range in register list"));
1723 return FAIL;
1724 }
1725
1726 for (i = cur_reg + 1; i < reg; i++)
1727 {
1728 if (range & (1 << i))
1729 as_tsktsk
1730 (_("Warning: duplicated register (r%d) in register list"),
1731 i);
1732 else
1733 range |= 1 << i;
1734 }
1735 in_range = 0;
1736 }
1737
1738 if (range & (1 << reg))
1739 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1740 reg);
1741 else if (reg <= cur_reg)
1742 as_tsktsk (_("Warning: register range not in ascending order"));
1743
1744 range |= 1 << reg;
1745 cur_reg = reg;
1746 }
1747 while (skip_past_comma (&str) != FAIL
1748 || (in_range = 1, *str++ == '-'));
1749 str--;
1750
1751 if (skip_past_char (&str, '}') == FAIL)
1752 {
1753 first_error (_("missing `}'"));
1754 return FAIL;
1755 }
1756 }
1757 else
1758 {
1759 expressionS exp;
1760
1761 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1762 return FAIL;
1763
1764 if (exp.X_op == O_constant)
1765 {
1766 if (exp.X_add_number
1767 != (exp.X_add_number & 0x0000ffff))
1768 {
1769 inst.error = _("invalid register mask");
1770 return FAIL;
1771 }
1772
1773 if ((range & exp.X_add_number) != 0)
1774 {
1775 int regno = range & exp.X_add_number;
1776
1777 regno &= -regno;
1778 regno = (1 << regno) - 1;
1779 as_tsktsk
1780 (_("Warning: duplicated register (r%d) in register list"),
1781 regno);
1782 }
1783
1784 range |= exp.X_add_number;
1785 }
1786 else
1787 {
1788 if (inst.reloc.type != 0)
1789 {
1790 inst.error = _("expression too complex");
1791 return FAIL;
1792 }
1793
1794 memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
1795 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1796 inst.reloc.pc_rel = 0;
1797 }
1798 }
1799
1800 if (*str == '|' || *str == '+')
1801 {
1802 str++;
1803 another_range = 1;
1804 }
1805 }
1806 while (another_range);
1807
1808 *strp = str;
1809 return range;
1810 }
1811
1812 /* Types of registers in a list. */
1813
enum reg_list_els
{
  REGLIST_VFP_S,	/* Single-precision (S) VFP registers.  */
  REGLIST_VFP_D,	/* Double-precision (D) VFP registers.  */
  REGLIST_NEON_D	/* Neon D registers; extended syntax allowed.  */
};
1820
1821 /* Parse a VFP register list. If the string is invalid return FAIL.
1822 Otherwise return the number of registers, and set PBASE to the first
1823 register. Parses registers of type ETYPE.
1824 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1825 - Q registers can be used to specify pairs of D registers
1826 - { } can be omitted from around a singleton register list
1827 FIXME: This is not implemented, as it would require backtracking in
1828 some cases, e.g.:
1829 vtbl.8 d3,d4,d5
1830 This could be done (the meaning isn't really ambiguous), but doesn't
1831 fit in well with the current parsing framework.
1832 - 32 D registers may be used (also true for VFPv3).
1833 FIXME: Types are ignored in these register lists, which is probably a
1834 bug. */
1835
1836 static int
1837 parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
1838 {
1839 char *str = *ccp;
1840 int base_reg;
1841 int new_base;
1842 enum arm_reg_type regtype = (enum arm_reg_type) 0;
1843 int max_regs = 0;
1844 int count = 0;
1845 int warned = 0;
1846 unsigned long mask = 0;
1847 int i;
1848
1849 if (skip_past_char (&str, '{') == FAIL)
1850 {
1851 inst.error = _("expecting {");
1852 return FAIL;
1853 }
1854
1855 switch (etype)
1856 {
1857 case REGLIST_VFP_S:
1858 regtype = REG_TYPE_VFS;
1859 max_regs = 32;
1860 break;
1861
1862 case REGLIST_VFP_D:
1863 regtype = REG_TYPE_VFD;
1864 break;
1865
1866 case REGLIST_NEON_D:
1867 regtype = REG_TYPE_NDQ;
1868 break;
1869 }
1870
1871 if (etype != REGLIST_VFP_S)
1872 {
1873 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1874 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
1875 {
1876 max_regs = 32;
1877 if (thumb_mode)
1878 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1879 fpu_vfp_ext_d32);
1880 else
1881 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1882 fpu_vfp_ext_d32);
1883 }
1884 else
1885 max_regs = 16;
1886 }
1887
1888 base_reg = max_regs;
1889
1890 do
1891 {
1892 int setmask = 1, addregs = 1;
1893
1894 new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);
1895
1896 if (new_base == FAIL)
1897 {
1898 first_error (_(reg_expected_msgs[regtype]));
1899 return FAIL;
1900 }
1901
1902 if (new_base >= max_regs)
1903 {
1904 first_error (_("register out of range in list"));
1905 return FAIL;
1906 }
1907
1908 /* Note: a value of 2 * n is returned for the register Q<n>. */
1909 if (regtype == REG_TYPE_NQ)
1910 {
1911 setmask = 3;
1912 addregs = 2;
1913 }
1914
1915 if (new_base < base_reg)
1916 base_reg = new_base;
1917
1918 if (mask & (setmask << new_base))
1919 {
1920 first_error (_("invalid register list"));
1921 return FAIL;
1922 }
1923
1924 if ((mask >> new_base) != 0 && ! warned)
1925 {
1926 as_tsktsk (_("register list not in ascending order"));
1927 warned = 1;
1928 }
1929
1930 mask |= setmask << new_base;
1931 count += addregs;
1932
1933 if (*str == '-') /* We have the start of a range expression */
1934 {
1935 int high_range;
1936
1937 str++;
1938
1939 if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
1940 == FAIL)
1941 {
1942 inst.error = gettext (reg_expected_msgs[regtype]);
1943 return FAIL;
1944 }
1945
1946 if (high_range >= max_regs)
1947 {
1948 first_error (_("register out of range in list"));
1949 return FAIL;
1950 }
1951
1952 if (regtype == REG_TYPE_NQ)
1953 high_range = high_range + 1;
1954
1955 if (high_range <= new_base)
1956 {
1957 inst.error = _("register range not in ascending order");
1958 return FAIL;
1959 }
1960
1961 for (new_base += addregs; new_base <= high_range; new_base += addregs)
1962 {
1963 if (mask & (setmask << new_base))
1964 {
1965 inst.error = _("invalid register list");
1966 return FAIL;
1967 }
1968
1969 mask |= setmask << new_base;
1970 count += addregs;
1971 }
1972 }
1973 }
1974 while (skip_past_comma (&str) != FAIL);
1975
1976 str++;
1977
1978 /* Sanity check -- should have raised a parse error above. */
1979 if (count == 0 || count > max_regs)
1980 abort ();
1981
1982 *pbase = base_reg;
1983
1984 /* Final test -- the registers must be consecutive. */
1985 mask >>= base_reg;
1986 for (i = 0; i < count; i++)
1987 {
1988 if ((mask & (1u << i)) == 0)
1989 {
1990 inst.error = _("non-contiguous register range");
1991 return FAIL;
1992 }
1993 }
1994
1995 *ccp = str;
1996
1997 return count;
1998 }
1999
2000 /* True if two alias types are the same. */
2001
2002 static bfd_boolean
2003 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
2004 {
2005 if (!a && !b)
2006 return TRUE;
2007
2008 if (!a || !b)
2009 return FALSE;
2010
2011 if (a->defined != b->defined)
2012 return FALSE;
2013
2014 if ((a->defined & NTA_HASTYPE) != 0
2015 && (a->eltype.type != b->eltype.type
2016 || a->eltype.size != b->eltype.size))
2017 return FALSE;
2018
2019 if ((a->defined & NTA_HASINDEX) != 0
2020 && (a->index != b->index))
2021 return FALSE;
2022
2023 return TRUE;
2024 }
2025
2026 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
2027 The base register is put in *PBASE.
2028 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
2029 the return value.
2030 The register stride (minus one) is put in bit 4 of the return value.
2031 Bits [6:5] encode the list length (minus one).
2032 The type of the list elements is put in *ELTYPE, if non-NULL. */
2033
2034 #define NEON_LANE(X) ((X) & 0xf)
2035 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
2036 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
2037
static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;
  int reg_incr = -1;	/* Register stride; -1 until determined.  */
  int count = 0;	/* Number of D registers seen so far.  */
  int lane = -1;	/* Lane index, or a NEON_*_LANES constant.  */
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;
  firsttype.defined = 0;
  firsttype.eltype.type = NT_invtype;
  firsttype.eltype.size = -1;
  firsttype.index = -1;

  /* The surrounding braces may be omitted for a singleton list.  */
  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First register: fixes the base and the element type that
	     every subsequent entry must match.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second register: fixes the stride.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  first_error (_(incr_error));
	  return FAIL;
	}

      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax.  Can only be used with non-indexed list
	 modes.  */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Count every D register in the range; a Q register counts as
	     two D registers (DREGS).  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* Every indexed element must name the same lane.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Encoding: lane in bits [3:0], stride-1 in bit 4, length-1 in [6:5]
     (see the comment and accessor macros above this function).  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2194
2195 /* Parse an explicit relocation suffix on an expression. This is
2196 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2197 arm_reloc_hsh contains no entries, so this function can only
2198 succeed if there is no () after the word. Returns -1 on error,
2199 BFD_RELOC_UNUSED if there wasn't any suffix. */
2200
2201 static int
2202 parse_reloc (char **str)
2203 {
2204 struct reloc_entry *r;
2205 char *p, *q;
2206
2207 if (**str != '(')
2208 return BFD_RELOC_UNUSED;
2209
2210 p = *str + 1;
2211 q = p;
2212
2213 while (*q && *q != ')' && *q != ',')
2214 q++;
2215 if (*q != ')')
2216 return -1;
2217
2218 if ((r = (struct reloc_entry *)
2219 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2220 return -1;
2221
2222 *str = q + 1;
2223 return r->reloc;
2224 }
2225
2226 /* Directives: register aliases. */
2227
2228 static struct reg_entry *
2229 insert_reg_alias (char *str, unsigned number, int type)
2230 {
2231 struct reg_entry *new_reg;
2232 const char *name;
2233
2234 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2235 {
2236 if (new_reg->builtin)
2237 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2238
2239 /* Only warn about a redefinition if it's not defined as the
2240 same register. */
2241 else if (new_reg->number != number || new_reg->type != type)
2242 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2243
2244 return NULL;
2245 }
2246
2247 name = xstrdup (str);
2248 new_reg = XNEW (struct reg_entry);
2249
2250 new_reg->name = name;
2251 new_reg->number = number;
2252 new_reg->type = type;
2253 new_reg->builtin = FALSE;
2254 new_reg->neon = NULL;
2255
2256 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2257 abort ();
2258
2259 return new_reg;
2260 }
2261
2262 static void
2263 insert_neon_reg_alias (char *str, int number, int type,
2264 struct neon_typed_alias *atype)
2265 {
2266 struct reg_entry *reg = insert_reg_alias (str, number, type);
2267
2268 if (!reg)
2269 {
2270 first_error (_("attempt to redefine typed alias"));
2271 return;
2272 }
2273
2274 if (atype)
2275 {
2276 reg->neon = XNEW (struct neon_typed_alias);
2277 *reg->neon = *atype;
2278 }
2279 }
2280
2281 /* Look for the .req directive. This is of the form:
2282
2283 new_register_name .req existing_register_name
2284
2285 If we find one, or if it looks sufficiently like one that we want to
2286 handle any error here, return TRUE. Otherwise return FALSE. */
2287
static bfd_boolean
create_register_alias (char * newname, char *p)
{
  struct reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  /* The right-hand side must name an existing (possibly aliased)
     register; otherwise warn and consume the directive.  */
  old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only insert the upper-case variant if it actually differs
	 from the name as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      /* Likewise for the lower-case variant.  */
      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
2360
2361 /* Create a Neon typed/indexed register alias using directives, e.g.:
2362 X .dn d5.s32[1]
2363 Y .qn 6.s16
2364 Z .dn d7
2365 T .dn Z[0]
2366 These typed registers can be used instead of the types specified after the
2367 Neon mnemonic, so long as all operands given have types. Types can also be
2368 specified directly, e.g.:
2369 vadd d0.s32, d1.s32, d2.s32 */
2370
static bfd_boolean
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend ATTRIBUTE_UNUSED;
  int namelen;

  /* Start with no type or index information; fields are filled in as
     the directive's operand is parsed.  */
  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  nameend = p;

  /* Only .dn (double) and .qn (quad) directives are handled here.  */
  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;
  else
    return FALSE;

  p += 5;

  if (*p == '\0')
    return FALSE;

  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return FALSE;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return FALSE;
	}
      /* Fake up a register entry on the stack; a quad register number
	 is expressed in units of double registers, hence the * 2.  */
      basereg = &mybasereg;
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* If the base register is itself a typed alias, inherit its
     type/index information as the starting point.  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return FALSE;
	}
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index.  */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return FALSE;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return FALSE;
	}
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  namelen = nameend - newname;
#else
  newname = original_case_string;
  namelen = strlen (newname);
#endif

  namebuf = xmemdup0 (newname, namelen);

  /* Insert the alias as written, then (as for .req aliases) in all
     upper case and in all lower case.  */
  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  free (namebuf);
  return TRUE;
}
2509
2510 /* Should never be called, as .req goes between the alias and the
2511 register name, not at the beginning of the line. */
2512
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* .req is handled inline when the label is parsed (see
     create_register_alias); reaching this handler means the directive
     appeared at the start of a line, which is invalid.  */
  as_bad (_("invalid syntax for .req directive"));
}
2518
static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  /* Like .req, .dn must follow the alias name; a line-initial .dn is
     a syntax error (see create_neon_reg_alias).  */
  as_bad (_("invalid syntax for .dn directive"));
}
2524
static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  /* Like .req, .qn must follow the alias name; a line-initial .qn is
     a syntax error (see create_neon_reg_alias).  */
  as_bad (_("invalid syntax for .qn directive"));
}
2530
2531 /* The .unreq directive deletes an alias which was previously defined
2532 by .req. For example:
2533
2534 my_alias .req r11
2535 .unreq my_alias */
2536
2537 static void
2538 s_unreq (int a ATTRIBUTE_UNUSED)
2539 {
2540 char * name;
2541 char saved_char;
2542
2543 name = input_line_pointer;
2544
2545 while (*input_line_pointer != 0
2546 && *input_line_pointer != ' '
2547 && *input_line_pointer != '\n')
2548 ++input_line_pointer;
2549
2550 saved_char = *input_line_pointer;
2551 *input_line_pointer = 0;
2552
2553 if (!*name)
2554 as_bad (_("invalid syntax for .unreq directive"));
2555 else
2556 {
2557 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2558 name);
2559
2560 if (!reg)
2561 as_bad (_("unknown register alias '%s'"), name);
2562 else if (reg->builtin)
2563 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2564 name);
2565 else
2566 {
2567 char * p;
2568 char * nbuf;
2569
2570 hash_delete (arm_reg_hsh, name, FALSE);
2571 free ((char *) reg->name);
2572 if (reg->neon)
2573 free (reg->neon);
2574 free (reg);
2575
2576 /* Also locate the all upper case and all lower case versions.
2577 Do not complain if we cannot find one or the other as it
2578 was probably deleted above. */
2579
2580 nbuf = strdup (name);
2581 for (p = nbuf; *p; p++)
2582 *p = TOUPPER (*p);
2583 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2584 if (reg)
2585 {
2586 hash_delete (arm_reg_hsh, nbuf, FALSE);
2587 free ((char *) reg->name);
2588 if (reg->neon)
2589 free (reg->neon);
2590 free (reg);
2591 }
2592
2593 for (p = nbuf; *p; p++)
2594 *p = TOLOWER (*p);
2595 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2596 if (reg)
2597 {
2598 hash_delete (arm_reg_hsh, nbuf, FALSE);
2599 free ((char *) reg->name);
2600 if (reg->neon)
2601 free (reg->neon);
2602 free (reg);
2603 }
2604
2605 free (nbuf);
2606 }
2607 }
2608
2609 *input_line_pointer = saved_char;
2610 demand_empty_rest_of_line ();
2611 }
2612
2613 /* Directives: Instruction set selection. */
2614
2615 #ifdef OBJ_ELF
2616 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2617 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2618 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2619 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2620
2621 /* Create a new mapping symbol for the transition to STATE. */
2622
static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  /* Pick the AAELF mapping symbol name for the new state.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  /* Mapping symbols are always local.  */
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Record ARM/Thumb-ness on the symbol for interworking support.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      /* Two symbols at the same offset: the newer one wins.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
2696
2697 /* We must sometimes convert a region marked as code to data during
2698 code alignment, if an odd number of bytes have to be padded. The
2699 code mapping symbol is pushed to an aligned address. */
2700
static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS *frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  /* The symbol being removed was also the first in the frag.  */
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the padding as data, then resume STATE at the aligned
     address just past it.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
2723
2724 static void mapping_state_2 (enum mstate state, int max_chars);
2725
2726 /* Set the mapping state to STATE. Only call this when about to
2727 emit some STATE bytes to the file. */
2728
2729 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (state == MAP_ARM || state == MAP_THUMB)
    /* PR gas/12931
       All ARM instructions require 4-byte alignment.
       (Almost) all Thumb instructions require 2-byte alignment.

       When emitting instructions into any section, mark the section
       appropriately.

       Some Thumb instructions are alignment-sensitive modulo 4 bytes,
       but themselves require 2-byte alignment; this applies to some
       PC- relative forms.  However, these cases will involve implicit
       literal pool generation or an explicit .align >=2, both of
       which will cause the section to be marked with sufficient
       alignment.  Thus, we don't handle those cases here.  */
    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);

  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later.  */
    return;

  mapping_state_2 (state, 0);
}
2762
2763 /* Same as mapping_state, but MAX_CHARS bytes have already been
2764 allocated. Put the mapping symbol that far back. */
2765
static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Only ordinary (loaded, relocatable) sections get mapping symbols.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  /* On the first transition to code in a section, any bytes already
     emitted before this point must retroactively be marked as data.  */
  if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
      || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
    {
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  /* Place the symbol MAX_CHARS bytes back, at the start of the
     already-allocated region.  */
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
2792 #undef TRANSITION
2793 #else
2794 #define mapping_state(x) ((void)0)
2795 #define mapping_state_2(x, y) ((void)0)
2796 #endif
2797
2798 /* Find the real, Thumb encoded start of a Thumb function. */
2799
2800 #ifdef OBJ_COFF
2801 static symbolS *
2802 find_real_start (symbolS * symbolP)
2803 {
2804 char * real_start;
2805 const char * name = S_GET_NAME (symbolP);
2806 symbolS * new_target;
2807
2808 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2809 #define STUB_NAME ".real_start_of"
2810
2811 if (name == NULL)
2812 abort ();
2813
2814 /* The compiler may generate BL instructions to local labels because
2815 it needs to perform a branch to a far away location. These labels
2816 do not have a corresponding ".real_start_of" label. We check
2817 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2818 the ".real_start_of" convention for nonlocal branches. */
2819 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2820 return symbolP;
2821
2822 real_start = concat (STUB_NAME, name, NULL);
2823 new_target = symbol_find (real_start);
2824 free (real_start);
2825
2826 if (new_target == NULL)
2827 {
2828 as_warn (_("Failed to find real start of function: %s\n"), name);
2829 new_target = symbolP;
2830 }
2831
2832 return new_target;
2833 }
2834 #endif
2835
2836 static void
2837 opcode_select (int width)
2838 {
2839 switch (width)
2840 {
2841 case 16:
2842 if (! thumb_mode)
2843 {
2844 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2845 as_bad (_("selected processor does not support THUMB opcodes"));
2846
2847 thumb_mode = 1;
2848 /* No need to force the alignment, since we will have been
2849 coming from ARM mode, which is word-aligned. */
2850 record_alignment (now_seg, 1);
2851 }
2852 break;
2853
2854 case 32:
2855 if (thumb_mode)
2856 {
2857 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2858 as_bad (_("selected processor does not support ARM opcodes"));
2859
2860 thumb_mode = 0;
2861
2862 if (!need_pass_2)
2863 frag_align (2, 0, 0);
2864
2865 record_alignment (now_seg, 1);
2866 }
2867 break;
2868
2869 default:
2870 as_bad (_("invalid instruction size selected (%d)"), width);
2871 }
2872 }
2873
static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  /* Handle the .arm directive: switch to 32-bit ARM encoding.  */
  opcode_select (32);
  demand_empty_rest_of_line ();
}
2880
static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* Handle the .thumb directive: switch to 16-bit Thumb encoding.  */
  opcode_select (16);
  demand_empty_rest_of_line ();
}
2887
2888 static void
2889 s_code (int unused ATTRIBUTE_UNUSED)
2890 {
2891 int temp;
2892
2893 temp = get_absolute_expression ();
2894 switch (temp)
2895 {
2896 case 16:
2897 case 32:
2898 opcode_select (temp);
2899 break;
2900
2901 default:
2902 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2903 }
2904 }
2905
static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.  */
  if (! thumb_mode)
    {
      /* 2 distinguishes "forced" Thumb mode from the ordinary mode 1
	 set by opcode_select.  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
2922
static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  /* .thumb_func implies .thumb.  */
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
}
2932
2933 /* Perform a .set directive, but also mark the alias as
2934 being a thumb function. */
2935
static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  delim = get_symbol_name (& name);
  end_name = input_line_pointer;
  (void) restore_line_pointer (delim);

  if (*input_line_pointer != ',')
    {
      /* Temporarily terminate NAME so it prints cleanly.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  /* Look the symbol up, creating it if it does not yet exist.  */
  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  * end_name = delim;

  /* With EQUIV set (.eqv semantics), redefinition is an error.  */
  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  /* Assign the value of the expression following the comma.  */
  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  /* Mark the symbol as a Thumb function for interworking support.  */
  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
3021
3022 /* Directives: Mode selection. */
3023
3024 /* .syntax [unified|divided] - choose the new unified syntax
3025 (same for Arm and Thumb encoding, modulo slight differences in what
3026 can be represented) or the old divergent syntax for each mode. */
3027 static void
3028 s_syntax (int unused ATTRIBUTE_UNUSED)
3029 {
3030 char *name, delim;
3031
3032 delim = get_symbol_name (& name);
3033
3034 if (!strcasecmp (name, "unified"))
3035 unified_syntax = TRUE;
3036 else if (!strcasecmp (name, "divided"))
3037 unified_syntax = FALSE;
3038 else
3039 {
3040 as_bad (_("unrecognized syntax mode \"%s\""), name);
3041 return;
3042 }
3043 (void) restore_line_pointer (delim);
3044 demand_empty_rest_of_line ();
3045 }
3046
3047 /* Directives: sectioning and alignment. */
3048
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

  /* Give the target a chance to react to the section change.  */
#ifdef md_elf_section_change_hook
  md_elf_section_change_hook ();
#endif
}
3061
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Handle the .even directive: align to a 2-byte boundary.  */
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
3073
3074 /* Directives: CodeComposer Studio. */
3075
3076 /* .ref (for CodeComposer Studio syntax only). */
static void
s_ccs_ref (int unused ATTRIBUTE_UNUSED)
{
  /* .ref is accepted (and ignored) only under CodeComposer syntax.  */
  if (codecomposer_syntax)
    ignore_rest_of_line ();
  else
    as_bad (_(".ref pseudo-op only available with -mccs flag."));
}
3085
3086 /* If name is not NULL, then it is used for marking the beginning of a
3087 function, whereas if it is NULL then it means the function end. */
3088 static void
3089 asmfunc_debug (const char * name)
3090 {
3091 static const char * last_name = NULL;
3092
3093 if (name != NULL)
3094 {
3095 gas_assert (last_name == NULL);
3096 last_name = name;
3097
3098 if (debug_type == DEBUG_STABS)
3099 stabs_generate_asm_func (name, name);
3100 }
3101 else
3102 {
3103 gas_assert (last_name != NULL);
3104
3105 if (debug_type == DEBUG_STABS)
3106 stabs_generate_asm_endfunc (last_name, last_name);
3107
3108 last_name = NULL;
3109 }
3110 }
3111
3112 static void
3113 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
3114 {
3115 if (codecomposer_syntax)
3116 {
3117 switch (asmfunc_state)
3118 {
3119 case OUTSIDE_ASMFUNC:
3120 asmfunc_state = WAITING_ASMFUNC_NAME;
3121 break;
3122
3123 case WAITING_ASMFUNC_NAME:
3124 as_bad (_(".asmfunc repeated."));
3125 break;
3126
3127 case WAITING_ENDASMFUNC:
3128 as_bad (_(".asmfunc without function."));
3129 break;
3130 }
3131 demand_empty_rest_of_line ();
3132 }
3133 else
3134 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3135 }
3136
3137 static void
3138 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
3139 {
3140 if (codecomposer_syntax)
3141 {
3142 switch (asmfunc_state)
3143 {
3144 case OUTSIDE_ASMFUNC:
3145 as_bad (_(".endasmfunc without a .asmfunc."));
3146 break;
3147
3148 case WAITING_ASMFUNC_NAME:
3149 as_bad (_(".endasmfunc without function."));
3150 break;
3151
3152 case WAITING_ENDASMFUNC:
3153 asmfunc_state = OUTSIDE_ASMFUNC;
3154 asmfunc_debug (NULL);
3155 break;
3156 }
3157 demand_empty_rest_of_line ();
3158 }
3159 else
3160 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3161 }
3162
static void
s_ccs_def (int name)
{
  /* Under CodeComposer syntax .def behaves like .global.  */
  if (codecomposer_syntax)
    s_globl (name);
  else
    as_bad (_(".def pseudo-op only available with -mccs flag."));
}
3171
3172 /* Directives: Literal pools. */
3173
3174 static literal_pool *
3175 find_literal_pool (void)
3176 {
3177 literal_pool * pool;
3178
3179 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3180 {
3181 if (pool->section == now_seg
3182 && pool->sub_section == now_subseg)
3183 break;
3184 }
3185
3186 return pool;
3187 }
3188
3189 static literal_pool *
3190 find_or_make_literal_pool (void)
3191 {
3192 /* Next literal pool ID number. */
3193 static unsigned int latest_pool_num = 1;
3194 literal_pool * pool;
3195
3196 pool = find_literal_pool ();
3197
3198 if (pool == NULL)
3199 {
3200 /* Create a new pool. */
3201 pool = XNEW (literal_pool);
3202 if (! pool)
3203 return NULL;
3204
3205 pool->next_free_entry = 0;
3206 pool->section = now_seg;
3207 pool->sub_section = now_subseg;
3208 pool->next = list_of_pools;
3209 pool->symbol = NULL;
3210 pool->alignment = 2;
3211
3212 /* Add it to the list. */
3213 list_of_pools = pool;
3214 }
3215
3216 /* New pools, and emptied pools, will have a NULL symbol. */
3217 if (pool->symbol == NULL)
3218 {
3219 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3220 (valueT) 0, &zero_address_frag);
3221 pool->id = latest_pool_num ++;
3222 }
3223
3224 /* Done. */
3225 return pool;
3226 }
3227
3228 /* Add the literal in the global 'inst'
3229 structure to the relevant literal pool. */
3230
3231 static int
3232 add_to_lit_pool (unsigned int nbytes)
3233 {
3234 #define PADDING_SLOT 0x1
3235 #define LIT_ENTRY_SIZE_MASK 0xFF
3236 literal_pool * pool;
3237 unsigned int entry, pool_size = 0;
3238 bfd_boolean padding_slot_p = FALSE;
3239 unsigned imm1 = 0;
3240 unsigned imm2 = 0;
3241
3242 if (nbytes == 8)
3243 {
3244 imm1 = inst.operands[1].imm;
3245 imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
3246 : inst.reloc.exp.X_unsigned ? 0
3247 : ((bfd_int64_t) inst.operands[1].imm) >> 32);
3248 if (target_big_endian)
3249 {
3250 imm1 = imm2;
3251 imm2 = inst.operands[1].imm;
3252 }
3253 }
3254
3255 pool = find_or_make_literal_pool ();
3256
3257 /* Check if this literal value is already in the pool. */
3258 for (entry = 0; entry < pool->next_free_entry; entry ++)
3259 {
3260 if (nbytes == 4)
3261 {
3262 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
3263 && (inst.reloc.exp.X_op == O_constant)
3264 && (pool->literals[entry].X_add_number
3265 == inst.reloc.exp.X_add_number)
3266 && (pool->literals[entry].X_md == nbytes)
3267 && (pool->literals[entry].X_unsigned
3268 == inst.reloc.exp.X_unsigned))
3269 break;
3270
3271 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
3272 && (inst.reloc.exp.X_op == O_symbol)
3273 && (pool->literals[entry].X_add_number
3274 == inst.reloc.exp.X_add_number)
3275 && (pool->literals[entry].X_add_symbol
3276 == inst.reloc.exp.X_add_symbol)
3277 && (pool->literals[entry].X_op_symbol
3278 == inst.reloc.exp.X_op_symbol)
3279 && (pool->literals[entry].X_md == nbytes))
3280 break;
3281 }
3282 else if ((nbytes == 8)
3283 && !(pool_size & 0x7)
3284 && ((entry + 1) != pool->next_free_entry)
3285 && (pool->literals[entry].X_op == O_constant)
3286 && (pool->literals[entry].X_add_number == (offsetT) imm1)
3287 && (pool->literals[entry].X_unsigned
3288 == inst.reloc.exp.X_unsigned)
3289 && (pool->literals[entry + 1].X_op == O_constant)
3290 && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
3291 && (pool->literals[entry + 1].X_unsigned
3292 == inst.reloc.exp.X_unsigned))
3293 break;
3294
3295 padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
3296 if (padding_slot_p && (nbytes == 4))
3297 break;
3298
3299 pool_size += 4;
3300 }
3301
3302 /* Do we need to create a new entry? */
3303 if (entry == pool->next_free_entry)
3304 {
3305 if (entry >= MAX_LITERAL_POOL_SIZE)
3306 {
3307 inst.error = _("literal pool overflow");
3308 return FAIL;
3309 }
3310
3311 if (nbytes == 8)
3312 {
3313 /* For 8-byte entries, we align to an 8-byte boundary,
3314 and split it into two 4-byte entries, because on 32-bit
3315 host, 8-byte constants are treated as big num, thus
3316 saved in "generic_bignum" which will be overwritten
3317 by later assignments.
3318
3319 We also need to make sure there is enough space for
3320 the split.
3321
3322 We also check to make sure the literal operand is a
3323 constant number. */
3324 if (!(inst.reloc.exp.X_op == O_constant
3325 || inst.reloc.exp.X_op == O_big))
3326 {
3327 inst.error = _("invalid type for literal pool");
3328 return FAIL;
3329 }
3330 else if (pool_size & 0x7)
3331 {
3332 if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
3333 {
3334 inst.error = _("literal pool overflow");
3335 return FAIL;
3336 }
3337
3338 pool->literals[entry] = inst.reloc.exp;
3339 pool->literals[entry].X_op = O_constant;
3340 pool->literals[entry].X_add_number = 0;
3341 pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
3342 pool->next_free_entry += 1;
3343 pool_size += 4;
3344 }
3345 else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
3346 {
3347 inst.error = _("literal pool overflow");
3348 return FAIL;
3349 }
3350
3351 pool->literals[entry] = inst.reloc.exp;
3352 pool->literals[entry].X_op = O_constant;
3353 pool->literals[entry].X_add_number = imm1;
3354 pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
3355 pool->literals[entry++].X_md = 4;
3356 pool->literals[entry] = inst.reloc.exp;
3357 pool->literals[entry].X_op = O_constant;
3358 pool->literals[entry].X_add_number = imm2;
3359 pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
3360 pool->literals[entry].X_md = 4;
3361 pool->alignment = 3;
3362 pool->next_free_entry += 1;
3363 }
3364 else
3365 {
3366 pool->literals[entry] = inst.reloc.exp;
3367 pool->literals[entry].X_md = 4;
3368 }
3369
3370 #ifdef OBJ_ELF
3371 /* PR ld/12974: Record the location of the first source line to reference
3372 this entry in the literal pool. If it turns out during linking that the
3373 symbol does not exist we will be able to give an accurate line number for
3374 the (first use of the) missing reference. */
3375 if (debug_type == DEBUG_DWARF2)
3376 dwarf2_where (pool->locs + entry);
3377 #endif
3378 pool->next_free_entry += 1;
3379 }
3380 else if (padding_slot_p)
3381 {
3382 pool->literals[entry] = inst.reloc.exp;
3383 pool->literals[entry].X_md = nbytes;
3384 }
3385
3386 inst.reloc.exp.X_op = O_symbol;
3387 inst.reloc.exp.X_add_number = pool_size;
3388 inst.reloc.exp.X_add_symbol = pool->symbol;
3389
3390 return SUCCESS;
3391 }
3392
3393 bfd_boolean
3394 tc_start_label_without_colon (void)
3395 {
3396 bfd_boolean ret = TRUE;
3397
3398 if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
3399 {
3400 const char *label = input_line_pointer;
3401
3402 while (!is_end_of_line[(int) label[-1]])
3403 --label;
3404
3405 if (*label == '.')
3406 {
3407 as_bad (_("Invalid label '%s'"), label);
3408 ret = FALSE;
3409 }
3410
3411 asmfunc_debug (label);
3412
3413 asmfunc_state = WAITING_ENDASMFUNC;
3414 }
3415
3416 return ret;
3417 }
3418
3419 /* Can't use symbol_new here, so have to create a symbol and then at
3420 a later date assign it a value. That's what these functions do. */
3421
3422 static void
3423 symbol_locate (symbolS * symbolP,
3424 const char * name, /* It is copied, the caller can modify. */
3425 segT segment, /* Segment identifier (SEG_<something>). */
3426 valueT valu, /* Symbol value. */
3427 fragS * frag) /* Associated fragment. */
3428 {
3429 size_t name_length;
3430 char * preserved_copy_of_name;
3431
3432 name_length = strlen (name) + 1; /* +1 for \0. */
3433 obstack_grow (&notes, name, name_length);
3434 preserved_copy_of_name = (char *) obstack_finish (&notes);
3435
3436 #ifdef tc_canonicalize_symbol_name
3437 preserved_copy_of_name =
3438 tc_canonicalize_symbol_name (preserved_copy_of_name);
3439 #endif
3440
3441 S_SET_NAME (symbolP, preserved_copy_of_name);
3442
3443 S_SET_SEGMENT (symbolP, segment);
3444 S_SET_VALUE (symbolP, valu);
3445 symbol_clear_list_pointers (symbolP);
3446
3447 symbol_set_frag (symbolP, frag);
3448
3449 /* Link to end of symbol chain. */
3450 {
3451 extern int symbol_table_frozen;
3452
3453 if (symbol_table_frozen)
3454 abort ();
3455 }
3456
3457 symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
3458
3459 obj_symbol_new_hook (symbolP);
3460
3461 #ifdef tc_symbol_new_hook
3462 tc_symbol_new_hook (symbolP);
3463 #endif
3464
3465 #ifdef DEBUG_SYMS
3466 verify_symbol_chain (symbol_rootP, symbol_lastP);
3467 #endif /* DEBUG_SYMS */
3468 }
3469
3470 static void
3471 s_ltorg (int ignored ATTRIBUTE_UNUSED)
3472 {
3473 unsigned int entry;
3474 literal_pool * pool;
3475 char sym_name[20];
3476
3477 pool = find_literal_pool ();
3478 if (pool == NULL
3479 || pool->symbol == NULL
3480 || pool->next_free_entry == 0)
3481 return;
3482
3483 /* Align pool as you have word accesses.
3484 Only make a frag if we have to. */
3485 if (!need_pass_2)
3486 frag_align (pool->alignment, 0, 0);
3487
3488 record_alignment (now_seg, 2);
3489
3490 #ifdef OBJ_ELF
3491 seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
3492 make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
3493 #endif
3494 sprintf (sym_name, "$$lit_\002%x", pool->id);
3495
3496 symbol_locate (pool->symbol, sym_name, now_seg,
3497 (valueT) frag_now_fix (), frag_now);
3498 symbol_table_insert (pool->symbol);
3499
3500 ARM_SET_THUMB (pool->symbol, thumb_mode);
3501
3502 #if defined OBJ_COFF || defined OBJ_ELF
3503 ARM_SET_INTERWORK (pool->symbol, support_interwork);
3504 #endif
3505
3506 for (entry = 0; entry < pool->next_free_entry; entry ++)
3507 {
3508 #ifdef OBJ_ELF
3509 if (debug_type == DEBUG_DWARF2)
3510 dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
3511 #endif
3512 /* First output the expression in the instruction to the pool. */
3513 emit_expr (&(pool->literals[entry]),
3514 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
3515 }
3516
3517 /* Mark the pool as empty. */
3518 pool->next_free_entry = 0;
3519 pool->symbol = NULL;
3520 }
3521
3522 #ifdef OBJ_ELF
3523 /* Forward declarations for functions below, in the MD interface
3524 section. */
3525 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3526 static valueT create_unwind_entry (int);
3527 static void start_unwind_section (const segT, int);
3528 static void add_unwind_opcode (valueT, int);
3529 static void flush_pending_unwind (void);
3530
3531 /* Directives: Data. */
3532
/* Implement data directives (.word etc.) for ARM ELF.  Like the
   generic cons, but each symbolic expression may carry a relocation
   suffix, e.g. ".word sym(got)".  NBYTES is the size of each value
   emitted.  */

static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  /* This emits data, not instructions: update the mapping state.  */
  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  char *before_reloc = input_line_pointer;
	  /* Look for a parenthesised relocation suffix after the symbol.  */
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    /* No suffix: emit as plain data.  */
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = (reloc_howto_type *)
		bfd_reloc_type_lookup (stdoutput,
				       (bfd_reloc_code_real_type) reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (ngettext ("%s relocations do not fit in %d byte",
				  "%s relocations do not fit in %d bytes",
				  nbytes),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;
		  int offset;
		  char *save_buf = XNEWVEC (char, input_line_pointer - base);

		  /* Save the line, then splice the "(reloc)" text out so
		     that re-running expression () sees one plain
		     expression; restore the line afterwards.  */
		  memcpy (save_buf, base, input_line_pointer - base);
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  memcpy (base, save_buf, p - base);

		  /* NOTE(review): OFFSET places the reloc in the last
		     SIZE bytes of the NBYTES field — presumably for
		     big-endian layout; confirm against md_apply_fix.  */
		  offset = nbytes - size;
		  p = frag_more (nbytes);
		  memset (p, 0, nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
		  free (save_buf);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3627
3628 /* Emit an expression containing a 32-bit thumb instruction.
3629 Implementation based on put_thumb32_insn. */
3630
3631 static void
3632 emit_thumb32_expr (expressionS * exp)
3633 {
3634 expressionS exp_high = *exp;
3635
3636 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3637 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3638 exp->X_add_number &= 0xffff;
3639 emit_expr (exp, (unsigned int) THUMB_SIZE);
3640 }
3641
3642 /* Guess the instruction size based on the opcode. */
3643
static int
thumb_insn_size (int opcode)
{
  unsigned int uop = (unsigned int) opcode;

  /* Values below 0xe800 can only be 16-bit encodings; values with a
     full 32-bit pattern at or above 0xe8000000 are 32-bit encodings;
     anything in between is ambiguous.  */
  if (uop < 0xe800u)
    return 2;
  if (uop >= 0xe8000000u)
    return 4;
  return 0;
}
3654
3655 static bfd_boolean
3656 emit_insn (expressionS *exp, int nbytes)
3657 {
3658 int size = 0;
3659
3660 if (exp->X_op == O_constant)
3661 {
3662 size = nbytes;
3663
3664 if (size == 0)
3665 size = thumb_insn_size (exp->X_add_number);
3666
3667 if (size != 0)
3668 {
3669 if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
3670 {
3671 as_bad (_(".inst.n operand too big. "\
3672 "Use .inst.w instead"));
3673 size = 0;
3674 }
3675 else
3676 {
3677 if (now_it.state == AUTOMATIC_IT_BLOCK)
3678 set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
3679 else
3680 set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);
3681
3682 if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
3683 emit_thumb32_expr (exp);
3684 else
3685 emit_expr (exp, (unsigned int) size);
3686
3687 it_fsm_post_encode ();
3688 }
3689 }
3690 else
3691 as_bad (_("cannot determine Thumb instruction size. " \
3692 "Use .inst.n/.inst.w instead"));
3693 }
3694 else
3695 as_bad (_("constant expression required"));
3696
3697 return (size != 0);
3698 }
3699
3700 /* Like s_arm_elf_cons but do not use md_cons_align and
3701 set the mapping state to MAP_ARM/MAP_THUMB. */
3702
3703 static void
3704 s_arm_elf_inst (int nbytes)
3705 {
3706 if (is_it_end_of_statement ())
3707 {
3708 demand_empty_rest_of_line ();
3709 return;
3710 }
3711
3712 /* Calling mapping_state () here will not change ARM/THUMB,
3713 but will ensure not to be in DATA state. */
3714
3715 if (thumb_mode)
3716 mapping_state (MAP_THUMB);
3717 else
3718 {
3719 if (nbytes != 0)
3720 {
3721 as_bad (_("width suffixes are invalid in ARM mode"));
3722 ignore_rest_of_line ();
3723 return;
3724 }
3725
3726 nbytes = 4;
3727
3728 mapping_state (MAP_ARM);
3729 }
3730
3731 do
3732 {
3733 expressionS exp;
3734
3735 expression (& exp);
3736
3737 if (! emit_insn (& exp, nbytes))
3738 {
3739 ignore_rest_of_line ();
3740 return;
3741 }
3742 }
3743 while (*input_line_pointer++ == ',');
3744
3745 /* Put terminator back into stream. */
3746 input_line_pointer --;
3747 demand_empty_rest_of_line ();
3748 }
3749
3750 /* Parse a .rel31 directive. */
3751
3752 static void
3753 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
3754 {
3755 expressionS exp;
3756 char *p;
3757 valueT highbit;
3758
3759 highbit = 0;
3760 if (*input_line_pointer == '1')
3761 highbit = 0x80000000;
3762 else if (*input_line_pointer != '0')
3763 as_bad (_("expected 0 or 1"));
3764
3765 input_line_pointer++;
3766 if (*input_line_pointer != ',')
3767 as_bad (_("missing comma"));
3768 input_line_pointer++;
3769
3770 #ifdef md_flush_pending_output
3771 md_flush_pending_output ();
3772 #endif
3773
3774 #ifdef md_cons_align
3775 md_cons_align (4);
3776 #endif
3777
3778 mapping_state (MAP_DATA);
3779
3780 expression (&exp);
3781
3782 p = frag_more (4);
3783 md_number_to_chars (p, highbit, 4);
3784 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3785 BFD_RELOC_ARM_PREL31);
3786
3787 demand_empty_rest_of_line ();
3788 }
3789
3790 /* Directives: AEABI stack-unwind tables. */
3791
3792 /* Parse an unwind_fnstart directive. Simply records the current location. */
3793
3794 static void
3795 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3796 {
3797 demand_empty_rest_of_line ();
3798 if (unwind.proc_start)
3799 {
3800 as_bad (_("duplicate .fnstart directive"));
3801 return;
3802 }
3803
3804 /* Mark the start of the function. */
3805 unwind.proc_start = expr_build_dot ();
3806
3807 /* Reset the rest of the unwind info. */
3808 unwind.opcode_count = 0;
3809 unwind.table_entry = NULL;
3810 unwind.personality_routine = NULL;
3811 unwind.personality_index = -1;
3812 unwind.frame_size = 0;
3813 unwind.fp_offset = 0;
3814 unwind.fp_reg = REG_SP;
3815 unwind.fp_used = 0;
3816 unwind.sp_restored = 0;
3817 }
3818
3819
3820 /* Parse a handlerdata directive. Creates the exception handling table entry
3821 for the function. */
3822
3823 static void
3824 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3825 {
3826 demand_empty_rest_of_line ();
3827 if (!unwind.proc_start)
3828 as_bad (MISSING_FNSTART);
3829
3830 if (unwind.table_entry)
3831 as_bad (_("duplicate .handlerdata directive"));
3832
3833 create_unwind_entry (1);
3834 }
3835
3836 /* Parse an unwind_fnend directive. Generates the index table entry. */
3837
static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  If .handlerdata already created it,
     VAL stays 0 and unwind.table_entry is referenced below.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  memset (ptr, 0, 8);
  /* WHERE is the offset of the start of the two-word entry.  */
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      /* A zero-size BFD_RELOC_NONE fix records the reference without
	 emitting any bytes.  */
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  unwind.proc_start = NULL;
}
3905
3906
3907 /* Parse an unwind_cantunwind directive. */
3908
3909 static void
3910 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3911 {
3912 demand_empty_rest_of_line ();
3913 if (!unwind.proc_start)
3914 as_bad (MISSING_FNSTART);
3915
3916 if (unwind.personality_routine || unwind.personality_index != -1)
3917 as_bad (_("personality routine specified for cantunwind frame"));
3918
3919 unwind.personality_index = -2;
3920 }
3921
3922
3923 /* Parse a personalityindex directive. */
3924
3925 static void
3926 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3927 {
3928 expressionS exp;
3929
3930 if (!unwind.proc_start)
3931 as_bad (MISSING_FNSTART);
3932
3933 if (unwind.personality_routine || unwind.personality_index != -1)
3934 as_bad (_("duplicate .personalityindex directive"));
3935
3936 expression (&exp);
3937
3938 if (exp.X_op != O_constant
3939 || exp.X_add_number < 0 || exp.X_add_number > 15)
3940 {
3941 as_bad (_("bad personality routine number"));
3942 ignore_rest_of_line ();
3943 return;
3944 }
3945
3946 unwind.personality_index = exp.X_add_number;
3947
3948 demand_empty_rest_of_line ();
3949 }
3950
3951
3952 /* Parse a personality directive. */
3953
static void
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
{
  char *name, *p, c;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personality directive"));

  /* Read the routine name.  C is the delimiter that terminated it and
     P points at that delimiter so it can be restored afterwards.  */
  c = get_symbol_name (& name);
  p = input_line_pointer;
  if (c == '"')
    ++ input_line_pointer;
  unwind.personality_routine = symbol_find_or_make (name);
  /* Put back the delimiter overwritten by get_symbol_name.  */
  *p = c;
  demand_empty_rest_of_line ();
}
3973
3974
3975 /* Parse a directive saving core registers. */
3976
static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;
  int n;

  /* RANGE is a bitmask: bit N set means rN is in the list.  */
  range = parse_reg_list (&input_line_pointer);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      /* Drop the movsp opcode and rewrite ip's bit (12) as sp's (13).  */
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.  */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set (r14 / 0x4000 is allowed
	 in the short form, hence the 0xbff0 mask).  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.  */
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.  */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed: 4 per saved core register.  */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
4050
4051
4052 /* Parse a directive saving FPA registers. */
4053
4054 static void
4055 s_arm_unwind_save_fpa (int reg)
4056 {
4057 expressionS exp;
4058 int num_regs;
4059 valueT op;
4060
4061 /* Get Number of registers to transfer. */
4062 if (skip_past_comma (&input_line_pointer) != FAIL)
4063 expression (&exp);
4064 else
4065 exp.X_op = O_illegal;
4066
4067 if (exp.X_op != O_constant)
4068 {
4069 as_bad (_("expected , <constant>"));
4070 ignore_rest_of_line ();
4071 return;
4072 }
4073
4074 num_regs = exp.X_add_number;
4075
4076 if (num_regs < 1 || num_regs > 4)
4077 {
4078 as_bad (_("number of registers must be in the range [1:4]"));
4079 ignore_rest_of_line ();
4080 return;
4081 }
4082
4083 demand_empty_rest_of_line ();
4084
4085 if (reg == 4)
4086 {
4087 /* Short form. */
4088 op = 0xb4 | (num_regs - 1);
4089 add_unwind_opcode (op, 1);
4090 }
4091 else
4092 {
4093 /* Long form. */
4094 op = 0xc800 | (reg << 4) | (num_regs - 1);
4095 add_unwind_opcode (op, 2);
4096 }
4097 unwind.frame_size += num_regs * 12;
4098 }
4099
4100
4101 /* Parse a directive saving VFP registers for ARMv6 and above. */
4102
static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;

  /* COUNT registers starting at dSTART.  */
  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    /* The list straddles d16: split it at the boundary.  */
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      /* When start == 16, start - 16 and 0 coincide, so the '>'
	 comparison is equivalent to '>='.  */
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* Each D register occupies 8 bytes on the stack.  */
  unwind.frame_size += count * 8;
}
4149
4150
4151 /* Parse a directive saving VFP registers for pre-ARMv6. */
4152
4153 static void
4154 s_arm_unwind_save_vfp (void)
4155 {
4156 int count;
4157 unsigned int reg;
4158 valueT op;
4159
4160 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
4161 if (count == FAIL)
4162 {
4163 as_bad (_("expected register list"));
4164 ignore_rest_of_line ();
4165 return;
4166 }
4167
4168 demand_empty_rest_of_line ();
4169
4170 if (reg == 8)
4171 {
4172 /* Short form. */
4173 op = 0xb8 | (count - 1);
4174 add_unwind_opcode (op, 1);
4175 }
4176 else
4177 {
4178 /* Long form. */
4179 op = 0xb300 | (reg << 4) | (count - 1);
4180 add_unwind_opcode (op, 2);
4181 }
4182 unwind.frame_size += count * 8 + 4;
4183 }
4184
4185
4186 /* Parse a directive saving iWMMXt data registers. */
4187
static void
s_arm_unwind_save_mmxwr (void)
{
  int reg;
  int hi_reg;
  int i;
  unsigned mask = 0;	/* Bit N set => wrN is in the list.  */
  valueT op;

  if (*input_line_pointer == '{')
    input_line_pointer++;

  /* Parse the comma-separated list of registers and ranges into MASK.  */
  do
    {
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);

      if (reg == FAIL)
	{
	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
	  goto error;
	}

      /* A higher bit already set means this register is out of order.  */
      if (mask >> reg)
	as_tsktsk (_("register list not in ascending order"));
      mask |= 1 << reg;

      if (*input_line_pointer == '-')
	{
	  input_line_pointer++;
	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
	  if (hi_reg == FAIL)
	    {
	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
	      goto error;
	    }
	  else if (reg >= hi_reg)
	    {
	      as_bad (_("bad register range"));
	      goto error;
	    }
	  for (; reg < hi_reg; reg++)
	    mask |= 1 << reg;
	}
    }
  while (skip_past_comma (&input_line_pointer) != FAIL);

  skip_past_char (&input_line_pointer, '}');

  demand_empty_rest_of_line ();

  /* Generate any deferred opcodes because we're going to be looking at
     the list.  */
  flush_pending_unwind ();

  /* Each wr register occupies 8 bytes on the stack.  */
  for (i = 0; i < 16; i++)
    {
      if (mask & (1 << i))
	unwind.frame_size += 8;
    }

  /* Attempt to combine with a previous opcode.  We do this because gcc
     likes to output separate unwind directives for a single block of
     registers.  */
  if (unwind.opcode_count > 0)
    {
      i = unwind.opcodes[unwind.opcode_count - 1];
      if ((i & 0xf8) == 0xc0)
	{
	  i &= 7;
	  /* Only merge if the blocks are contiguous.  */
	  if (i < 6)
	    {
	      /* Previous opcode was a short-form wr10..wr(10+i) save;
		 merge it if our list is exactly {wr9}.  */
	      if ((mask & 0xfe00) == (1 << 9))
		{
		  mask |= ((1 << (i + 11)) - 1) & 0xfc00;
		  unwind.opcode_count--;
		}
	    }
	  else if (i == 6 && unwind.opcode_count >= 2)
	    {
	      /* Previous opcode was a two-byte long-form save; decode
		 its base register (REG) and count (I) and merge if our
		 list ends exactly where that block begins.  */
	      i = unwind.opcodes[unwind.opcode_count - 2];
	      reg = i >> 4;
	      i &= 0xf;

	      op = 0xffff << (reg - 1);
	      if (reg > 0
		  && ((mask & op) == (1u << (reg - 1))))
		{
		  op = (1 << (reg + i + 1)) - 1;
		  op &= ~((1 << reg) - 1);
		  mask |= op;
		  unwind.opcode_count -= 2;
		}
	    }
	}
    }

  hi_reg = 15;
  /* We want to generate opcodes in the order the registers have been
     saved, ie. descending order.  */
  for (reg = 15; reg >= -1; reg--)
    {
      /* Save registers in blocks.  */
      if (reg < 0
	  || !(mask & (1 << reg)))
	{
	  /* We found an unsaved reg.  Generate opcodes to save the
	     preceding block.  */
	  if (reg != hi_reg)
	    {
	      if (reg == 9)
		{
		  /* Short form.  */
		  op = 0xc0 | (hi_reg - 10);
		  add_unwind_opcode (op, 1);
		}
	      else
		{
		  /* Long form.  */
		  op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
		  add_unwind_opcode (op, 2);
		}
	    }
	  hi_reg = reg - 1;
	}
    }

  return;
error:
  ignore_rest_of_line ();
}
4319
/* Parse a directive saving iWMMXt control registers (wcgr0-wcgr3,
   register numbers 8-11; the mask is rebased to bit 0).  */

static void
s_arm_unwind_save_mmxwcg (void)
{
  int reg;
  int hi_reg;
  unsigned mask = 0;	/* Bit N set => wcgr(N+8) is in the list.  */
  valueT op;

  if (*input_line_pointer == '{')
    input_line_pointer++;

  skip_whitespace (input_line_pointer);

  do
    {
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);

      if (reg == FAIL)
	{
	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
	  goto error;
	}

      /* Rebase so wcgr0 maps to bit 0.  */
      reg -= 8;
      if (mask >> reg)
	as_tsktsk (_("register list not in ascending order"));
      mask |= 1 << reg;

      if (*input_line_pointer == '-')
	{
	  input_line_pointer++;
	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
	  if (hi_reg == FAIL)
	    {
	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
	      goto error;
	    }
	  else if (reg >= hi_reg)
	    {
	      /* NOTE(review): HI_REG is compared un-rebased (still
		 8..11) against the rebased REG, so this rejects far
		 fewer ranges than intended — confirm.  */
	      as_bad (_("bad register range"));
	      goto error;
	    }
	  for (; reg < hi_reg; reg++)
	    mask |= 1 << reg;
	}
    }
  while (skip_past_comma (&input_line_pointer) != FAIL);

  skip_past_char (&input_line_pointer, '}');

  demand_empty_rest_of_line ();

  /* Generate any deferred opcodes because we're going to be looking at
     the list.  */
  flush_pending_unwind ();

  /* Each control register occupies 4 bytes on the stack.  */
  for (reg = 0; reg < 16; reg++)
    {
      if (mask & (1 << reg))
	unwind.frame_size += 4;
    }
  op = 0xc700 | mask;
  add_unwind_opcode (op, 2);
  return;
error:
  ignore_rest_of_line ();
}
4387
4388
4389 /* Parse an unwind_save directive.
4390 If the argument is non-zero, this is a .vsave directive. */
4391
4392 static void
4393 s_arm_unwind_save (int arch_v6)
4394 {
4395 char *peek;
4396 struct reg_entry *reg;
4397 bfd_boolean had_brace = FALSE;
4398
4399 if (!unwind.proc_start)
4400 as_bad (MISSING_FNSTART);
4401
4402 /* Figure out what sort of save we have. */
4403 peek = input_line_pointer;
4404
4405 if (*peek == '{')
4406 {
4407 had_brace = TRUE;
4408 peek++;
4409 }
4410
4411 reg = arm_reg_parse_multi (&peek);
4412
4413 if (!reg)
4414 {
4415 as_bad (_("register expected"));
4416 ignore_rest_of_line ();
4417 return;
4418 }
4419
4420 switch (reg->type)
4421 {
4422 case REG_TYPE_FN:
4423 if (had_brace)
4424 {
4425 as_bad (_("FPA .unwind_save does not take a register list"));
4426 ignore_rest_of_line ();
4427 return;
4428 }
4429 input_line_pointer = peek;
4430 s_arm_unwind_save_fpa (reg->number);
4431 return;
4432
4433 case REG_TYPE_RN:
4434 s_arm_unwind_save_core ();
4435 return;
4436
4437 case REG_TYPE_VFD:
4438 if (arch_v6)
4439 s_arm_unwind_save_vfp_armv6 ();
4440 else
4441 s_arm_unwind_save_vfp ();
4442 return;
4443
4444 case REG_TYPE_MMXWR:
4445 s_arm_unwind_save_mmxwr ();
4446 return;
4447
4448 case REG_TYPE_MMXWCG:
4449 s_arm_unwind_save_mmxwcg ();
4450 return;
4451
4452 default:
4453 as_bad (_(".unwind_save does not support this kind of register"));
4454 ignore_rest_of_line ();
4455 }
4456 }
4457
4458
4459 /* Parse an unwind_movsp directive. */
4460
4461 static void
4462 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4463 {
4464 int reg;
4465 valueT op;
4466 int offset;
4467
4468 if (!unwind.proc_start)
4469 as_bad (MISSING_FNSTART);
4470
4471 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4472 if (reg == FAIL)
4473 {
4474 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4475 ignore_rest_of_line ();
4476 return;
4477 }
4478
4479 /* Optional constant. */
4480 if (skip_past_comma (&input_line_pointer) != FAIL)
4481 {
4482 if (immediate_for_directive (&offset) == FAIL)
4483 return;
4484 }
4485 else
4486 offset = 0;
4487
4488 demand_empty_rest_of_line ();
4489
4490 if (reg == REG_SP || reg == REG_PC)
4491 {
4492 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4493 return;
4494 }
4495
4496 if (unwind.fp_reg != REG_SP)
4497 as_bad (_("unexpected .unwind_movsp directive"));
4498
4499 /* Generate opcode to restore the value. */
4500 op = 0x90 | reg;
4501 add_unwind_opcode (op, 1);
4502
4503 /* Record the information for later. */
4504 unwind.fp_reg = reg;
4505 unwind.fp_offset = unwind.frame_size - offset;
4506 unwind.sp_restored = 1;
4507 }
4508
4509 /* Parse an unwind_pad directive. */
4510
4511 static void
4512 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4513 {
4514 int offset;
4515
4516 if (!unwind.proc_start)
4517 as_bad (MISSING_FNSTART);
4518
4519 if (immediate_for_directive (&offset) == FAIL)
4520 return;
4521
4522 if (offset & 3)
4523 {
4524 as_bad (_("stack increment must be multiple of 4"));
4525 ignore_rest_of_line ();
4526 return;
4527 }
4528
4529 /* Don't generate any opcodes, just record the details for later. */
4530 unwind.frame_size += offset;
4531 unwind.pending_offset += offset;
4532
4533 demand_empty_rest_of_line ();
4534 }
4535
4536 /* Parse an unwind_setfp directive. */
4537
4538 static void
4539 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4540 {
4541 int sp_reg;
4542 int fp_reg;
4543 int offset;
4544
4545 if (!unwind.proc_start)
4546 as_bad (MISSING_FNSTART);
4547
4548 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4549 if (skip_past_comma (&input_line_pointer) == FAIL)
4550 sp_reg = FAIL;
4551 else
4552 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4553
4554 if (fp_reg == FAIL || sp_reg == FAIL)
4555 {
4556 as_bad (_("expected <reg>, <reg>"));
4557 ignore_rest_of_line ();
4558 return;
4559 }
4560
4561 /* Optional constant. */
4562 if (skip_past_comma (&input_line_pointer) != FAIL)
4563 {
4564 if (immediate_for_directive (&offset) == FAIL)
4565 return;
4566 }
4567 else
4568 offset = 0;
4569
4570 demand_empty_rest_of_line ();
4571
4572 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4573 {
4574 as_bad (_("register must be either sp or set by a previous"
4575 "unwind_movsp directive"));
4576 return;
4577 }
4578
4579 /* Don't generate any opcodes, just record the information for later. */
4580 unwind.fp_reg = fp_reg;
4581 unwind.fp_used = 1;
4582 if (sp_reg == REG_SP)
4583 unwind.fp_offset = unwind.frame_size - offset;
4584 else
4585 unwind.fp_offset -= offset;
4586 }
4587
4588 /* Parse an unwind_raw directive. */
4589
4590 static void
4591 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4592 {
4593 expressionS exp;
4594 /* This is an arbitrary limit. */
4595 unsigned char op[16];
4596 int count;
4597
4598 if (!unwind.proc_start)
4599 as_bad (MISSING_FNSTART);
4600
4601 expression (&exp);
4602 if (exp.X_op == O_constant
4603 && skip_past_comma (&input_line_pointer) != FAIL)
4604 {
4605 unwind.frame_size += exp.X_add_number;
4606 expression (&exp);
4607 }
4608 else
4609 exp.X_op = O_illegal;
4610
4611 if (exp.X_op != O_constant)
4612 {
4613 as_bad (_("expected <offset>, <opcode>"));
4614 ignore_rest_of_line ();
4615 return;
4616 }
4617
4618 count = 0;
4619
4620 /* Parse the opcode. */
4621 for (;;)
4622 {
4623 if (count >= 16)
4624 {
4625 as_bad (_("unwind opcode too long"));
4626 ignore_rest_of_line ();
4627 }
4628 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4629 {
4630 as_bad (_("invalid unwind opcode"));
4631 ignore_rest_of_line ();
4632 return;
4633 }
4634 op[count++] = exp.X_add_number;
4635
4636 /* Parse the next byte. */
4637 if (skip_past_comma (&input_line_pointer) == FAIL)
4638 break;
4639
4640 expression (&exp);
4641 }
4642
4643 /* Add the opcode bytes in reverse order. */
4644 while (count--)
4645 add_unwind_opcode (op[count], 1);
4646
4647 demand_empty_rest_of_line ();
4648 }
4649
4650
4651 /* Parse a .eabi_attribute directive. */
4652
4653 static void
4654 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4655 {
4656 int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4657
4658 if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4659 attributes_set_explicitly[tag] = 1;
4660 }
4661
4662 /* Emit a tls fix for the symbol. */
4663
4664 static void
4665 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
4666 {
4667 char *p;
4668 expressionS exp;
4669 #ifdef md_flush_pending_output
4670 md_flush_pending_output ();
4671 #endif
4672
4673 #ifdef md_cons_align
4674 md_cons_align (4);
4675 #endif
4676
4677 /* Since we're just labelling the code, there's no need to define a
4678 mapping symbol. */
4679 expression (&exp);
4680 p = obstack_next_free (&frchain_now->frch_obstack);
4681 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
4682 thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4683 : BFD_RELOC_ARM_TLS_DESCSEQ);
4684 }
4685 #endif /* OBJ_ELF */
4686
4687 static void s_arm_arch (int);
4688 static void s_arm_object_arch (int);
4689 static void s_arm_cpu (int);
4690 static void s_arm_fpu (int);
4691 static void s_arm_arch_extension (int);
4692
4693 #ifdef TE_PE
4694
4695 static void
4696 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4697 {
4698 expressionS exp;
4699
4700 do
4701 {
4702 expression (&exp);
4703 if (exp.X_op == O_symbol)
4704 exp.X_op = O_secrel;
4705
4706 emit_expr (&exp, 4);
4707 }
4708 while (*input_line_pointer++ == ',');
4709
4710 input_line_pointer--;
4711 demand_empty_rest_of_line ();
4712 }
4713 #endif /* TE_PE */
4714
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.  */
  { "req",	   s_req,	  0 },
  /* Following two are likewise never called.  */
  { "dn",	   s_dn,	  0 },
  { "qn",	   s_qn,	  0 },
  { "unreq",	   s_unreq,	  0 },
  { "bss",	   s_bss,	  0 },
  { "align",	   s_align_ptwo,  2 },
  /* Instruction-set state switching (ARM vs Thumb).  */
  { "arm",	   s_arm,	  0 },
  { "thumb",	   s_thumb,	  0 },
  { "code",	   s_code,	  0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func",  s_thumb_func,  0 },
  { "thumb_set",   s_thumb_set,	  0 },
  { "even",	   s_even,	  0 },
  /* Literal pool management; ".pool" is an alias for ".ltorg".  */
  { "ltorg",	   s_ltorg,	  0 },
  { "pool",	   s_ltorg,	  0 },
  { "syntax",	   s_syntax,	  0 },
  /* Target CPU / architecture / FPU selection.  */
  { "cpu",	   s_arm_cpu,	  0 },
  { "arch",	   s_arm_arch,	  0 },
  { "object_arch", s_arm_object_arch,	0 },
  { "fpu",	   s_arm_fpu,	  0 },
  { "arch_extension", s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  { "word",	   s_arm_elf_cons, 4 },
  { "long",	   s_arm_elf_cons, 4 },
  { "inst.n",	   s_arm_elf_inst, 2 },
  { "inst.w",	   s_arm_elf_inst, 4 },
  { "inst",	   s_arm_elf_inst, 0 },
  { "rel31",	   s_arm_rel31,	  0 },
  /* EHABI stack-unwinding directives; ".vsave" shares the ".save"
     handler, distinguished by the integer argument.  */
  { "fnstart",		s_arm_unwind_fnstart,	0 },
  { "fnend",		s_arm_unwind_fnend,	0 },
  { "cantunwind",	s_arm_unwind_cantunwind, 0 },
  { "personality",	s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata",	s_arm_unwind_handlerdata, 0 },
  { "save",		s_arm_unwind_save,	0 },
  { "vsave",		s_arm_unwind_save,	1 },
  { "movsp",		s_arm_unwind_movsp,	0 },
  { "pad",		s_arm_unwind_pad,	0 },
  { "setfp",		s_arm_unwind_setfp,	0 },
  { "unwind_raw",	s_arm_unwind_raw,	0 },
  { "eabi_attribute",	s_arm_eabi_attribute, 0 },
  { "tlsdescseq",	s_arm_tls_descseq, 0 },
#else
  { "word",	   cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  /* FPA extended-precision ('x') and packed-decimal ('p') constants.  */
  { "extend",	   float_cons, 'x' },
  { "ldouble",	   float_cons, 'x' },
  { "packed",	   float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif

  /* These are for compatibility with CodeComposer Studio.  */
  {"ref",	   s_ccs_ref,	       0},
  {"def",	   s_ccs_def,	       0},
  {"asmfunc",	   s_ccs_asmfunc,      0},
  {"endasmfunc",   s_ccs_endasmfunc,   0},

  { 0, 0, 0 }
};
4794 \f
4795 /* Parser functions used exclusively in instruction operands. */
4796
4797 /* Generic immediate-value read function for use in insn parsing.
4798 STR points to the beginning of the immediate (the leading #);
4799 VAL receives the value; if the value is outside [MIN, MAX]
4800 issue an error. PREFIX_OPT is true if the immediate prefix is
4801 optional. */
4802
4803 static int
4804 parse_immediate (char **str, int *val, int min, int max,
4805 bfd_boolean prefix_opt)
4806 {
4807 expressionS exp;
4808
4809 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4810 if (exp.X_op != O_constant)
4811 {
4812 inst.error = _("constant expression required");
4813 return FAIL;
4814 }
4815
4816 if (exp.X_add_number < min || exp.X_add_number > max)
4817 {
4818 inst.error = _("immediate value out of range");
4819 return FAIL;
4820 }
4821
4822 *val = exp.X_add_number;
4823 return SUCCESS;
4824 }
4825
/* Less-generic immediate-value read function with the possibility of loading a
   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
   instructions.  Puts the result directly in inst.operands[i]: the low 32
   bits in .imm, and (for 64-bit values) the high 32 bits in .reg with
   .regisimm set.  */

static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bfd_boolean allow_symbol_p)
{
  expressionS exp;
  /* The caller may want the parsed expression back; otherwise use a local.  */
  expressionS *exp_p = in_exp ? in_exp : &exp;
  char *ptr = *str;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
    {
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
				  & 0xffffffff);
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
    {
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg,  in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Assemble the two 32-bit halves littlenum by littlenum.  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
    /* Anything else is rejected, unless a bare symbol is permitted.  */
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
4898
/* Returns the pseudo-register number of an FPA immediate constant,
   or FAIL if there isn't a valid constant here.  The value returned is
   the index into fp_values biased by 8.  */

static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *	 save_in;
  expressionS	 exp;
  int		 i;
  int		 j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    return i + 8;
	  /* Prefix matched but more text follows - back out and retry.  */
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Compare the littlenums against every known FPA constant.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
#define X_PRECISION 5
#define E_PRECISION 15L
      if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  /* Restore the global input pointer before returning.  */
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  /* Failure: restore the caller's input state before reporting.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
4991
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000.  */

static int
is_quarter_float (unsigned imm)
{
  /* Bits 25-30 must read 0b011111 when bit 29 is set, 0b100000 when it
     is clear; bits 0-18 must all be zero.  */
  unsigned expected = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;

  if ((imm & 0x7ffff) != 0)
    return 0;
  return (imm & 0x7e000000) == expected;
}
5001
5002
/* Detect the presence of a floating point or integer zero constant,
   i.e. #0.0 or #0.  */

static bfd_boolean
parse_ifimm_zero (char **in)
{
  int error_code;

  if (!is_immediate_prefix (**in))
    {
      /* In unified syntax, all prefixes are optional.  */
      if (!unified_syntax)
	return FALSE;
    }
  else
    ++*in;

  /* Accept #0x0 as a synonym for #0.  */
  if (strncmp (*in, "0x", 2) == 0)
    {
      int val;
      /* Range [0, 0]: only the literal value zero passes.  */
      if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
	return FALSE;
      return TRUE;
    }

  error_code = atof_generic (in, ".", EXP_CHARS,
			     &generic_floating_point_number);

  /* NOTE(review): a successful parse with sign '+' and low > leader
     appears to be how atof_generic represents positive zero (no
     significant littlenums) — confirm against atof_generic.  */
  if (!error_code
      && generic_floating_point_number.sign == '+'
      && (generic_floating_point_number.low
	  > generic_floating_point_number.leader))
    return TRUE;

  return FALSE;
}
5040
/* Parse an 8-bit "quarter-precision" floating point number of the form:
   0baBbbbbbc defgh000 00000000 00000000.
   The zero and minus-zero cases need special handling, since they can't be
   encoded in the "quarter-precision" float format, but can nonetheless be
   loaded as integer constants.  */

static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    return FAIL;
  else
    {
      /* Scan forward to whitespace/end looking for a float marker.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Accept a representable quarter-precision value, or +/- zero
	 (sign bit ignored by the second test).  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}
5104
/* Shift operands.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

/* Maps a textual shift mnemonic to its kind; looked up via the
   arm_shift_hsh hash table in parse_shift.  */
struct asm_shift_name
{
  const char *name;
  enum shift_kind kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.  */
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.  */
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.  */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.  */
};
5126
5127 /* Parse a <shift> specifier on an ARM data processing instruction.
5128 This has three forms:
5129
5130 (LSL|LSR|ASL|ASR|ROR) Rs
5131 (LSL|LSR|ASL|ASR|ROR) #imm
5132 RRX
5133
5134 Note that ASL is assimilated to LSL in the instruction encoding, and
5135 RRX to ROR #0 (which cannot be written as such). */
5136
5137 static int
5138 parse_shift (char **str, int i, enum parse_shift_mode mode)
5139 {
5140 const struct asm_shift_name *shift_name;
5141 enum shift_kind shift;
5142 char *s = *str;
5143 char *p = s;
5144 int reg;
5145
5146 for (p = *str; ISALPHA (*p); p++)
5147 ;
5148
5149 if (p == *str)
5150 {
5151 inst.error = _("shift expression expected");
5152 return FAIL;
5153 }
5154
5155 shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
5156 p - *str);
5157
5158 if (shift_name == NULL)
5159 {
5160 inst.error = _("shift expression expected");
5161 return FAIL;
5162 }
5163
5164 shift = shift_name->kind;
5165
5166 switch (mode)
5167 {
5168 case NO_SHIFT_RESTRICT:
5169 case SHIFT_IMMEDIATE: break;
5170
5171 case SHIFT_LSL_OR_ASR_IMMEDIATE:
5172 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
5173 {
5174 inst.error = _("'LSL' or 'ASR' required");
5175 return FAIL;
5176 }
5177 break;
5178
5179 case SHIFT_LSL_IMMEDIATE:
5180 if (shift != SHIFT_LSL)
5181 {
5182 inst.error = _("'LSL' required");
5183 return FAIL;
5184 }
5185 break;
5186
5187 case SHIFT_ASR_IMMEDIATE:
5188 if (shift != SHIFT_ASR)
5189 {
5190 inst.error = _("'ASR' required");
5191 return FAIL;
5192 }
5193 break;
5194
5195 default: abort ();
5196 }
5197
5198 if (shift != SHIFT_RRX)
5199 {
5200 /* Whitespace can appear here if the next thing is a bare digit. */
5201 skip_whitespace (p);
5202
5203 if (mode == NO_SHIFT_RESTRICT
5204 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5205 {
5206 inst.operands[i].imm = reg;
5207 inst.operands[i].immisreg = 1;
5208 }
5209 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5210 return FAIL;
5211 }
5212 inst.operands[i].shift_kind = shift;
5213 inst.operands[i].shifted = 1;
5214 *str = p;
5215 return SUCCESS;
5216 }
5217
5218 /* Parse a <shifter_operand> for an ARM data processing instruction:
5219
5220 #<immediate>
5221 #<immediate>, <rotate>
5222 <Rm>
5223 <Rm>, <shift>
5224
5225 where <shift> is defined by parse_shift above, and <rotate> is a
5226 multiple of 2 between 0 and 30. Validation of immediate operands
5227 is deferred to md_apply_fix. */
5228
5229 static int
5230 parse_shifter_operand (char **str, int i)
5231 {
5232 int value;
5233 expressionS exp;
5234
5235 if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
5236 {
5237 inst.operands[i].reg = value;
5238 inst.operands[i].isreg = 1;
5239
5240 /* parse_shift will override this if appropriate */
5241 inst.reloc.exp.X_op = O_constant;
5242 inst.reloc.exp.X_add_number = 0;
5243
5244 if (skip_past_comma (str) == FAIL)
5245 return SUCCESS;
5246
5247 /* Shift operation on register. */
5248 return parse_shift (str, i, NO_SHIFT_RESTRICT);
5249 }
5250
5251 if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
5252 return FAIL;
5253
5254 if (skip_past_comma (str) == SUCCESS)
5255 {
5256 /* #x, y -- ie explicit rotation by Y. */
5257 if (my_get_expression (&exp, str, GE_NO_PREFIX))
5258 return FAIL;
5259
5260 if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
5261 {
5262 inst.error = _("constant expression expected");
5263 return FAIL;
5264 }
5265
5266 value = exp.X_add_number;
5267 if (value < 0 || value > 30 || value % 2 != 0)
5268 {
5269 inst.error = _("invalid rotation");
5270 return FAIL;
5271 }
5272 if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
5273 {
5274 inst.error = _("invalid constant");
5275 return FAIL;
5276 }
5277
5278 /* Encode as specified. */
5279 inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
5280 return SUCCESS;
5281 }
5282
5283 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
5284 inst.reloc.pc_rel = 0;
5285 return SUCCESS;
5286 }
5287
/* Group relocation information.  Each entry in the table contains the
   textual name of the relocation as may appear in assembler source
   and must end with a colon.
   Along with this textual name are the relocation codes to be used if
   the corresponding instruction is an ALU instruction (ADD or SUB only),
   an LDR, an LDRS, or an LDC.  */

struct group_reloc_table_entry
{
  const char *name;	/* Name as written in source, without the colon.  */
  int alu_code;		/* BFD reloc for ALU (ADD/SUB) instructions.  */
  int ldr_code;		/* BFD reloc for LDR instructions.  */
  int ldrs_code;	/* BFD reloc for LDRS instructions.  */
  int ldc_code;		/* BFD reloc for LDC instructions.  */
};

typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC
} group_reloc_type;
5312
/* Each row gives a group-relocation name followed by the BFD reloc codes
   for the ALU, LDR, LDRS and LDC instruction classes; 0 marks a
   combination that is not permitted (see parse_address_main).  */
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 },	/* LDC */
    /* Absolute thumb alu relocations.  */
    { "lower0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "lower8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 } };				/* LDC.  */
5387
5388 /* Given the address of a pointer pointing to the textual name of a group
5389 relocation as may appear in assembler source, attempt to find its details
5390 in group_reloc_table. The pointer will be updated to the character after
5391 the trailing colon. On failure, FAIL will be returned; SUCCESS
5392 otherwise. On success, *entry will be updated to point at the relevant
5393 group_reloc_table entry. */
5394
5395 static int
5396 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5397 {
5398 unsigned int i;
5399 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5400 {
5401 int length = strlen (group_reloc_table[i].name);
5402
5403 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5404 && (*str)[length] == ':')
5405 {
5406 *out = &group_reloc_table[i];
5407 *str += (length + 1);
5408 return SUCCESS;
5409 }
5410 }
5411
5412 return FAIL;
5413 }
5414
5415 /* Parse a <shifter_operand> for an ARM data processing instruction
5416 (as for parse_shifter_operand) where group relocations are allowed:
5417
5418 #<immediate>
5419 #<immediate>, <rotate>
5420 #:<group_reloc>:<expression>
5421 <Rm>
5422 <Rm>, <shift>
5423
5424 where <group_reloc> is one of the strings defined in group_reloc_table.
5425 The hashes are optional.
5426
5427 Everything else is as for parse_shifter_operand. */
5428
5429 static parse_operand_result
5430 parse_shifter_operand_group_reloc (char **str, int i)
5431 {
5432 /* Determine if we have the sequence of characters #: or just :
5433 coming next. If we do, then we check for a group relocation.
5434 If we don't, punt the whole lot to parse_shifter_operand. */
5435
5436 if (((*str)[0] == '#' && (*str)[1] == ':')
5437 || (*str)[0] == ':')
5438 {
5439 struct group_reloc_table_entry *entry;
5440
5441 if ((*str)[0] == '#')
5442 (*str) += 2;
5443 else
5444 (*str)++;
5445
5446 /* Try to parse a group relocation. Anything else is an error. */
5447 if (find_group_reloc_table_entry (str, &entry) == FAIL)
5448 {
5449 inst.error = _("unknown group relocation");
5450 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5451 }
5452
5453 /* We now have the group relocation table entry corresponding to
5454 the name in the assembler source. Next, we parse the expression. */
5455 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
5456 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5457
5458 /* Record the relocation type (always the ALU variant here). */
5459 inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
5460 gas_assert (inst.reloc.type != 0);
5461
5462 return PARSE_OPERAND_SUCCESS;
5463 }
5464 else
5465 return parse_shifter_operand (str, i) == SUCCESS
5466 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5467
5468 /* Never reached. */
5469 }
5470
5471 /* Parse a Neon alignment expression. Information is written to
5472 inst.operands[i]. We assume the initial ':' has been skipped.
5473
5474 align .imm = align << 8, .immisalign=1, .preind=0 */
5475 static parse_operand_result
5476 parse_neon_alignment (char **str, int i)
5477 {
5478 char *p = *str;
5479 expressionS exp;
5480
5481 my_get_expression (&exp, &p, GE_NO_PREFIX);
5482
5483 if (exp.X_op != O_constant)
5484 {
5485 inst.error = _("alignment must be constant");
5486 return PARSE_OPERAND_FAIL;
5487 }
5488
5489 inst.operands[i].imm = exp.X_add_number << 8;
5490 inst.operands[i].immisalign = 1;
5491 /* Alignments are not pre-indexes. */
5492 inst.operands[i].preind = 0;
5493
5494 *str = p;
5495 return PARSE_OPERAND_SUCCESS;
5496 }
5497
/* Parse all forms of an ARM address expression.  Information is written
   to inst.operands[i] and/or inst.reloc.

   Preindexed addressing (.preind=1):

   [Rn, #offset]       .reg=Rn .reloc.exp=offset
   [Rn, +/-Rm]	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   [Rn, +/-Rm, shift]  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
		       .shift_kind=shift .reloc.exp=shift_imm

   These three may have a trailing ! which causes .writeback to be set also.

   Postindexed addressing (.postind=1, .writeback=1):

   [Rn], #offset       .reg=Rn .reloc.exp=offset
   [Rn], +/-Rm	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   [Rn], +/-Rm, shift  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
		       .shift_kind=shift .reloc.exp=shift_imm

   Unindexed addressing (.preind=0, .postind=0):

   [Rn], {option}      .reg=Rn .imm=option .immisreg=0

   Other:

   [Rn]{!}	       shorthand for [Rn,#0]{!}
   =immediate	       .isreg=0 .reloc.exp=immediate
   label	       .reg=PC .reloc.pc_rel=1 .reloc.exp=label

  It is the caller's responsibility to check for addressing modes not
  supported by the instruction, and to set inst.reloc.type.  */

static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  if (skip_past_char (&p, '[') == FAIL)
    {
      /* No '[': either a literal-pool load (=imm) or a bare label.  */
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.reloc.pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;

	  if (my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX_BIG))
	    return PARSE_OPERAND_FAIL;
	}
      else if (parse_big_immediate (&p, i, &inst.reloc.exp,
				    /*allow_symbol_p=*/TRUE))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* PR gas/14887: Allow for whitespace after the opening bracket.  */
  skip_whitespace (p);

  /* The base register is mandatory.  */
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  if (skip_past_comma (&p) == SUCCESS)
    {
      /* [Rn, ... : pre-indexed forms.  */
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  /* Register offset, optionally shifted.  */
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here. This may be subject to
	     change.  */
	  parse_operand_result result = parse_neon_alignment (&p, i);

	  if (result != PARSE_OPERAND_SUCCESS)
	    return result;
	}
      else
	{
	  if (inst.operands[i].negative)
	    {
	      /* The '-' consumed above was not followed by a register;
		 back up so the expression parser sees it.  */
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		case GROUP_LDR:
		  inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
		  break;

		case GROUP_LDRS:
		  inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
		  break;

		case GROUP_LDC:
		  inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
		  break;

		default:
		  gas_assert (0);
		}

	      /* A zero code marks a reloc/instruction-class combination
		 the table does not permit.  */
	      if (inst.reloc.type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    {
	      char *q = p;

	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }
  else if (skip_past_char (&p, ':') == SUCCESS)
    {
      /* FIXME: '@' should be used here, but it's filtered out by generic code
	 before we get to see it here. This may be subject to change.  */
      parse_operand_result result = parse_neon_alignment (&p, i);

      if (result != PARSE_OPERAND_SUCCESS)
	return result;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  /* [Rn], ... : post-indexed forms, which always write back.  */
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already. If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      char *q = p;

	      if (inst.operands[i].negative)
		{
		  /* As above: '-' was not followed by a register.  */
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
5783
5784 static int
5785 parse_address (char **str, int i)
5786 {
5787 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5788 ? SUCCESS : FAIL;
5789 }
5790
5791 static parse_operand_result
5792 parse_address_group_reloc (char **str, int i, group_reloc_type type)
5793 {
5794 return parse_address_main (str, i, 1, type);
5795 }
5796
5797 /* Parse an operand for a MOVW or MOVT instruction. */
5798 static int
5799 parse_half (char **str)
5800 {
5801 char * p;
5802
5803 p = *str;
5804 skip_past_char (&p, '#');
5805 if (strncasecmp (p, ":lower16:", 9) == 0)
5806 inst.reloc.type = BFD_RELOC_ARM_MOVW;
5807 else if (strncasecmp (p, ":upper16:", 9) == 0)
5808 inst.reloc.type = BFD_RELOC_ARM_MOVT;
5809
5810 if (inst.reloc.type != BFD_RELOC_UNUSED)
5811 {
5812 p += 9;
5813 skip_whitespace (p);
5814 }
5815
5816 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5817 return FAIL;
5818
5819 if (inst.reloc.type == BFD_RELOC_UNUSED)
5820 {
5821 if (inst.reloc.exp.X_op != O_constant)
5822 {
5823 inst.error = _("constant expression expected");
5824 return FAIL;
5825 }
5826 if (inst.reloc.exp.X_add_number < 0
5827 || inst.reloc.exp.X_add_number > 0xffff)
5828 {
5829 inst.error = _("immediate value out of range");
5830 return FAIL;
5831 }
5832 }
5833 *str = p;
5834 return SUCCESS;
5835 }
5836
5837 /* Miscellaneous. */
5838
5839 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
5840 or a bitmask suitable to be or-ed into the ARM msr instruction. */
static int
parse_psr (char **str, bfd_boolean lhs)
{
  char *p;
  unsigned long psr_field;	/* Accumulated mask to return.  */
  const struct asm_psr *psr;
  char *start;
  bfd_boolean is_apsr = FALSE;	/* TRUE once an APSR form is recognized.  */
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698:  If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    m_profile = FALSE;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */
      is_apsr = TRUE;
      psr_field = 0;
    }
  else if (m_profile)
    {
      /* M-profile: look the whole identifier up in the v7m special
	 register table.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* For *psr spellings, stop the name just after the 'r' so any
	 trailing suffix is not part of the table lookup.  */
      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified.  Note that
	 APSR itself is handled above.  */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = TRUE;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  p += 4;	/* Skip over the 4-character register name matched above.  */
check_suffix:
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields.  Each
	     letter sets one bit; a repeated letter sets a sentinel bit
	     (0x20 for nzcvq, 0x2 for g) which is rejected below.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  /* All five of n, z, c, v, q map to the PSR_f field.  */
	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  /* The 'g' bit (GE flags) requires the DSP extension.  */
	  if (g_bit == 0x1)
	    {
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject duplicated bits and partial nzcvq sets (only all-or-none
	     is encodable).  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  /* CPSR/SPSR suffix: look the field name up in the A/R-profile
	     PSR suffix table.  */
	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						      p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;	/* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here).  */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
6035
6036 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
6037 value suitable for splatting into the AIF field of the instruction. */
6038
6039 static int
6040 parse_cps_flags (char **str)
6041 {
6042 int val = 0;
6043 int saw_a_flag = 0;
6044 char *s = *str;
6045
6046 for (;;)
6047 switch (*s++)
6048 {
6049 case '\0': case ',':
6050 goto done;
6051
6052 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
6053 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
6054 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
6055
6056 default:
6057 inst.error = _("unrecognized CPS flag");
6058 return FAIL;
6059 }
6060
6061 done:
6062 if (saw_a_flag == 0)
6063 {
6064 inst.error = _("missing CPS flags");
6065 return FAIL;
6066 }
6067
6068 *str = s - 1;
6069 return val;
6070 }
6071
6072 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6073 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6074
6075 static int
6076 parse_endian_specifier (char **str)
6077 {
6078 int little_endian;
6079 char *s = *str;
6080
6081 if (strncasecmp (s, "BE", 2))
6082 little_endian = 0;
6083 else if (strncasecmp (s, "LE", 2))
6084 little_endian = 1;
6085 else
6086 {
6087 inst.error = _("valid endian specifiers are be or le");
6088 return FAIL;
6089 }
6090
6091 if (ISALNUM (s[2]) || s[2] == '_')
6092 {
6093 inst.error = _("valid endian specifiers are be or le");
6094 return FAIL;
6095 }
6096
6097 *str = s + 2;
6098 return little_endian;
6099 }
6100
6101 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6102 value suitable for poking into the rotate field of an sxt or sxta
6103 instruction, or FAIL on error. */
6104
6105 static int
6106 parse_ror (char **str)
6107 {
6108 int rot;
6109 char *s = *str;
6110
6111 if (strncasecmp (s, "ROR", 3) == 0)
6112 s += 3;
6113 else
6114 {
6115 inst.error = _("missing rotation field after comma");
6116 return FAIL;
6117 }
6118
6119 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6120 return FAIL;
6121
6122 switch (rot)
6123 {
6124 case 0: *str = s; return 0x0;
6125 case 8: *str = s; return 0x1;
6126 case 16: *str = s; return 0x2;
6127 case 24: *str = s; return 0x3;
6128
6129 default:
6130 inst.error = _("rotation can only be 0, 8, 16, or 24");
6131 return FAIL;
6132 }
6133 }
6134
6135 /* Parse a conditional code (from conds[] below). The value returned is in the
6136 range 0 .. 14, or FAIL. */
6137 static int
6138 parse_cond (char **str)
6139 {
6140 char *q;
6141 const struct asm_cond *c;
6142 int n;
6143 /* Condition codes are always 2 characters, so matching up to
6144 3 characters is sufficient. */
6145 char cond[3];
6146
6147 q = *str;
6148 n = 0;
6149 while (ISALPHA (*q) && n < 3)
6150 {
6151 cond[n] = TOLOWER (*q);
6152 q++;
6153 n++;
6154 }
6155
6156 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6157 if (!c)
6158 {
6159 inst.error = _("condition required");
6160 return FAIL;
6161 }
6162
6163 *str = q;
6164 return c->value;
6165 }
6166
6167 /* Record a use of the given feature. */
6168 static void
6169 record_feature_use (const arm_feature_set *feature)
6170 {
6171 if (thumb_mode)
6172 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
6173 else
6174 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
6175 }
6176
6177 /* If the given feature is currently allowed, mark it as used and return TRUE.
6178 Return FALSE otherwise. */
6179 static bfd_boolean
6180 mark_feature_used (const arm_feature_set *feature)
6181 {
6182 /* Ensure the option is currently allowed. */
6183 if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
6184 return FALSE;
6185
6186 /* Add the appropriate architecture feature for the barrier option used. */
6187 record_feature_use (feature);
6188
6189 return TRUE;
6190 }
6191
6192 /* Parse an option for a barrier instruction. Returns the encoding for the
6193 option, or FAIL. */
6194 static int
6195 parse_barrier (char **str)
6196 {
6197 char *p, *q;
6198 const struct asm_barrier_opt *o;
6199
6200 p = q = *str;
6201 while (ISALPHA (*q))
6202 q++;
6203
6204 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6205 q - p);
6206 if (!o)
6207 return FAIL;
6208
6209 if (!mark_feature_used (&o->arch))
6210 return FAIL;
6211
6212 *str = q;
6213 return o->value;
6214 }
6215
6216 /* Parse the operands of a table branch instruction. Similar to a memory
6217 operand. */
6218 static int
6219 parse_tb (char **str)
6220 {
6221 char * p = *str;
6222 int reg;
6223
6224 if (skip_past_char (&p, '[') == FAIL)
6225 {
6226 inst.error = _("'[' expected");
6227 return FAIL;
6228 }
6229
6230 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6231 {
6232 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6233 return FAIL;
6234 }
6235 inst.operands[0].reg = reg;
6236
6237 if (skip_past_comma (&p) == FAIL)
6238 {
6239 inst.error = _("',' expected");
6240 return FAIL;
6241 }
6242
6243 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6244 {
6245 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6246 return FAIL;
6247 }
6248 inst.operands[0].imm = reg;
6249
6250 if (skip_past_comma (&p) == SUCCESS)
6251 {
6252 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6253 return FAIL;
6254 if (inst.reloc.exp.X_add_number != 1)
6255 {
6256 inst.error = _("invalid shift");
6257 return FAIL;
6258 }
6259 inst.operands[0].shifted = 1;
6260 }
6261
6262 if (skip_past_char (&p, ']') == FAIL)
6263 {
6264 inst.error = _("']' expected");
6265 return FAIL;
6266 }
6267 *str = p;
6268 return SUCCESS;
6269 }
6270
6271 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6272 information on the types the operands can take and how they are encoded.
6273 Up to four operands may be read; this function handles setting the
6274 ".present" field for each read operand itself.
6275 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6276 else returns FAIL. */
6277
static int
parse_neon_mov (char **str, int *which_operand)
{
  int i = *which_operand, val;	/* i advances as operands are filled in.  */
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  /* The parse attempts below are ordered: scalar first, then any vector
     register, then an ARM core register, then immediates.  */
  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* D register destination: a second core register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
	       == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands.  Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
6493
6494 /* Use this macro when the operand constraints are different
6495 for ARM and THUMB (e.g. ldrd). */
/* The ARM matcher code is kept in the low 16 bits of the combined value
   and the Thumb code in the high 16 bits; parse_operands picks the
   relevant half based on the current instruction set.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))
6498
/* Matcher codes for parse_operands.  Each code names one operand shape
   that an instruction's operand pattern may request.  NOTE: the order
   matters - codes from OP_FIRST_OPTIONAL onwards mark operands that may
   be omitted, and the MIX_ARM_THUMB_OPERANDS entries pack two codes
   into one value, so do not reorder entries casually.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNSD,	/* Neon single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,	/* VFP single or double-precision register list (& quad) */
  OP_NRDLST,	/* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,	/* Neon element/structure list */

  /* "Either-or" codes: a register of the named class, or an immediate.  */
  OP_RNDQ_I0,	/* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0,	/* VFP S or D reg, or floating point immediate zero.  */
  OP_RR_RNSC,	/* ARM reg or Neon scalar.  */
  OP_RNSD_RNSC,	/* Neon S or D reg, or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC,	/* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,	/* Neon D reg, or Neon scalar.  */
  OP_VMOV,	/* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  OP_RNDQ_I63b,	/* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z,	/* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,	/* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,	/*		   0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS,	/* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,	/* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */
  OP_IROT1,	/* VCADD rotate immediate: 90, 270.  */
  OP_IROT2,	/* VCMLA rotate immediate: 0, 90, 180, 270.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,	/* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc stuff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC,	/* iWMMXt R or C reg */
  OP_RIWC_RIWG,	/* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	/* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	/*			       0 .. 31 */
  OP_oI32b,	/*			       1 .. 32 */
  OP_oI32z,	/*			       0 .. 32 */
  OP_oIffffb,	/*			       0 .. 65535 */
  OP_oI255c,	/*	 curly-brace enclosed, 0 .. 255 */

  OP_oRR,	/* ARM register */
  OP_oRRnpc,	/* ARM register, not the PC */
  OP_oRRnpcsp,	/* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	/* ARM register, not r15, optional trailing ! */
  OP_oRND,	/* Optional Neon double precision register */
  OP_oRNQ,	/* Optional Neon quad precision register */
  OP_oRNDQ,	/* Optional Neon double or quad precision register */
  OP_oRNSDQ,	/* Optional single, double or quad precision vector register */
  OP_oSHll,	/* LSL immediate */
  OP_oSHar,	/* ASR immediate */
  OP_oSHllar,	/* LSL or ASR immediate */
  OP_oROR,	/* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  /* parse_operands treats any code >= this one as omittable and will
     backtrack when it fails to match.  */
  OP_FIRST_OPTIONAL = OP_oI7b
};
6633
6634 /* Generic instruction operand parser. This does no encoding and no
6635 semantic validation; it merely squirrels values away in the inst
6636 structure. Returns SUCCESS or FAIL depending on whether the
6637 specified grammar matched. */
6638 static int
6639 parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
6640 {
6641 unsigned const int *upat = pattern;
6642 char *backtrack_pos = 0;
6643 const char *backtrack_error = 0;
6644 int i, val = 0, backtrack_index = 0;
6645 enum arm_reg_type rtype;
6646 parse_operand_result result;
6647 unsigned int op_parse_code;
6648
6649 #define po_char_or_fail(chr) \
6650 do \
6651 { \
6652 if (skip_past_char (&str, chr) == FAIL) \
6653 goto bad_args; \
6654 } \
6655 while (0)
6656
6657 #define po_reg_or_fail(regtype) \
6658 do \
6659 { \
6660 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6661 & inst.operands[i].vectype); \
6662 if (val == FAIL) \
6663 { \
6664 first_error (_(reg_expected_msgs[regtype])); \
6665 goto failure; \
6666 } \
6667 inst.operands[i].reg = val; \
6668 inst.operands[i].isreg = 1; \
6669 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6670 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6671 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6672 || rtype == REG_TYPE_VFD \
6673 || rtype == REG_TYPE_NQ); \
6674 } \
6675 while (0)
6676
6677 #define po_reg_or_goto(regtype, label) \
6678 do \
6679 { \
6680 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6681 & inst.operands[i].vectype); \
6682 if (val == FAIL) \
6683 goto label; \
6684 \
6685 inst.operands[i].reg = val; \
6686 inst.operands[i].isreg = 1; \
6687 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6688 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6689 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6690 || rtype == REG_TYPE_VFD \
6691 || rtype == REG_TYPE_NQ); \
6692 } \
6693 while (0)
6694
6695 #define po_imm_or_fail(min, max, popt) \
6696 do \
6697 { \
6698 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6699 goto failure; \
6700 inst.operands[i].imm = val; \
6701 } \
6702 while (0)
6703
6704 #define po_scalar_or_goto(elsz, label) \
6705 do \
6706 { \
6707 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6708 if (val == FAIL) \
6709 goto label; \
6710 inst.operands[i].reg = val; \
6711 inst.operands[i].isscalar = 1; \
6712 } \
6713 while (0)
6714
6715 #define po_misc_or_fail(expr) \
6716 do \
6717 { \
6718 if (expr) \
6719 goto failure; \
6720 } \
6721 while (0)
6722
6723 #define po_misc_or_fail_no_backtrack(expr) \
6724 do \
6725 { \
6726 result = expr; \
6727 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6728 backtrack_pos = 0; \
6729 if (result != PARSE_OPERAND_SUCCESS) \
6730 goto failure; \
6731 } \
6732 while (0)
6733
6734 #define po_barrier_or_imm(str) \
6735 do \
6736 { \
6737 val = parse_barrier (&str); \
6738 if (val == FAIL && ! ISALPHA (*str)) \
6739 goto immediate; \
6740 if (val == FAIL \
6741 /* ISB can only take SY as an option. */ \
6742 || ((inst.instruction & 0xf0) == 0x60 \
6743 && val != 0xf)) \
6744 { \
6745 inst.error = _("invalid barrier type"); \
6746 backtrack_pos = 0; \
6747 goto failure; \
6748 } \
6749 } \
6750 while (0)
6751
6752 skip_whitespace (str);
6753
6754 for (i = 0; upat[i] != OP_stop; i++)
6755 {
6756 op_parse_code = upat[i];
6757 if (op_parse_code >= 1<<16)
6758 op_parse_code = thumb ? (op_parse_code >> 16)
6759 : (op_parse_code & ((1<<16)-1));
6760
6761 if (op_parse_code >= OP_FIRST_OPTIONAL)
6762 {
6763 /* Remember where we are in case we need to backtrack. */
6764 gas_assert (!backtrack_pos);
6765 backtrack_pos = str;
6766 backtrack_error = inst.error;
6767 backtrack_index = i;
6768 }
6769
6770 if (i > 0 && (i > 1 || inst.operands[0].present))
6771 po_char_or_fail (',');
6772
6773 switch (op_parse_code)
6774 {
6775 /* Registers */
6776 case OP_oRRnpc:
6777 case OP_oRRnpcsp:
6778 case OP_RRnpc:
6779 case OP_RRnpcsp:
6780 case OP_oRR:
6781 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
6782 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
6783 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
6784 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
6785 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
6786 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
6787 case OP_oRND:
6788 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
6789 case OP_RVC:
6790 po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
6791 break;
6792 /* Also accept generic coprocessor regs for unknown registers. */
6793 coproc_reg:
6794 po_reg_or_fail (REG_TYPE_CN);
6795 break;
6796 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
6797 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
6798 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
6799 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
6800 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
6801 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
6802 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
6803 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
6804 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
6805 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
6806 case OP_oRNQ:
6807 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
6808 case OP_RNSD: po_reg_or_fail (REG_TYPE_NSD); break;
6809 case OP_oRNDQ:
6810 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
6811 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
6812 case OP_oRNSDQ:
6813 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
6814
6815 /* Neon scalar. Using an element size of 8 means that some invalid
6816 scalars are accepted here, so deal with those in later code. */
6817 case OP_RNSC: po_scalar_or_goto (8, failure); break;
6818
6819 case OP_RNDQ_I0:
6820 {
6821 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
6822 break;
6823 try_imm0:
6824 po_imm_or_fail (0, 0, TRUE);
6825 }
6826 break;
6827
6828 case OP_RVSD_I0:
6829 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
6830 break;
6831
6832 case OP_RSVD_FI0:
6833 {
6834 po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
6835 break;
6836 try_ifimm0:
6837 if (parse_ifimm_zero (&str))
6838 inst.operands[i].imm = 0;
6839 else
6840 {
6841 inst.error
6842 = _("only floating point zero is allowed as immediate value");
6843 goto failure;
6844 }
6845 }
6846 break;
6847
6848 case OP_RR_RNSC:
6849 {
6850 po_scalar_or_goto (8, try_rr);
6851 break;
6852 try_rr:
6853 po_reg_or_fail (REG_TYPE_RN);
6854 }
6855 break;
6856
6857 case OP_RNSDQ_RNSC:
6858 {
6859 po_scalar_or_goto (8, try_nsdq);
6860 break;
6861 try_nsdq:
6862 po_reg_or_fail (REG_TYPE_NSDQ);
6863 }
6864 break;
6865
6866 case OP_RNSD_RNSC:
6867 {
6868 po_scalar_or_goto (8, try_s_scalar);
6869 break;
6870 try_s_scalar:
6871 po_scalar_or_goto (4, try_nsd);
6872 break;
6873 try_nsd:
6874 po_reg_or_fail (REG_TYPE_NSD);
6875 }
6876 break;
6877
6878 case OP_RNDQ_RNSC:
6879 {
6880 po_scalar_or_goto (8, try_ndq);
6881 break;
6882 try_ndq:
6883 po_reg_or_fail (REG_TYPE_NDQ);
6884 }
6885 break;
6886
6887 case OP_RND_RNSC:
6888 {
6889 po_scalar_or_goto (8, try_vfd);
6890 break;
6891 try_vfd:
6892 po_reg_or_fail (REG_TYPE_VFD);
6893 }
6894 break;
6895
6896 case OP_VMOV:
6897 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6898 not careful then bad things might happen. */
6899 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
6900 break;
6901
6902 case OP_RNDQ_Ibig:
6903 {
6904 po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
6905 break;
6906 try_immbig:
6907 /* There's a possibility of getting a 64-bit immediate here, so
6908 we need special handling. */
6909 if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
6910 == FAIL)
6911 {
6912 inst.error = _("immediate value is out of range");
6913 goto failure;
6914 }
6915 }
6916 break;
6917
6918 case OP_RNDQ_I63b:
6919 {
6920 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
6921 break;
6922 try_shimm:
6923 po_imm_or_fail (0, 63, TRUE);
6924 }
6925 break;
6926
6927 case OP_RRnpcb:
6928 po_char_or_fail ('[');
6929 po_reg_or_fail (REG_TYPE_RN);
6930 po_char_or_fail (']');
6931 break;
6932
6933 case OP_RRnpctw:
6934 case OP_RRw:
6935 case OP_oRRw:
6936 po_reg_or_fail (REG_TYPE_RN);
6937 if (skip_past_char (&str, '!') == SUCCESS)
6938 inst.operands[i].writeback = 1;
6939 break;
6940
6941 /* Immediates */
6942 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
6943 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
6944 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
6945 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
6946 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
6947 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
6948 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
6949 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
6950 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
6951 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
6952 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
6953 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
6954
6955 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
6956 case OP_oI7b:
6957 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
6958 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
6959 case OP_oI31b:
6960 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
6961 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
6962 case OP_oI32z: po_imm_or_fail ( 0, 32, TRUE); break;
6963 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
6964
6965 /* Immediate variants */
6966 case OP_oI255c:
6967 po_char_or_fail ('{');
6968 po_imm_or_fail (0, 255, TRUE);
6969 po_char_or_fail ('}');
6970 break;
6971
6972 case OP_I31w:
6973 /* The expression parser chokes on a trailing !, so we have
6974 to find it first and zap it. */
6975 {
6976 char *s = str;
6977 while (*s && *s != ',')
6978 s++;
6979 if (s[-1] == '!')
6980 {
6981 s[-1] = '\0';
6982 inst.operands[i].writeback = 1;
6983 }
6984 po_imm_or_fail (0, 31, TRUE);
6985 if (str == s - 1)
6986 str = s;
6987 }
6988 break;
6989
6990 /* Expressions */
6991 case OP_EXPi: EXPi:
6992 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6993 GE_OPT_PREFIX));
6994 break;
6995
6996 case OP_EXP:
6997 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6998 GE_NO_PREFIX));
6999 break;
7000
7001 case OP_EXPr: EXPr:
7002 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
7003 GE_NO_PREFIX));
7004 if (inst.reloc.exp.X_op == O_symbol)
7005 {
7006 val = parse_reloc (&str);
7007 if (val == -1)
7008 {
7009 inst.error = _("unrecognized relocation suffix");
7010 goto failure;
7011 }
7012 else if (val != BFD_RELOC_UNUSED)
7013 {
7014 inst.operands[i].imm = val;
7015 inst.operands[i].hasreloc = 1;
7016 }
7017 }
7018 break;
7019
7020 /* Operand for MOVW or MOVT. */
7021 case OP_HALF:
7022 po_misc_or_fail (parse_half (&str));
7023 break;
7024
7025 /* Register or expression. */
7026 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
7027 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
7028
7029 /* Register or immediate. */
7030 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
7031 I0: po_imm_or_fail (0, 0, FALSE); break;
7032
7033 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
7034 IF:
7035 if (!is_immediate_prefix (*str))
7036 goto bad_args;
7037 str++;
7038 val = parse_fpa_immediate (&str);
7039 if (val == FAIL)
7040 goto failure;
7041 /* FPA immediates are encoded as registers 8-15.
7042 parse_fpa_immediate has already applied the offset. */
7043 inst.operands[i].reg = val;
7044 inst.operands[i].isreg = 1;
7045 break;
7046
7047 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
7048 I32z: po_imm_or_fail (0, 32, FALSE); break;
7049
7050 /* Two kinds of register. */
7051 case OP_RIWR_RIWC:
7052 {
7053 struct reg_entry *rege = arm_reg_parse_multi (&str);
7054 if (!rege
7055 || (rege->type != REG_TYPE_MMXWR
7056 && rege->type != REG_TYPE_MMXWC
7057 && rege->type != REG_TYPE_MMXWCG))
7058 {
7059 inst.error = _("iWMMXt data or control register expected");
7060 goto failure;
7061 }
7062 inst.operands[i].reg = rege->number;
7063 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
7064 }
7065 break;
7066
7067 case OP_RIWC_RIWG:
7068 {
7069 struct reg_entry *rege = arm_reg_parse_multi (&str);
7070 if (!rege
7071 || (rege->type != REG_TYPE_MMXWC
7072 && rege->type != REG_TYPE_MMXWCG))
7073 {
7074 inst.error = _("iWMMXt control register expected");
7075 goto failure;
7076 }
7077 inst.operands[i].reg = rege->number;
7078 inst.operands[i].isreg = 1;
7079 }
7080 break;
7081
7082 /* Misc */
7083 case OP_CPSF: val = parse_cps_flags (&str); break;
7084 case OP_ENDI: val = parse_endian_specifier (&str); break;
7085 case OP_oROR: val = parse_ror (&str); break;
7086 case OP_COND: val = parse_cond (&str); break;
7087 case OP_oBARRIER_I15:
7088 po_barrier_or_imm (str); break;
7089 immediate:
7090 if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
7091 goto failure;
7092 break;
7093
7094 case OP_wPSR:
7095 case OP_rPSR:
7096 po_reg_or_goto (REG_TYPE_RNB, try_psr);
7097 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
7098 {
7099 inst.error = _("Banked registers are not available with this "
7100 "architecture.");
7101 goto failure;
7102 }
7103 break;
7104 try_psr:
7105 val = parse_psr (&str, op_parse_code == OP_wPSR);
7106 break;
7107
7108 case OP_APSR_RR:
7109 po_reg_or_goto (REG_TYPE_RN, try_apsr);
7110 break;
7111 try_apsr:
7112 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7113 instruction). */
7114 if (strncasecmp (str, "APSR_", 5) == 0)
7115 {
7116 unsigned found = 0;
7117 str += 5;
7118 while (found < 15)
7119 switch (*str++)
7120 {
7121 case 'c': found = (found & 1) ? 16 : found | 1; break;
7122 case 'n': found = (found & 2) ? 16 : found | 2; break;
7123 case 'z': found = (found & 4) ? 16 : found | 4; break;
7124 case 'v': found = (found & 8) ? 16 : found | 8; break;
7125 default: found = 16;
7126 }
7127 if (found != 15)
7128 goto failure;
7129 inst.operands[i].isvec = 1;
7130 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7131 inst.operands[i].reg = REG_PC;
7132 }
7133 else
7134 goto failure;
7135 break;
7136
7137 case OP_TB:
7138 po_misc_or_fail (parse_tb (&str));
7139 break;
7140
7141 /* Register lists. */
7142 case OP_REGLST:
7143 val = parse_reg_list (&str);
7144 if (*str == '^')
7145 {
7146 inst.operands[i].writeback = 1;
7147 str++;
7148 }
7149 break;
7150
7151 case OP_VRSLST:
7152 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
7153 break;
7154
7155 case OP_VRDLST:
7156 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
7157 break;
7158
7159 case OP_VRSDLST:
7160 /* Allow Q registers too. */
7161 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7162 REGLIST_NEON_D);
7163 if (val == FAIL)
7164 {
7165 inst.error = NULL;
7166 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7167 REGLIST_VFP_S);
7168 inst.operands[i].issingle = 1;
7169 }
7170 break;
7171
7172 case OP_NRDLST:
7173 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7174 REGLIST_NEON_D);
7175 break;
7176
7177 case OP_NSTRLST:
7178 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7179 &inst.operands[i].vectype);
7180 break;
7181
7182 /* Addressing modes */
7183 case OP_ADDR:
7184 po_misc_or_fail (parse_address (&str, i));
7185 break;
7186
7187 case OP_ADDRGLDR:
7188 po_misc_or_fail_no_backtrack (
7189 parse_address_group_reloc (&str, i, GROUP_LDR));
7190 break;
7191
7192 case OP_ADDRGLDRS:
7193 po_misc_or_fail_no_backtrack (
7194 parse_address_group_reloc (&str, i, GROUP_LDRS));
7195 break;
7196
7197 case OP_ADDRGLDC:
7198 po_misc_or_fail_no_backtrack (
7199 parse_address_group_reloc (&str, i, GROUP_LDC));
7200 break;
7201
7202 case OP_SH:
7203 po_misc_or_fail (parse_shifter_operand (&str, i));
7204 break;
7205
7206 case OP_SHG:
7207 po_misc_or_fail_no_backtrack (
7208 parse_shifter_operand_group_reloc (&str, i));
7209 break;
7210
7211 case OP_oSHll:
7212 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
7213 break;
7214
7215 case OP_oSHar:
7216 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
7217 break;
7218
7219 case OP_oSHllar:
7220 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
7221 break;
7222
7223 default:
7224 as_fatal (_("unhandled operand code %d"), op_parse_code);
7225 }
7226
7227 /* Various value-based sanity checks and shared operations. We
7228 do not signal immediate failures for the register constraints;
7229 this allows a syntax error to take precedence. */
7230 switch (op_parse_code)
7231 {
7232 case OP_oRRnpc:
7233 case OP_RRnpc:
7234 case OP_RRnpcb:
7235 case OP_RRw:
7236 case OP_oRRw:
7237 case OP_RRnpc_I0:
7238 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
7239 inst.error = BAD_PC;
7240 break;
7241
7242 case OP_oRRnpcsp:
7243 case OP_RRnpcsp:
7244 if (inst.operands[i].isreg)
7245 {
7246 if (inst.operands[i].reg == REG_PC)
7247 inst.error = BAD_PC;
7248 else if (inst.operands[i].reg == REG_SP
7249 /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
7250 relaxed since ARMv8-A. */
7251 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
7252 {
7253 gas_assert (thumb);
7254 inst.error = BAD_SP;
7255 }
7256 }
7257 break;
7258
7259 case OP_RRnpctw:
7260 if (inst.operands[i].isreg
7261 && inst.operands[i].reg == REG_PC
7262 && (inst.operands[i].writeback || thumb))
7263 inst.error = BAD_PC;
7264 break;
7265
7266 case OP_CPSF:
7267 case OP_ENDI:
7268 case OP_oROR:
7269 case OP_wPSR:
7270 case OP_rPSR:
7271 case OP_COND:
7272 case OP_oBARRIER_I15:
7273 case OP_REGLST:
7274 case OP_VRSLST:
7275 case OP_VRDLST:
7276 case OP_VRSDLST:
7277 case OP_NRDLST:
7278 case OP_NSTRLST:
7279 if (val == FAIL)
7280 goto failure;
7281 inst.operands[i].imm = val;
7282 break;
7283
7284 default:
7285 break;
7286 }
7287
7288 /* If we get here, this operand was successfully parsed. */
7289 inst.operands[i].present = 1;
7290 continue;
7291
7292 bad_args:
7293 inst.error = BAD_ARGS;
7294
7295 failure:
7296 if (!backtrack_pos)
7297 {
7298 /* The parse routine should already have set inst.error, but set a
7299 default here just in case. */
7300 if (!inst.error)
7301 inst.error = _("syntax error");
7302 return FAIL;
7303 }
7304
7305 /* Do not backtrack over a trailing optional argument that
7306 absorbed some text. We will only fail again, with the
7307 'garbage following instruction' error message, which is
7308 probably less helpful than the current one. */
7309 if (backtrack_index == i && backtrack_pos != str
7310 && upat[i+1] == OP_stop)
7311 {
7312 if (!inst.error)
7313 inst.error = _("syntax error");
7314 return FAIL;
7315 }
7316
7317 /* Try again, skipping the optional argument at backtrack_pos. */
7318 str = backtrack_pos;
7319 inst.error = backtrack_error;
7320 inst.operands[backtrack_index].present = 0;
7321 i = backtrack_index;
7322 backtrack_pos = 0;
7323 }
7324
7325 /* Check that we have parsed all the arguments. */
7326 if (*str != '\0' && !inst.error)
7327 inst.error = _("garbage following instruction");
7328
7329 return inst.error ? FAIL : SUCCESS;
7330 }
7331
7332 #undef po_char_or_fail
7333 #undef po_reg_or_fail
7334 #undef po_reg_or_goto
7335 #undef po_imm_or_fail
7336 #undef po_scalar_or_fail
7337 #undef po_barrier_or_imm
7338
/* Shorthand macro for instruction encoding functions issuing errors.
   If EXPR is true, record ERR as the current instruction's error message
   in inst.error and return immediately from the enclosing (void)
   encoding function.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
7350
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.

   Before ARMv8-A, REG_PC and REG_SP were not allowed in quite a few
   places, while the restriction on REG_SP was relaxed since ARMv8-A.

   On failure this sets inst.error and returns from the enclosing
   (void) encoder.  Note that REG is evaluated more than once, so pass
   a simple lvalue or constant expression.  */
#define reject_bad_reg(reg)					\
  do								\
    if (reg == REG_PC)						\
      {								\
	inst.error = BAD_PC;					\
	return;							\
      }								\
    else if (reg == REG_SP					\
	     && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))	\
      {								\
	inst.error = BAD_SP;					\
	return;							\
      }								\
  while (0)
7371
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  The warning is only emitted when deprecation warnings
   are enabled (warn_on_deprecated).  */
#define warn_deprecated_sp(reg)			\
  do						\
    if (warn_on_deprecated && reg == REG_SP)	\
      as_tsktsk (_("use of r13 is deprecated"));	\
  while (0)
7379
/* Functions for operand encoding.  ARM, then Thumb.  */

/* Rotate the 32-bit value V left by N bit positions.  Both shift
   counts are masked with 31 so that N == 0 (and N == 32) do not
   produce an undefined 32-bit shift.  V and N are each evaluated
   twice; avoid side effects in the arguments.  */
#define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7383
/* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.

   The only binary encoding difference is the Coprocessor number.  Coprocessor
   9 is used for half-precision calculations or conversions.  The format of the
   instruction is the same as the equivalent Coprocessor 10 instruction that
   exists for Single-Precision operation.  */

static void
do_scalar_fp16_v82_encode (void)
{
  /* Conditional execution is UNPREDICTABLE for these instructions:
     warn, but still assemble what was written.  */
  if (inst.cond != COND_ALWAYS)
    as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
	       " the behaviour is UNPREDICTABLE"));
  /* Hard error (early return via constraint) if the selected CPU does
     not provide the fp16 extension.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
	      _(BAD_FP16));

  /* Replace the coprocessor field (bits [11:8]) with coprocessor 9.  */
  inst.instruction = (inst.instruction & 0xfffff0ff) | 0x900;
  mark_feature_used (&arm_ext_fp16);
}
7403
7404 /* If VAL can be encoded in the immediate field of an ARM instruction,
7405 return the encoded form. Otherwise, return FAIL. */
7406
7407 static unsigned int
7408 encode_arm_immediate (unsigned int val)
7409 {
7410 unsigned int a, i;
7411
7412 if (val <= 0xff)
7413 return val;
7414
7415 for (i = 2; i < 32; i += 2)
7416 if ((a = rotate_left (val, i)) <= 0xff)
7417 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
7418
7419 return FAIL;
7420 }
7421
7422 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7423 return the encoded form. Otherwise, return FAIL. */
7424 static unsigned int
7425 encode_thumb32_immediate (unsigned int val)
7426 {
7427 unsigned int a, i;
7428
7429 if (val <= 0xff)
7430 return val;
7431
7432 for (i = 1; i <= 24; i++)
7433 {
7434 a = val >> i;
7435 if ((val & ~(0xff << i)) == 0)
7436 return ((val >> i) & 0x7f) | ((32 - i) << 7);
7437 }
7438
7439 a = val & 0xff;
7440 if (val == ((a << 16) | a))
7441 return 0x100 | a;
7442 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
7443 return 0x300 | a;
7444
7445 a = val & 0xff00;
7446 if (val == ((a << 16) | a))
7447 return 0x200 | (a >> 8);
7448
7449 return FAIL;
7450 }
7451 /* Encode a VFP SP or DP register number into inst.instruction. */
7452
7453 static void
7454 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
7455 {
7456 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
7457 && reg > 15)
7458 {
7459 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
7460 {
7461 if (thumb_mode)
7462 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
7463 fpu_vfp_ext_d32);
7464 else
7465 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
7466 fpu_vfp_ext_d32);
7467 }
7468 else
7469 {
7470 first_error (_("D register out of range for selected VFP version"));
7471 return;
7472 }
7473 }
7474
7475 switch (pos)
7476 {
7477 case VFP_REG_Sd:
7478 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
7479 break;
7480
7481 case VFP_REG_Sn:
7482 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
7483 break;
7484
7485 case VFP_REG_Sm:
7486 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
7487 break;
7488
7489 case VFP_REG_Dd:
7490 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
7491 break;
7492
7493 case VFP_REG_Dn:
7494 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
7495 break;
7496
7497 case VFP_REG_Dm:
7498 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
7499 break;
7500
7501 default:
7502 abort ();
7503 }
7504 }
7505
7506 /* Encode a <shift> in an ARM-format instruction. The immediate,
7507 if any, is handled by md_apply_fix. */
7508 static void
7509 encode_arm_shift (int i)
7510 {
7511 /* register-shifted register. */
7512 if (inst.operands[i].immisreg)
7513 {
7514 int op_index;
7515 for (op_index = 0; op_index <= i; ++op_index)
7516 {
7517 /* Check the operand only when it's presented. In pre-UAL syntax,
7518 if the destination register is the same as the first operand, two
7519 register form of the instruction can be used. */
7520 if (inst.operands[op_index].present && inst.operands[op_index].isreg
7521 && inst.operands[op_index].reg == REG_PC)
7522 as_warn (UNPRED_REG ("r15"));
7523 }
7524
7525 if (inst.operands[i].imm == REG_PC)
7526 as_warn (UNPRED_REG ("r15"));
7527 }
7528
7529 if (inst.operands[i].shift_kind == SHIFT_RRX)
7530 inst.instruction |= SHIFT_ROR << 5;
7531 else
7532 {
7533 inst.instruction |= inst.operands[i].shift_kind << 5;
7534 if (inst.operands[i].immisreg)
7535 {
7536 inst.instruction |= SHIFT_BY_REG;
7537 inst.instruction |= inst.operands[i].imm << 8;
7538 }
7539 else
7540 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7541 }
7542 }
7543
7544 static void
7545 encode_arm_shifter_operand (int i)
7546 {
7547 if (inst.operands[i].isreg)
7548 {
7549 inst.instruction |= inst.operands[i].reg;
7550 encode_arm_shift (i);
7551 }
7552 else
7553 {
7554 inst.instruction |= INST_IMMEDIATE;
7555 if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
7556 inst.instruction |= inst.operands[i].imm;
7557 }
7558 }
7559
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encodes the base register and the P (pre-index) / W (write-back)
   bits common to mode 2 and mode 3 loads and stores, and performs the
   shared validity checks.  IS_T is TRUE for the user-mode "T" variants
   (ldrt/strt etc.), which only permit post-indexed addressing.  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* PR 14260:
     Generate an error if the operand is not a register.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  /* Base register Rn goes in bits [19:16].  */
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      /* Pre-indexed: set P, and W if write-back was requested.  Not
	 valid for T variants.  */
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      /* Post-indexed always writes back; for the T variants the W bit
	 is additionally set in this encoding.  */
      gas_assert (inst.operands[i].writeback);
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Warn when the base register (Rn, bits [19:16]) will be written back
     and is the same as the transfer register (bits [15:12]).  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
7602
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  /* Capture whether the base is PC before common code ORs further
     fields into inst.instruction.  */
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset form: [Rn, +/-Rm {, <shift> #imm}].  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      inst.instruction |= INST_IMMEDIATE;	/* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    /* RRX: ROR encoding with no shift count set.  */
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      /* Shift amount is inserted later via the reloc.  */
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.reloc */
    {
      if (is_pc && !inst.reloc.pc_rel)
	{
	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_tsktsk (_("use of PC in this instruction is deprecated"));
	}

      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;
	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
	}
    }
}
7662
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no shifter: a scaled register index is invalid.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset form: [Rn, +/-Rm].  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_t && inst.operands[i].reg == REG_PC)),
		  BAD_PC_ADDRESSING);
      constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
		  BAD_PC_WRITEBACK);
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.reloc */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      /* Select the immediate form; the offset value itself is left to
	 the fixup machinery via the reloc set below.  */
      inst.instruction |= HWOFFSET_IMM;
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;

	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
	}
    }
}
7706
7707 /* Write immediate bits [7:0] to the following locations:
7708
7709 |28/24|23 19|18 16|15 4|3 0|
7710 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7711
7712 This function is used by VMOV/VMVN/VORR/VBIC. */
7713
7714 static void
7715 neon_write_immbits (unsigned immbits)
7716 {
7717 inst.instruction |= immbits & 0xf;
7718 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
7719 inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
7720 }
7721
/* Invert low-order SIZE bits of XHI:XLO.  Either pointer may be NULL,
   in which case that half is neither read nor written.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned lo = xlo ? *xlo : 0;
  unsigned hi = xhi ? *xhi : 0;
  unsigned lo_mask;

  /* Pick how many low-order bits of the low word take part; only the
     64-bit case touches the high word.  */
  switch (size)
    {
    case 8:
      lo_mask = 0xff;
      break;

    case 16:
      lo_mask = 0xffff;
      break;

    case 32:
    case 64:
      lo_mask = 0xffffffff;
      break;

    default:
      abort ();
    }

  if (size == 64)
    hi = (~hi) & 0xffffffff;
  lo = (~lo) & lo_mask;

  if (xlo)
    *xlo = lo;

  if (xhi)
    *xhi = hi;
}
7758
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D -- i.e. every byte of IMM is either 0x00 or 0xff.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  int byte;

  for (byte = 0; byte < 4; byte++)
    {
      unsigned field = imm & (0xffu << (byte * 8));

      if (field != 0 && field != (0xffu << (byte * 8)))
	return 0;
    }

  return 1;
}
7770
/* For immediate of above form, return 0bABCD: bit 0 of each byte of
   IMM, gathered so that byte N contributes result bit N.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned result = 0;
  int byte;

  for (byte = 0; byte < 4; byte++)
    result |= ((imm >> (byte * 8)) & 1u) << byte;

  return result;
}
7779
/* Compress quarter-float representation to 0b...000 abcdefgh: the
   sign bit (bit 31 of the single-precision word) lands in bit 7, and
   bits 25..19 (top exponent/fraction bits) fill bits 6..0.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned low_seven = (imm >> 19) & 0x7f;
  unsigned sign_bit = (imm >> 24) & 0x80;

  return low_seven | sign_bit;
}
7787
/* Returns CMODE.  IMMBITS [7:0] is set to bits suitable for inserting into
   the instruction.  *OP is passed as the initial value of the op field, and
   may be set to a different value depending on the constant (i.e.
   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
   MVN).  If the immediate looks like a repeated pattern then also
   try smaller element sizes.  Returns FAIL when the constant cannot be
   encoded at all.  */

static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* Quarter-precision float constant: only for 32-bit elements and the
     MOV (op == 0) form; cmode 0xf.  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* A true 64-bit immediate is only encodable when every byte is
	 0x00 or 0xff; the eight per-byte bits are squashed into IMMBITS
	 and OP is forced to 1 (cmode 0xe).  */
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* Otherwise the two halves must match; fall through and treat the
	 value as a 32-bit pattern.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* A single non-zero byte: cmode 0x0/0x2/0x4/0x6 selects which
	 byte position within the 32-bit element.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      /* One byte with ones below it ("shifted ones"): cmode 0xc/0xd.  */
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* No 32-bit encoding: retry as a repeated 16-bit pattern.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* A single non-zero byte within a 16-bit element: cmode 0x8/0xa.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* No 16-bit encoding: retry as a repeated 8-bit pattern.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
7897
7898 #if defined BFD_HOST_64_BIT
/* Returns TRUE if double precision value V may be cast
   to single precision without loss of accuracy.  */

static bfd_boolean
is_double_a_single (bfd_int64_t v)
{
  /* IEEE binary64: 1 sign bit, 11 exponent bits (bias 1023), 52
     mantissa bits.  */
  int exp = (int)((v >> 52) & 0x7FF);
  bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);

  /* The exponent must be all-zeros or all-ones (zero/denormal,
     infinity/NaN) or within the single-precision range, and the 29
     low mantissa bits that binary32 cannot hold must be zero.
     NOTE(review): a denormal double (exp == 0, mantissa != 0 with low
     29 bits clear) passes this test yet is flushed to zero by
     double_to_single -- confirm that loss is intended.  */
  return (exp == 0 || exp == 0x7FF
	  || (exp >= 1023 - 126 && exp <= 1023 + 127))
	 && (mantissa & 0x1FFFFFFFl) == 0;
}
7912
7913 /* Returns a double precision value casted to single precision
7914 (ignoring the least significant bits in exponent and mantissa). */
7915
7916 static int
7917 double_to_single (bfd_int64_t v)
7918 {
7919 int sign = (int) ((v >> 63) & 1l);
7920 int exp = (int) ((v >> 52) & 0x7FF);
7921 bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
7922
7923 if (exp == 0x7FF)
7924 exp = 0xFF;
7925 else
7926 {
7927 exp = exp - 1023 + 127;
7928 if (exp >= 0xFF)
7929 {
7930 /* Infinity. */
7931 exp = 0x7F;
7932 mantissa = 0;
7933 }
7934 else if (exp < 0)
7935 {
7936 /* No denormalized numbers. */
7937 exp = 0;
7938 mantissa = 0;
7939 }
7940 }
7941 mantissa >>= 29;
7942 return (sign << 31) | (exp << 23) | mantissa;
7943 }
7944 #endif /* BFD_HOST_64_BIT */
7945
/* Kind of constant requested by an "ldr Rx, =expr" style pseudo-op;
   consumed by move_or_literal_pool to choose the replacement
   instruction.  */
enum lit_type
{
  CONST_THUMB,	/* Thumb load of a literal.  */
  CONST_ARM,	/* ARM load of a literal.  */
  CONST_VEC	/* Vector/FP load -- presumably vldr; confirm against
		   callers.  */
};
7952
7953 static void do_vfp_nsyn_opcode (const char *);
7954
7955 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
7956 Determine whether it can be performed with a move instruction; if
7957 it can, convert inst.instruction to that move instruction and
7958 return TRUE; if it can't, convert inst.instruction to a literal-pool
7959 load and return FALSE. If this is not a valid thing to do in the
7960 current context, set inst.error and return TRUE.
7961
7962 inst.operands[i] describes the destination register. */
7963
static bfd_boolean
move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
{
  unsigned long tbit;
  bfd_boolean thumb_p = (t == CONST_THUMB);
  bfd_boolean arm_p = (t == CONST_ARM);

  /* Pick the load bit appropriate to the target instruction set.  */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  /* Only load instructions may take an "=expr" pseudo operand.  */
  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }

  if (inst.reloc.exp.X_op != O_constant
      && inst.reloc.exp.X_op != O_symbol
      && inst.reloc.exp.X_op != O_big)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }

  if (inst.reloc.exp.X_op == O_constant
      || inst.reloc.exp.X_op == O_big)
    {
#if defined BFD_HOST_64_BIT
      bfd_int64_t v;
#else
      offsetT v;
#endif
      if (inst.reloc.exp.X_op == O_big)
	{
	  /* Collect the bignum littlenums into a single host value V.  */
	  LITTLENUM_TYPE w[X_PRECISION];
	  LITTLENUM_TYPE * l;

	  if (inst.reloc.exp.X_add_number == -1)
	    {
	      gen_to_words (w, X_PRECISION, E_PRECISION);
	      l = w;
	      /* FIXME: Should we check words w[2..5] ?  */
	    }
	  else
	    l = generic_bignum;

#if defined BFD_HOST_64_BIT
	  v =
	    ((((((((bfd_int64_t) l[3] & LITTLENUM_MASK)
		  << LITTLENUM_NUMBER_OF_BITS)
		 | ((bfd_int64_t) l[2] & LITTLENUM_MASK))
		<< LITTLENUM_NUMBER_OF_BITS)
	       | ((bfd_int64_t) l[1] & LITTLENUM_MASK))
	      << LITTLENUM_NUMBER_OF_BITS)
	     | ((bfd_int64_t) l[0] & LITTLENUM_MASK));
#else
	  v = ((l[1] & LITTLENUM_MASK) << LITTLENUM_NUMBER_OF_BITS)
	      | (l[0] & LITTLENUM_MASK);
#endif
	}
      else
	v = inst.reloc.exp.X_add_number;

      if (!inst.operands[i].issingle)
	{
	  if (thumb_p)
	    {
	      /* The LDR pseudo must not be turned into a flag-setting MOVS,
		 so we do not check whether movs can be used.  */

	      if ((ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
		   || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		  && inst.operands[i].reg != 13
		  && inst.operands[i].reg != 15)
		{
		  /* Check if on thumb2 it can be done with a mov.w, mvn or
		     movw instruction.  */
		  unsigned int newimm;
		  bfd_boolean isNegated;

		  newimm = encode_thumb32_immediate (v);
		  if (newimm != (unsigned int) FAIL)
		    isNegated = FALSE;
		  else
		    {
		      newimm = encode_thumb32_immediate (~v);
		      if (newimm != (unsigned int) FAIL)
			isNegated = TRUE;
		    }

		  /* The number can be loaded with a mov.w or mvn
		     instruction.  */
		  if (newimm != (unsigned int) FAIL
		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		    {
		      inst.instruction = (0xf04f0000 /* MOV.W.  */
					  | (inst.operands[i].reg << 8));
		      /* Change to MOVN.  */
		      inst.instruction |= (isNegated ? 0x200000 : 0);
		      inst.instruction |= (newimm & 0x800) << 15;
		      inst.instruction |= (newimm & 0x700) << 4;
		      inst.instruction |= (newimm & 0x0ff);
		      return TRUE;
		    }
		  /* The number can be loaded with a movw instruction.  */
		  else if ((v & ~0xFFFF) == 0
			   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		    {
		      int imm = v & 0xFFFF;

		      inst.instruction = 0xf2400000;  /* MOVW.  */
		      inst.instruction |= (inst.operands[i].reg << 8);
		      inst.instruction |= (imm & 0xf000) << 4;
		      inst.instruction |= (imm & 0x0800) << 15;
		      inst.instruction |= (imm & 0x0700) << 4;
		      inst.instruction |= (imm & 0x00ff);
		      return TRUE;
		    }
		}
	    }
	  else if (arm_p)
	    {
	      /* Try a plain MOV with an encodable immediate, then MVN with
		 the bitwise inverse.  */
	      int value = encode_arm_immediate (v);

	      if (value != FAIL)
		{
		  /* This can be done with a mov instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}

	      value = encode_arm_immediate (~ v);
	      if (value != FAIL)
		{
		  /* This can be done with a mvn instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}
	    }
	  else if (t == CONST_VEC && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	    {
	      /* Try a Neon VMOV (immediate); if that fails, invert the
		 value and retry so a VMVN-style encoding can be used.  */
	      int op = 0;
	      unsigned immbits = 0;
	      unsigned immlo = inst.operands[1].imm;
	      unsigned immhi = inst.operands[1].regisimm
		? inst.operands[1].reg
		: inst.reloc.exp.X_unsigned
		? 0
		: ((bfd_int64_t)((int) immlo)) >> 32;
	      int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);

	      if (cmode == FAIL)
		{
		  neon_invert_size (&immlo, &immhi, 64);
		  op = !op;
		  cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);
		}

	      if (cmode != FAIL)
		{
		  inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
		    | (1 << 23)
		    | (cmode << 8)
		    | (op << 5)
		    | (1 << 4);

		  /* Fill other bits in vmov encoding for both thumb and arm.  */
		  if (thumb_mode)
		    inst.instruction |= (0x7U << 29) | (0xF << 24);
		  else
		    inst.instruction |= (0xFU << 28) | (0x1 << 25);
		  neon_write_immbits (immbits);
		  return TRUE;
		}
	    }
	}

      if (t == CONST_VEC)
	{
	  /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant.  */
	  if (inst.operands[i].issingle
	      && is_quarter_float (inst.operands[1].imm)
	      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
	    {
	      inst.operands[1].imm =
		neon_qfloat_bits (v);
	      do_vfp_nsyn_opcode ("fconsts");
	      return TRUE;
	    }

	  /* If our host does not support a 64-bit type then we cannot perform
	     the following optimization.  This mean that there will be a
	     discrepancy between the output produced by an assembler built for
	     a 32-bit-only host and the output produced from a 64-bit host, but
	     this cannot be helped.  */
#if defined BFD_HOST_64_BIT
	  else if (!inst.operands[1].issingle
		   && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	    {
	      if (is_double_a_single (v)
		  && is_quarter_float (double_to_single (v)))
		{
		  inst.operands[1].imm =
		    neon_qfloat_bits (double_to_single (v));
		  do_vfp_nsyn_opcode ("fconstd");
		  return TRUE;
		}
	    }
#endif
	}
    }

  /* No move encoding worked: reserve a literal pool slot (4 bytes for a
     core or single-precision value, 8 for double) and rewrite the operand
     as a PC-relative load from the pool.  */
  if (add_to_lit_pool ((!inst.operands[i].isvec
			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
    return TRUE;

  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
8199
8200 /* inst.operands[i] was set up by parse_address. Encode it into an
8201 ARM-format instruction. Reject all forms which cannot be encoded
8202 into a coprocessor load/store instruction. If wb_ok is false,
8203 reject use of writeback; if unind_ok is false, reject use of
8204 unindexed addressing. If reloc_override is not 0, use it instead
8205 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
8206 (in which case it is preserved). */
8207
static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  if (!inst.operands[i].isreg)
    {
      /* PR 18256: a non-register operand is only acceptable here when
	 operand 0 is a vector register, in which case it may become a
	 VMOV or a literal pool load.  */
      if (! inst.operands[0].isvec)
	{
	  inst.error = _("invalid co-processor operand");
	  return FAIL;
	}
      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
	return SUCCESS;
    }

  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* Unindexed form: the 8-bit option value goes in the offset field.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  /* Group relocations (ALU_PC_G0_NC .. LDC_SB_G2) and LDR_PC_G0 are
     preserved; otherwise select the default coprocessor offset reloc
     for the current instruction set.  */
  if (reloc_override)
    inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
8276
8277 /* Functions for instruction encoding, sorted by sub-architecture.
8278 First some generics; their names are taken from the conventional
8279 bit positions for register arguments in ARM format instructions. */
8280
/* Encoder for instructions with no operands: the opcode taken from
   insns[] is already complete, so there is nothing to do.  */
static void
do_noargs (void)
{
}
8285
8286 static void
8287 do_rd (void)
8288 {
8289 inst.instruction |= inst.operands[0].reg << 12;
8290 }
8291
8292 static void
8293 do_rn (void)
8294 {
8295 inst.instruction |= inst.operands[0].reg << 16;
8296 }
8297
8298 static void
8299 do_rd_rm (void)
8300 {
8301 inst.instruction |= inst.operands[0].reg << 12;
8302 inst.instruction |= inst.operands[1].reg;
8303 }
8304
8305 static void
8306 do_rm_rn (void)
8307 {
8308 inst.instruction |= inst.operands[0].reg;
8309 inst.instruction |= inst.operands[1].reg << 16;
8310 }
8311
8312 static void
8313 do_rd_rn (void)
8314 {
8315 inst.instruction |= inst.operands[0].reg << 12;
8316 inst.instruction |= inst.operands[1].reg << 16;
8317 }
8318
8319 static void
8320 do_rn_rd (void)
8321 {
8322 inst.instruction |= inst.operands[0].reg << 16;
8323 inst.instruction |= inst.operands[1].reg << 12;
8324 }
8325
8326 static void
8327 do_tt (void)
8328 {
8329 inst.instruction |= inst.operands[0].reg << 8;
8330 inst.instruction |= inst.operands[1].reg << 16;
8331 }
8332
8333 static bfd_boolean
8334 check_obsolete (const arm_feature_set *feature, const char *msg)
8335 {
8336 if (ARM_CPU_IS_ANY (cpu_variant))
8337 {
8338 as_tsktsk ("%s", msg);
8339 return TRUE;
8340 }
8341 else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
8342 {
8343 as_bad ("%s", msg);
8344 return TRUE;
8345 }
8346
8347 return FALSE;
8348 }
8349
/* Encode Rd (bits 12-15), Rm (bits 0-3) and Rn (bits 16-19), with
   extra diagnostics for the SWP/SWPB instruction.  */
static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    {
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		  _("Rn must not overlap other operands"));

      /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
       */
      if (!check_obsolete (&arm_ext_v8,
			   _("swp{b} use is obsoleted for ARMv8 and later"))
	  && warn_on_deprecated
	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
	as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
    }

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;
}
8373
8374 static void
8375 do_rd_rn_rm (void)
8376 {
8377 inst.instruction |= inst.operands[0].reg << 12;
8378 inst.instruction |= inst.operands[1].reg << 16;
8379 inst.instruction |= inst.operands[2].reg;
8380 }
8381
/* Encode Rm (bits 0-3), Rd (bits 12-15) and Rn (bits 16-19).  Rn must
   not be the PC and must not carry any offset expression.  */
static void
do_rm_rd_rn (void)
{
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
  /* Reject any non-zero offset written after the base register.  */
  constraint (((inst.reloc.exp.X_op != O_constant
		&& inst.reloc.exp.X_op != O_illegal)
	       || inst.reloc.exp.X_add_number != 0),
	      BAD_ADDR_MODE);
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
8394
8395 static void
8396 do_imm0 (void)
8397 {
8398 inst.instruction |= inst.operands[0].imm;
8399 }
8400
/* Encode Rd (bits 12-15) followed by a coprocessor address operand;
   writeback and unindexed forms are both permitted.  */
static void
do_rd_cpaddr (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
8407
8408 /* ARM instructions, in alphabetical order by function name (except
8409 that wrapper functions appear immediately after the function they
8410 wrap). */
8411
8412 /* This is a pseudo-op of the form "adr rd, label" to be converted
8413 into a relative address of the form "add rd, pc, #label-.-8". */
8414
static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  /* Compensate for the PC reading 8 bytes ahead of the current
     instruction in ARM state.  */
  inst.reloc.exp.X_add_number -= 8;

  /* When interworking, taking the address of a defined Thumb function
     must set the low (Thumb) bit of the result.  */
  if (support_interwork
      && inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_number |= 1;
}
8433
8434 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8435 into a relative address of the form:
8436 add rd, pc, #low(label-.-8)"
8437 add rd, rd, #high(label-.-8)" */
8438
static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  /* ADRL always expands to a two-instruction sequence.  */
  inst.size = INSN_SIZE * 2;
  inst.reloc.exp.X_add_number -= 8;

  /* When interworking, taking the address of a defined Thumb function
     must set the low (Thumb) bit of the result.  */
  if (support_interwork
      && inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_number |= 1;
}
8458
/* Encode a data-processing (arithmetic) instruction: Rd, Rn and a
   shifter operand.  In the two-operand form, Rd doubles as Rn.  */
static void
do_arit (void)
{
  /* Thumb-1-only relocations are not valid on an ARM-state insn.  */
  constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_shifter_operand (2);
}
8471
8472 static void
8473 do_barrier (void)
8474 {
8475 if (inst.operands[0].present)
8476 inst.instruction |= inst.operands[0].imm;
8477 else
8478 inst.instruction |= 0xf;
8479 }
8480
/* Encode BFC: Rd (bits 12-15), LSB (bits 7-11) and MSB (bits 16-20),
   where the source syntax supplies LSB and width.  */
static void
do_bfc (void)
{
  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}
8492
/* Encode BFI: Rd, Rm, LSB (bits 7-11) and MSB (bits 16-20), where the
   source syntax supplies LSB and width.  */
static void
do_bfi (void)
{
  unsigned int msb;

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    inst.operands[1].reg = REG_PC;

  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}
8512
/* Encode a bit-field extract: Rd, Rn, LSB (bits 7-11) and
   width-minus-one (bits 16-20).  */
static void
do_bfx (void)
{
  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
	      _("bit-field extends past end of register"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (inst.operands[3].imm - 1) << 16;
}
8523
8524 /* ARM V5 breakpoint instruction (argument parse)
8525 BKPT <16 bit unsigned immediate>
8526 Instruction is not conditional.
8527 The bit pattern given in insns[] has the COND_ALWAYS condition,
8528 and it is an error if the caller tried to override that. */
8529
8530 static void
8531 do_bkpt (void)
8532 {
8533 /* Top 12 of 16 bits to bits 19:8. */
8534 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
8535
8536 /* Bottom 4 of 16 bits to bits 3:0. */
8537 inst.instruction |= inst.operands[0].imm & 0xf;
8538 }
8539
/* Set up the (always PC-relative) relocation for a branch to operand 0.
   An explicit (plt) or (tlscall) suffix on the operand overrides
   DEFAULT_RELOC.  */
static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
      /* A (tlscall) suffix picks the Thumb or ARM TLS-call reloc
	 according to the current instruction set.  */
      inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
	? BFD_RELOC_ARM_PLT32
	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
    }
  else
    inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
  inst.reloc.pc_rel = 1;
}
8556
static void
do_branch (void)
{
#ifdef OBJ_ELF
  /* EABI v4 and later objects use the PCREL_JUMP reloc; older objects
     use the plain PCREL_BRANCH reloc.  */
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8567
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      /* Only an unconditional BL gets the CALL reloc; a conditional BL
	 gets the JUMP reloc instead.  NOTE(review): presumably because
	 only the unconditional form may be converted to BLX downstream
	 — confirm against the linker.  */
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8583
8584 /* ARM V5 branch-link-exchange instruction (argument parse)
8585 BLX <target_addr> ie BLX(1)
8586 BLX{<condition>} <Rm> ie BLX(2)
8587 Unfortunately, there are two different opcodes for this mnemonic.
8588 So, the insns[].value is not used, and the code here zaps values
8589 into inst.instruction.
8590 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8591
static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      /* Replace the insns[] value with the unconditional BLX(1) opcode.  */
      inst.instruction = 0xfa000000;
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
8615
static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (!ARM_FEATURE_ZERO (selected_object_arch)
      && !ARM_CPU_HAS_FEATURE (selected_object_arch, arm_ext_v5))
    want_reloc = TRUE;

#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    /* Pre-v4 EABI (and non-ELF) objects never get the V4BX reloc.  */
    want_reloc = FALSE;

  if (want_reloc)
    inst.reloc.type = BFD_RELOC_ARM_V4BX;
}
8640
8641
8642 /* ARM v5TEJ. Jump to Jazelle code. */
8643
8644 static void
8645 do_bxj (void)
8646 {
8647 if (inst.operands[0].reg == REG_PC)
8648 as_tsktsk (_("use of r15 in bxj is not really useful"));
8649
8650 inst.instruction |= inst.operands[0].reg;
8651 }
8652
8653 /* Co-processor data operation:
8654 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8655 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8656 static void
8657 do_cdp (void)
8658 {
8659 inst.instruction |= inst.operands[0].reg << 8;
8660 inst.instruction |= inst.operands[1].imm << 20;
8661 inst.instruction |= inst.operands[2].reg << 12;
8662 inst.instruction |= inst.operands[3].reg << 16;
8663 inst.instruction |= inst.operands[4].reg;
8664 inst.instruction |= inst.operands[5].imm << 5;
8665 }
8666
8667 static void
8668 do_cmp (void)
8669 {
8670 inst.instruction |= inst.operands[0].reg << 16;
8671 encode_arm_shifter_operand (1);
8672 }
8673
8674 /* Transfer between coprocessor and ARM registers.
8675 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8676 MRC2
8677 MCR{cond}
8678 MCR2
8679
8680 No special properties. */
8681
/* One MRC/MCR coprocessor register access that is deprecated and/or
   obsoleted from some architecture version onwards.  */
struct deprecated_coproc_regs_s
{
  unsigned cp;			/* Coprocessor number.  */
  int opc1;			/* Opcode 1 field.  */
  unsigned crn;			/* CRn field.  */
  unsigned crm;			/* CRm field.  */
  int opc2;			/* Opcode 2 field.  */
  arm_feature_set deprecated;	/* Architectures where access is deprecated.  */
  arm_feature_set obsoleted;	/* Architectures where access is obsoleted.  */
  const char *dep_msg;		/* Diagnostic for deprecated access.  */
  const char *obs_msg;		/* Diagnostic for obsoleted access.  */
};
8694
8695 #define DEPR_ACCESS_V8 \
8696 N_("This coprocessor register access is deprecated in ARMv8")
8697
8698 /* Table of all deprecated coprocessor registers. */
static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
{
  /* Fields: cp, opc1, crn, crm, opc2,
     deprecated-from, obsoleted-from, messages.  */
  {15, 0, 7, 10, 5,					/* CP15DMB.  */
   ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
   DEPR_ACCESS_V8, NULL},
  {15, 0, 7, 10, 4,					/* CP15DSB.  */
   ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
   DEPR_ACCESS_V8, NULL},
  {15, 0, 7,  5, 4,					/* CP15ISB.  */
   ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
   DEPR_ACCESS_V8, NULL},
  {14, 6, 1,  0, 0,					/* TEEHBR.  */
   ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
   DEPR_ACCESS_V8, NULL},
  {14, 6, 0,  0, 0,					/* TEECR.  */
   ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
   DEPR_ACCESS_V8, NULL},
};
8717
8718 #undef DEPR_ACCESS_V8
8719
/* Number of entries in deprecated_coproc_regs.  */
static const size_t deprecated_coproc_reg_count =
  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
8722
/* Encode MRC/MRC2/MCR/MCR2, diagnosing bad core registers and warning
   about deprecated coprocessor register accesses.  */
static void
do_co_reg (void)
{
  unsigned Rd;
  size_t i;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	/* MRC, MRC2  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

  /* Warn if this access matches one of the known deprecated
     coprocessor registers (unless assembling with -march=all).  */
  for (i = 0; i < deprecated_coproc_reg_count; ++i)
    {
      const struct deprecated_coproc_regs_s *r =
	deprecated_coproc_regs + i;

      if (inst.operands[0].reg == r->cp
	  && inst.operands[1].imm == r->opc1
	  && inst.operands[3].reg == r->crn
	  && inst.operands[4].reg == r->crm
	  && inst.operands[5].imm == r->opc2)
	{
	  if (! ARM_CPU_IS_ANY (cpu_variant)
	      && warn_on_deprecated
	      && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	    as_tsktsk ("%s", r->dep_msg);
	}
    }

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
8772
8773 /* Transfer between coprocessor register and pair of ARM registers.
8774 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8775 MCRR2
8776 MRRC{cond}
8777 MRRC2
8778
8779 Two XScale instructions are special cases of these:
8780
8781 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8782 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8783
8784 Result unpredictable if Rd or Rn is R15. */
8785
static void
do_co_reg2c (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;

  /* In Thumb state both SP and PC are rejected; in ARM state only PC.  */
  if (thumb_mode)
    {
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
    }
  else
    {
      constraint (Rd == REG_PC, BAD_PC);
      constraint (Rn == REG_PC, BAD_PC);
    }

  /* Only check the MRRC{2} variants.  */
  if ((inst.instruction & 0x0FF00000) == 0x0C500000)
    {
      /* If Rd == Rn, error that the operation is
	 unpredictable (example MRRC p3,#1,r1,r1,c4).  */
      constraint (Rd == Rn, BAD_OVERLAP);
    }

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 4;
  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;
  inst.instruction |= inst.operands[4].reg;
}
8819
8820 static void
8821 do_cpsi (void)
8822 {
8823 inst.instruction |= inst.operands[0].imm << 6;
8824 if (inst.operands[1].present)
8825 {
8826 inst.instruction |= CPSI_MMOD;
8827 inst.instruction |= inst.operands[1].imm;
8828 }
8829 }
8830
8831 static void
8832 do_dbg (void)
8833 {
8834 inst.instruction |= inst.operands[0].imm;
8835 }
8836
8837 static void
8838 do_div (void)
8839 {
8840 unsigned Rd, Rn, Rm;
8841
8842 Rd = inst.operands[0].reg;
8843 Rn = (inst.operands[1].present
8844 ? inst.operands[1].reg : Rd);
8845 Rm = inst.operands[2].reg;
8846
8847 constraint ((Rd == REG_PC), BAD_PC);
8848 constraint ((Rn == REG_PC), BAD_PC);
8849 constraint ((Rm == REG_PC), BAD_PC);
8850
8851 inst.instruction |= Rd << 16;
8852 inst.instruction |= Rn << 0;
8853 inst.instruction |= Rm << 8;
8854 }
8855
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  inst.size = 0;	/* Emits no machine code in ARM mode.  */
  if (unified_syntax)
    {
      set_it_insn_type (IT_INSN);
      /* Record the IT mask and condition so the following instructions
	 in the block can be validated against them.  */
      now_it.mask = (inst.instruction & 0xf) | 0x10;
      now_it.cc = inst.operands[0].imm;
    }
}
8872
8873 /* If there is only one register in the register list,
8874 then return its register number. Otherwise return -1. */
/* If there is only one register in the register list RANGE, return its
   register number (0-15).  Otherwise return -1.  An empty list also
   returns -1: ffs (0) is 0, making I negative, and testing I < 0 first
   avoids the undefined-behavior shift `1 << -1` the old check performed
   for that input.  */
static int
only_one_reg_in_list (int range)
{
  int i = ffs (range) - 1;
  return (i < 0 || i > 15 || range != (1 << i)) ? -1 : i;
}
8881
/* Common encoder for LDM/STM and PUSH/POP.  FROM_PUSH_POP_MNEM is
   non-zero when the source used the PUSH/POP mnemonic, which enables
   the single-register A2 encoding.  */
static void
encode_ldmstm(int from_push_pop_mnem)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding.  */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      if (is_push && one_reg == 13 /* SP */)
	/* PR 22483: The A2 encoding cannot be used when
	   pushing the stack pointer as this is UNPREDICTABLE.  */
	return;

      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
8937
/* Encode LDM/STM written with the mnemonic form (not PUSH/POP).  */
static void
do_ldmstm (void)
{
  encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
}
8943
8944 /* ARMv5TE load-consecutive (argument parse)
8945 Mode is like LDRH.
8946
8947 LDRccD R, mode
8948 STRccD R, mode. */
8949
static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  /* The second transfer register defaults to Rt + 1.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	  && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
8985
/* Encode LDREX-style exclusive loads: Rt in bits 12-15, base Rn in
   bits 16-19; only the simple [Rn] addressing form is accepted.  */
static void
do_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  /* Any reloc accumulated while parsing the zero offset is unused.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
}
9017
/* Encode LDREXD: an even/odd register pair plus a base register.  */
static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  inst.instruction |= inst.operands[0].reg << 12;
  /* Operand 2 is the base register [Rn].  */
  inst.instruction |= inst.operands[2].reg << 16;
}
9033
9034 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
9035 which is not a multiple of four is UNPREDICTABLE. */
static void
check_ldr_r15_aligned (void)
{
  /* Only a literal/immediate offset (not a register offset) with both
     Rt and Rn equal to the PC triggers the alignment check.  */
  constraint (!(inst.operands[1].immisreg)
	      && (inst.operands[0].reg == REG_PC
		  && inst.operands[1].reg == REG_PC
		  && (inst.reloc.exp.X_add_number & 0x3)),
	      _("ldr to register 15 must be 4-byte aligned"));
}
9045
/* Word/byte load-store (LDR/STR family).  A non-register second
   operand is an "=expr" pseudo, which may become a MOV or a literal
   pool load via move_or_literal_pool.  */
static void
do_ldst (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
      return;
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
  check_ldr_r15_aligned ();
}
9056
/* Encode LDRT/STRT (user-mode translation variants).  */
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
9075
9076 /* Halfword and signed-byte load/store operations. */
9077
static void
do_ldstv4 (void)
{
  /* Halfword/signed-byte form (addressing mode 3): Rd may not be PC.  */
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    /* Bare expression: try a MOV or literal-pool substitution first.  */
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}
9088
static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      /* As in do_ldstt: only a zero pre-index offset is convertible.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  /* Mode-3 (halfword/signed-byte) variant of the unprivileged access.  */
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
9107
9108 /* Co-processor register load/store.
9109 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
9110 static void
9111 do_lstc (void)
9112 {
9113 inst.instruction |= inst.operands[0].reg << 8;
9114 inst.instruction |= inst.operands[1].reg << 12;
9115 encode_arm_cp_address (2, TRUE, TRUE, 0);
9116 }
9117
/* MLA/MLS: Rd<16-19>, Rm<0-3>, Rs<8-11>, Rn<12-15>.  Warn if Rd == Rm
   on pre-v6 cores where that combination is restricted for MLA.  */

static void
do_mlas (void)
{
  /* This restriction does not apply to mls (nor to mla in v6 or later).  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
      && !(inst.instruction & 0x00400000))	/* bit 22 distinguishes mls.  */
    as_tsktsk (_("Rd and Rm should be different in mla"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}
9132
static void
do_mov (void)
{
  /* The Thumb-1 ALU-absolute relocations cannot be used with the ARM
     encoding of MOV; reject them up front.  */
  constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_shifter_operand (1);
}
9142
9143 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
static void
do_mov16 (void)
{
  bfd_vma imm;
  bfd_boolean top;	/* TRUE for MOVT (bit 22 set), FALSE for MOVW.  */

  top = (inst.instruction & 0x00400000) != 0;
  /* :lower16:/:upper16: must match the MOVW/MOVT variant being used.  */
  constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
	      _(":lower16: not allowed in this instruction"));
  constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
	      _(":upper16: not allowed in this instruction"));
  inst.instruction |= inst.operands[0].reg << 12;
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      /* Resolved immediate: encode it directly instead of emitting
	 a relocation.  */
      imm = inst.reloc.exp.X_add_number;
      /* The value is in two pieces: 0:11, 16:19.  */
      inst.instruction |= (imm & 0x00000fff);
      inst.instruction |= (imm & 0x0000f000) << 4;
    }
}
9164
/* Handle the VFP-syntax forms of MRS.  Returns SUCCESS if the
   instruction was assembled here (as fmstat or fmrx), FAIL if the
   caller should fall back to the ordinary MRS handling.  */

static int
do_vfp_nsyn_mrs (void)
{
  if (inst.operands[0].isvec)
    {
      /* APSR_nzcv destination: only FPSCR (system reg 1) is legal,
	 and the whole thing maps onto fmstat.  */
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    /* Reading a VFP system register into a core register.  */
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
9183
9184 static int
9185 do_vfp_nsyn_msr (void)
9186 {
9187 if (inst.operands[0].isvec)
9188 do_vfp_nsyn_opcode ("fmxr");
9189 else
9190 return FAIL;
9191
9192 return SUCCESS;
9193 }
9194
static void
do_vmrs (void)
{
  unsigned Rt = inst.operands[0].reg;

  /* SP is not a valid transfer register in the Thumb encoding.  */
  if (thumb_mode && Rt == REG_SP)
    {
      inst.error = BAD_SP;
      return;
    }

  /* MVFR2 is only valid at ARMv8-A.  */
  if (inst.operands[1].reg == 5)	/* system register number 5 == MVFR2.  */
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* APSR_ sets isvec.  All other refs to PC are illegal.  */
  if (!inst.operands[0].isvec && Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[1].reg << 16);
  inst.instruction |= (Rt << 12);
}
9223
static void
do_vmsr (void)
{
  unsigned Rt = inst.operands[1].reg;

  /* Thumb encodings forbid SP/PC source registers; ARM only forbids PC.  */
  if (thumb_mode)
    reject_bad_reg (Rt);
  else if (Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* MVFR2 is only valid for ARMv8-A.  */
  if (inst.operands[0].reg == 5)	/* system register number 5 == MVFR2.  */
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[0].reg << 16);
  inst.instruction |= (Rt << 12);
}
9247
static void
do_mrs (void)
{
  unsigned br;

  /* VFP-syntax forms (fmstat/fmrx) are handled separately.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;

  if (inst.operands[1].isreg)
    {
      /* Register-form source (banked register encoding).
	 NOTE(review): the 0x200 / 0xf0000 masks appear to distinguish
	 banked-register values from the CPSR/SPSR encodings produced by
	 the parser -- confirm against the operand parsing code.  */
      br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf0000))
	as_bad (_("bad register for mrs"));
    }
  else
    {
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
		  != (PSR_c|PSR_f),
		  _("'APSR', 'CPSR' or 'SPSR' expected"));
      /* 15<<16 selects the full-status form; SPSR_BIT picks SPSR.  */
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
    }

  inst.instruction |= br;
}
9276
9277 /* Two possible forms:
9278 "{C|S}PSR_<field>, Rm",
9279 "{C|S}PSR_f, #expression". */
9280
static void
do_msr (void)
{
  /* VFP-syntax form (fmxr) is handled separately.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  /* Field mask / PSR selection comes straight from the parsed operand.  */
  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      /* Immediate form: defer encoding the value to the relocation
	 machinery (it must be an ARM-expressible immediate).  */
      inst.instruction |= INST_IMMEDIATE;
      inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
      inst.reloc.pc_rel = 0;
    }
}
9297
static void
do_mul (void)
{
  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  /* "mul Rd, Rm" is shorthand for "mul Rd, Rm, Rd".  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;

  /* Rd == Rm is restricted before ARMv6; warn rather than error.  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("Rd and Rm should be different in mul"));
}
9313
9314 /* Long Multiply Parser
9315 UMULL RdLo, RdHi, Rm, Rs
9316 SMULL RdLo, RdHi, Rm, Rs
9317 UMLAL RdLo, RdHi, Rm, Rs
9318 SMLAL RdLo, RdHi, Rm, Rs. */
9319
static void
do_mull (void)
{
  /* RdLo<12-15>, RdHi<16-19>, Rm<0-3>, Rs<8-11>.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  /* rdhi and rdlo must be different.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));

  /* rdhi, rdlo and rm must all be different before armv6.  */
  if ((inst.operands[0].reg == inst.operands[2].reg
       || inst.operands[1].reg == inst.operands[2].reg)
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
}
9338
static void
do_nop (void)
{
  /* On v6K and later (or when a hint operand is given) emit the
     architectural NOP/hint encoding; otherwise leave the legacy
     "mov r0, r0"-style encoding untouched.  */
  if (inst.operands[0].present
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
      inst.instruction &= 0xf0000000;	/* keep only the condition field.  */
      inst.instruction |= 0x0320f000;
      if (inst.operands[0].present)
	inst.instruction |= inst.operands[0].imm;	/* hint number.  */
    }
}
9352
9353 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9354 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9355 Condition defaults to COND_ALWAYS.
9356 Error if Rd, Rn or Rm are R15. */
9357
9358 static void
9359 do_pkhbt (void)
9360 {
9361 inst.instruction |= inst.operands[0].reg << 12;
9362 inst.instruction |= inst.operands[1].reg << 16;
9363 inst.instruction |= inst.operands[2].reg;
9364 if (inst.operands[3].present)
9365 encode_arm_shift (3);
9366 }
9367
9368 /* ARM V6 PKHTB (Argument Parse). */
9369
static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn.  */
      inst.instruction &= 0xfff00010;	/* clear opcode bits and registers.  */
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;		/* Rm and Rn swap.  */
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      /* Normal PKHTB: Rd<12-15>, Rn<16-19>, Rm<0-3>, ASR shift.  */
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}
9390
9391 /* ARMv5TE: Preload-Cache
9392 MP Extensions: Preload for write
9393
9394 PLD(W) <addr_mode>
9395
9396 Syntactically, like LDR with B=1, W=0, L=1. */
9397
static void
do_pld (void)
{
  /* PLD takes only a pre-indexed [Rn, ...] address: no literal, no
     post-index, no writeback, no unindexed form.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
9411
9412 /* ARMv7: PLI <addr_mode> */
static void
do_pli (void)
{
  /* Same addressing-mode restrictions as PLD.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  /* PLI's encoding has the P bit clear where PLD has it set.  */
  inst.instruction &= ~PRE_INDEX;
}
9427
static void
do_push_pop (void)
{
  /* PUSH/POP {reglist} is LDM/STM with SP! as the synthesized base:
     shift the register list to operand 1 and fabricate operand 0.  */
  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
}
9440
9441 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9442 word at the specified address and the following word
9443 respectively.
9444 Unconditionally executed.
9445 Error if Rn is R15. */
9446
9447 static void
9448 do_rfe (void)
9449 {
9450 inst.instruction |= inst.operands[0].reg << 16;
9451 if (inst.operands[0].writeback)
9452 inst.instruction |= WRITE_BACK;
9453 }
9454
9455 /* ARM V6 ssat (argument parse). */
9456
static void
do_ssat (void)
{
  /* SSAT: Rd<12-15>, saturate position encoded as (imm - 1) in <16-20>,
     Rn<0-3>, optional LSL/ASR shift.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= (inst.operands[1].imm - 1) << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}
9467
9468 /* ARM V6 usat (argument parse). */
9469
static void
do_usat (void)
{
  /* USAT: like SSAT but the saturate position is encoded directly
     (range starts at 0, so no -1 adjustment).  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}
9480
9481 /* ARM V6 ssat16 (argument parse). */
9482
9483 static void
9484 do_ssat16 (void)
9485 {
9486 inst.instruction |= inst.operands[0].reg << 12;
9487 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
9488 inst.instruction |= inst.operands[2].reg;
9489 }
9490
static void
do_usat16 (void)
{
  /* USAT16: Rd<12-15>, saturate position (encoded directly) <16-19>,
     Rn<0-3>.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;
}
9498
9499 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9500 preserving the other bits.
9501
9502 setend <endian_specifier>, where <endian_specifier> is either
9503 BE or LE. */
9504
static void
do_setend (void)
{
  /* SETEND is deprecated from ARMv8-A onwards; warn when requested.  */
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
      as_tsktsk (_("setend use is deprecated for ARMv8"));

  /* Operand imm is nonzero for BE; bit 9 is the E bit in the encoding.  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}
9515
static void
do_shift (void)
{
  /* Two-operand shorthand "op Rd, Rs" means "op Rd, Rd, Rs".  */
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
      /* PR 12854: Error on extraneous shifts.  */
      constraint (inst.operands[2].shifted,
		  _("extraneous shift as part of operand to shift insn"));
    }
  else
    /* Immediate shift amount: defer to the relocation machinery.  */
    inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
}
9536
static void
do_smc (void)
{
  /* SMC: the immediate is encoded via its own relocation type.  */
  inst.reloc.type = BFD_RELOC_ARM_SMC;
  inst.reloc.pc_rel = 0;
}
9543
static void
do_hvc (void)
{
  /* HVC: the immediate is encoded via its own relocation type.  */
  inst.reloc.type = BFD_RELOC_ARM_HVC;
  inst.reloc.pc_rel = 0;
}
9550
static void
do_swi (void)
{
  /* SWI/SVC: the immediate is encoded via its own relocation type.  */
  inst.reloc.type = BFD_RELOC_ARM_SWI;
  inst.reloc.pc_rel = 0;
}
9557
static void
do_setpan (void)
{
  /* ARM encoding of SETPAN: the PAN value occupies bit 9.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= ((inst.operands[0].imm & 1) << 9);
}
9566
static void
do_t_setpan (void)
{
  /* Thumb encoding of SETPAN: the PAN value occupies bit 3.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= (inst.operands[0].imm << 3);
}
9575
9576 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9577 SMLAxy{cond} Rd,Rm,Rs,Rn
9578 SMLAWy{cond} Rd,Rm,Rs,Rn
9579 Error if any register is R15. */
9580
static void
do_smla (void)
{
  /* SMLAxy/SMLAWy: Rd<16-19>, Rm<0-3>, Rs<8-11>, Rn<12-15>.  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}
9589
9590 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9591 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9592 Error if any register is R15.
9593 Warning if Rdlo == Rdhi. */
9594
static void
do_smlal (void)
{
  /* SMLALxy: RdLo<12-15>, RdHi<16-19>, Rm<0-3>, Rs<8-11>.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  /* Same destination pair is unpredictable; warn only.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));
}
9606
9607 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9608 SMULxy{cond} Rd,Rm,Rs
9609 Error if any register is R15. */
9610
9611 static void
9612 do_smul (void)
9613 {
9614 inst.instruction |= inst.operands[0].reg << 16;
9615 inst.instruction |= inst.operands[1].reg;
9616 inst.instruction |= inst.operands[2].reg << 8;
9617 }
9618
9619 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9620 the same for both ARM and Thumb-2. */
9621
static void
do_srs (void)
{
  int reg;

  /* The base register is optional and defaults to SP; if written
     explicitly it must be r13.  */
  if (inst.operands[0].present)
    {
      reg = inst.operands[0].reg;
      constraint (reg != REG_SP, _("SRS base register must be r13"));
    }
  else
    reg = REG_SP;

  inst.instruction |= reg << 16;
  inst.instruction |= inst.operands[1].imm;	/* target mode number.  */
  /* Writeback may be written on either operand ("sp!" or "#mode!").  */
  if (inst.operands[0].writeback || inst.operands[1].writeback)
    inst.instruction |= WRITE_BACK;
}
9640
9641 /* ARM V6 strex (argument parse). */
9642
static void
do_strex (void)
{
  /* The address must be a plain [Rn] with no offset, index, shift,
     writeback or PC base.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  /* The status register must not overlap the value or base registers.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  /* Rd (status) <12-15>, Rt (value) <0-3>, Rn (base) <16-19>.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.reloc.type = BFD_RELOC_UNUSED;
}
9666
static void
do_t_strexbh (void)
{
  /* Thumb STREXB/STREXH: plain [Rn] address only, like do_strex but
     PC-base rejection is handled elsewhere.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  /* Status register must not overlap the value or base registers.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9681
static void
do_strexd (void)
{
  /* STREXD: the value pair must be an even register followed by the
     next one; the encoding names only the first of the pair.  */
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  /* Status register must not overlap either half of the value pair
     or the base register.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  /* Rd (status) <12-15>, Rt (first of pair) <0-3>, Rn (base) <16-19>.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}
9703
9704 /* ARM V8 STRL. */
9705 static void
9706 do_stlex (void)
9707 {
9708 constraint (inst.operands[0].reg == inst.operands[1].reg
9709 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
9710
9711 do_rd_rm_rn ();
9712 }
9713
static void
do_t_stlex (void)
{
  /* Thumb STLEX: same overlap restriction as the ARM form, but the
     register fields are laid out as Rm, Rd, Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9722
9723 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9724 extends it to 32-bits, and adds the result to a value in another
9725 register. You can specify a rotation by 0, 8, 16, or 24 bits
9726 before extracting the 16-bit value.
9727 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9728 Condition defaults to COND_ALWAYS.
9729 Error if any register uses R15. */
9730
static void
do_sxtah (void)
{
  /* SXTAH family: Rd<12-15>, Rn<16-19>, Rm<0-3>, rotation field <10-11>.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 10;
}
9739
9740 /* ARM V6 SXTH.
9741
9742 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9743 Condition defaults to COND_ALWAYS.
9744 Error if any register uses R15. */
9745
static void
do_sxth (void)
{
  /* SXTH family: Rd<12-15>, Rm<0-3>, rotation field <10-11>.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 10;
}
9753 \f
9754 /* VFP instructions. In a logical order: SP variant first, monad
9755 before dyad, arithmetic then move then load/store. */
9756
/* Single-precision monadic op: Sd, Sm.  */
static void
do_vfp_sp_monadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

/* Single-precision dyadic op: Sd, Sn, Sm.  */
static void
do_vfp_sp_dyadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

/* Single-precision compare with zero: only Sd is encoded.  */
static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}

/* Convert single to double: Dd, Sm.  */
static void
do_vfp_dp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

/* Convert double to single: Sd, Dm.  */
static void
do_vfp_sp_dp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
9791
/* Core register from VFP single: Rd<12-15>, Sn.  */
static void
do_vfp_reg_from_sp (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}

/* Two core registers from a pair of VFP singles (fmrrs).  */
static void
do_vfp_reg2_from_sp2 (void)
{
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

/* VFP single from core register: Sn, Rd<12-15>.  */
static void
do_vfp_sp_from_reg (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}

/* Pair of VFP singles from two core registers (fmsrr).  */
static void
do_vfp_sp2_from_reg2 (void)
{
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
9825
/* Single-precision load/store: Sd plus a coprocessor address.  */
static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}

/* Double-precision load/store: Dd plus a coprocessor address.  */
static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
9839
9840
/* Common encoder for single-precision load/store-multiple.  Operand 0
   is the base register (writeback optional only for the IA form),
   operand 1 is the first register plus a count in .imm.  */
static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;	/* register count.  */
}

/* Common encoder for double-precision load/store-multiple.  The offset
   field counts words: two per D register, plus one extra for the FLDMX/
   FSTMX variants.  */
static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  int count;

  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  count = inst.operands[1].imm << 1;	/* two words per D register.  */
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;

  inst.instruction |= count;
}
9874
/* Thin wrappers selecting the addressing variant for the common
   vfp_{sp,dp}_ldstm encoders above.  */

static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}

static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}

/* The X variants are the FLDMX/FSTMX forms (extra trailing word).  */
static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}

static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
9910
/* Double-precision register-form encoders; each name lists the field
   order the operands are placed into (Rd/Rn/Rm).  */

static void
do_vfp_dp_rd_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

static void
do_vfp_dp_rn_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}

static void
do_vfp_dp_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}

static void
do_vfp_dp_rd_rn_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}

static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}

static void
do_vfp_dp_rm_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
9953
9954 /* VFPv3 instructions. */
/* VFPv3 immediate moves: the 8-bit encoded constant is split into a
   high nibble (bits 16-19) and a low nibble (bits 0-3).  */

static void
do_vfp_sp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}

static void
do_vfp_dp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
9970
/* Encode the fraction-bits field of a VFPv3 fixed-point conversion.
   SRCSIZE is the fixed-point operand width (16 or 32); the field
   holds SRCSIZE minus the written fraction count, split across bit 5
   (lsb) and bits 0-3.  */
static void
vfp_conv (int srcsize)
{
  int immbits = srcsize - inst.operands[1].imm;

  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
    {
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
	 i.e. immbits must be in range 0 - 16.  */
      inst.error = _("immediate value out of range, expected range [0, 16]");
      return;
    }
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
    {
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
	 i.e. immbits must be in range 0 - 31.  */
      inst.error = _("immediate value out of range, expected range [1, 32]");
      return;
    }

  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}
9994
/* Fixed-point conversion wrappers: encode the destination register,
   then the fraction-bits field for the given source width.  */

static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}

static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}

static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}

static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
10022 \f
10023 /* FPA instructions. Also in a logical order. */
10024
10025 static void
10026 do_fpa_cmp (void)
10027 {
10028 inst.instruction |= inst.operands[0].reg << 16;
10029 inst.instruction |= inst.operands[1].reg;
10030 }
10031
static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* The register count (1-4) is spread over the CP_T_X/CP_T_Y bits.  */
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X;		 break;
    case 2: inst.instruction |= CP_T_Y;		 break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: 					 break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* 12 bytes per transferred register (NOTE(review): FPA registers
	 appear to occupy three words each -- confirm).  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;

      /* Descending writeback forms become post-indexed.  */
      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
10070 \f
10071 /* iWMMXt instructions: strictly in alphabetical order. */
10072
/* TANDC/TORC/TEXTRC-family control ops require r15 as destination.  */
static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}

/* TEXTRC: Rd<12-15> plus an immediate field.  */
static void
do_iwmmxt_textrc (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm;
}

/* TEXTRM: Rd<12-15>, wRn<16-19>, lane immediate <0-...>.  */
static void
do_iwmmxt_textrm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].imm;
}

/* TINSR: wRd<16-19>, Rn<12-15>, lane immediate.  */
static void
do_iwmmxt_tinsr (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].imm;
}

/* TMIA: accumulator <5-...>, Rm<0-3>, Rs<12-15>.  */
static void
do_iwmmxt_tmia (void)
{
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}
10109
/* WALIGNI: wRd<12-15>, wRn<16-19>, wRm<0-3>, alignment immediate <20-...>.  */
static void
do_iwmmxt_waligni (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 20;
}

/* WMERGE: wRd<12-15>, wRn<16-19>, wRm<0-3>, merge immediate <21-...>.  */
static void
do_iwmmxt_wmerge (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 21;
}

static void
do_iwmmxt_wmov (void)
{
  /* WMOV rD, rN is an alias for WOR rD, rN, rN.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
10136
static void
do_iwmmxt_wldstbh (void)
{
  int reloc;
  inst.instruction |= inst.operands[0].reg << 12;
  /* Byte/halfword forms use a scaled 8-bit offset; pick the ARM or
     Thumb-2 flavour of the relocation accordingly.  */
  if (thumb_mode)
    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
  else
    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
  encode_arm_cp_address (1, TRUE, FALSE, reloc);
}
10148
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      /* Control-register access must be unconditional; force the
	 0xf condition field.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
10162
static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* iWMMXt2 adds a register-offset form encoded by hand here; everything
     else goes through the normal coprocessor address encoder.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      inst.instruction &= ~0x1a000ff;	/* clear mode bits and offset.  */
      inst.instruction |= (0xfU << 28);	/* unconditional encoding.  */
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
10185
10186 static void
10187 do_iwmmxt_wshufh (void)
10188 {
10189 inst.instruction |= inst.operands[0].reg << 12;
10190 inst.instruction |= inst.operands[1].reg << 16;
10191 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
10192 inst.instruction |= (inst.operands[2].imm & 0x0f);
10193 }
10194
10195 static void
10196 do_iwmmxt_wzero (void)
10197 {
10198 /* WZERO reg is an alias for WANDN reg, reg, reg. */
10199 inst.instruction |= inst.operands[0].reg;
10200 inst.instruction |= inst.operands[0].reg << 12;
10201 inst.instruction |= inst.operands[0].reg << 16;
10202 }
10203
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  /* iWMMXt shift/rotate instructions that take either a register
     shift count, or (iWMMXt2 only) a 5-bit immediate count.  */
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* A shift count of zero is not directly encodable; rewrite
	   the instruction into an equivalent.  Bits 20-23 of the
	   opcode distinguish the h/w/d size variants.  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  The immediate is split: bit 4 of the count
       goes to insn bit 8, bits 0-3 stay in place; the encoding is
       unconditional (0xF condition).  */
    inst.operands[2].imm &= 0x1f;
    inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
10253 \f
10254 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10255 operations first, then control, shift, and load/store. */
10256
10257 /* Insns like "foo X,Y,Z". */
10258
10259 static void
10260 do_mav_triple (void)
10261 {
10262 inst.instruction |= inst.operands[0].reg << 16;
10263 inst.instruction |= inst.operands[1].reg;
10264 inst.instruction |= inst.operands[2].reg << 12;
10265 }
10266
10267 /* Insns like "foo W,X,Y,Z".
10268 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10269
10270 static void
10271 do_mav_quad (void)
10272 {
10273 inst.instruction |= inst.operands[0].reg << 5;
10274 inst.instruction |= inst.operands[1].reg << 12;
10275 inst.instruction |= inst.operands[2].reg << 16;
10276 inst.instruction |= inst.operands[3].reg;
10277 }
10278
/* cfmvsc32<cond> DSPSC,MVDX[15:0].  */
static void
do_mav_dspsc (void)
{
  /* DSPSC is implicit in the opcode; only the MVDX source register
     needs encoding, in bits 12-15.  */
  inst.instruction |= inst.operands[1].reg << 12;
}
10285
10286 /* Maverick shift immediate instructions.
10287 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10288 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
10289
10290 static void
10291 do_mav_shift (void)
10292 {
10293 int imm = inst.operands[2].imm;
10294
10295 inst.instruction |= inst.operands[0].reg << 12;
10296 inst.instruction |= inst.operands[1].reg << 16;
10297
10298 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
10299 Bits 5-7 of the insn should have bits 4-6 of the immediate.
10300 Bit 4 should be 0. */
10301 imm = (imm & 0xf) | ((imm & 0x70) << 1);
10302
10303 inst.instruction |= imm;
10304 }
10305 \f
10306 /* XScale instructions. Also sorted arithmetic before move. */
10307
10308 /* Xscale multiply-accumulate (argument parse)
10309 MIAcc acc0,Rm,Rs
10310 MIAPHcc acc0,Rm,Rs
10311 MIAxycc acc0,Rm,Rs. */
10312
10313 static void
10314 do_xsc_mia (void)
10315 {
10316 inst.instruction |= inst.operands[1].reg;
10317 inst.instruction |= inst.operands[2].reg << 12;
10318 }
10319
10320 /* Xscale move-accumulator-register (argument parse)
10321
10322 MARcc acc0,RdLo,RdHi. */
10323
10324 static void
10325 do_xsc_mar (void)
10326 {
10327 inst.instruction |= inst.operands[1].reg << 12;
10328 inst.instruction |= inst.operands[2].reg << 16;
10329 }
10330
10331 /* Xscale move-register-accumulator (argument parse)
10332
10333 MRAcc RdLo,RdHi,acc0. */
10334
10335 static void
10336 do_xsc_mra (void)
10337 {
10338 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
10339 inst.instruction |= inst.operands[0].reg << 12;
10340 inst.instruction |= inst.operands[1].reg << 16;
10341 }
10342 \f
10343 /* Encoding functions relevant only to Thumb. */
10344
10345 /* inst.operands[i] is a shifted-register operand; encode
10346 it into inst.instruction in the format used by Thumb32. */
10347
static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  /* Thumb32 data-processing operands only take an immediate shift
     amount, never a register-specified shift.  */
  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR with a zero shift amount.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
			|| shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      /* A shift of zero is LSL #0; LSR/ASR by 32 is encoded with an
	 amount of zero.  */
      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      /* The amount is split: bits 2-4 to insn bits 12-14, bits 0-1
	 to insn bits 6-7; the shift type sits in bits 4-5.  */
      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
10379
10380
10381 /* inst.operands[i] was set up by parse_address. Encode it into a
10382 Thumb32 format load or store instruction. Reject forms that cannot
10383 be used with such instructions. If is_t is true, reject forms that
10384 cannot be used with a T instruction; if is_d is true, reject forms
10385 that cannot be used with a D instruction. If it is a store insn,
10386 reject PC in Rn. */
10387
static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  /* inst.operands[i] was set up by parse_address.  Encode it into a
     Thumb32 load/store, rejecting forms the instruction (or its T/D
     variant) cannot use; see the block comment above.  */
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* Register-offset form: [Rn, Rm {, LSL #shift}].  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  constraint (inst.reloc.exp.X_op != O_constant,
		      _("expression too complex"));
	  /* Only LSL #0..#3 is encodable, in bits 4-5.  */
	  constraint (inst.reloc.exp.X_add_number < 0
		      || inst.reloc.exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
	}
      /* Offset fully encoded; no fixup needed.  */
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* Immediate pre-indexed (or plain offset) form.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      /* PC-relative addressing is only valid for loads.  */
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      if (is_d)
	{
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      /* The immediate offset is resolved later via this fixup.  */
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
10459
10460 /* Table of Thumb instructions which exist in both 16- and 32-bit
10461 encodings (the latter only in post-V6T2 cores). The index is the
10462 value used in the insns table below. When there is more than one
10463 possible 16-bit encoding for the instruction, this table always
10464 holds variant (1).
10465 Also contains several pseudo-instructions used during relaxation. */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
/* First expansion: an enum of T_MNEM_* opcode selector codes.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* Second expansion: the 16-bit opcode for each selector, indexed by
   T_MNEM_* code minus the offset.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* Third expansion: the corresponding 32-bit opcodes.  Bit 20
   (0x00100000) distinguishes the flag-setting variants, as tested by
   THUMB_SETS_FLAGS.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
10565
10566 /* Thumb instruction encoders, in alphabetical order. */
10567
10568 /* ADDW or SUBW. */
10569
10570 static void
10571 do_t_add_sub_w (void)
10572 {
10573 int Rd, Rn;
10574
10575 Rd = inst.operands[0].reg;
10576 Rn = inst.operands[1].reg;
10577
10578 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10579 is the SP-{plus,minus}-immediate form of the instruction. */
10580 if (Rn == REG_SP)
10581 constraint (Rd == REG_PC, BAD_PC);
10582 else
10583 reject_bad_reg (Rd);
10584
10585 inst.instruction |= (Rn << 16) | (Rd << 8);
10586 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
10587 }
10588
/* Parse an add or subtract instruction.  We get here with inst.instruction
   equaling any of THUMB_OPCODE_add, adds, sub, or subs.  Selects between
   the 16-bit and 32-bit Thumb encodings (with relaxation where allowed)
   and sets up the appropriate immediate relocation.  */

static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  if (Rd == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      /* Flag-setting forms prefer the narrow encoding outside an IT
	 block; non-flag-setting forms prefer it inside one.  */
      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      if (flags)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (!inst.operands[2].isreg)
	{
	  /* Immediate third operand.  */
	  int add;

	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  /* The Thumb-1 ALU_ABS group relocations must keep the
		     narrow encoding; anything else either gets a fixup
		     now (forced 16-bit) or is left relaxable.  */
		  if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		      || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		    {
		      if (inst.size_req == 2)
			inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
		      else
			inst.relax = opcode;
		    }
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      /* Wide encoding required (or no narrow form found).  */
	      constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
			  && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
			  THUMB1_RELOC_ONLY);
	      if (Rd == REG_PC)
		{
		  /* Only the exception-return form SUBS PC, LR, #const
		     is permitted with PC as destination.  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.reloc.exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.reloc.exp.X_add_number < 0
			      || inst.reloc.exp.X_add_number > 0xff,
			      _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.reloc.exp.X_add_number;
		  inst.reloc.type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  /* Register third operand.  */
	  unsigned int value = inst.reloc.exp.X_add_number;
	  unsigned int shift = inst.operands[2].shift_kind;

	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
		      _("shift value over 3 not allowed in thumb mode"));
	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
		      _("only LSL shift allowed in thumb mode"));
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified (divided) syntax: only the Thumb-1 forms exist.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
10809
static void
do_t_adr (void)
{
  /* ADR (load PC-relative address).  Chooses between a relaxable
     16-bit encoding, the wide ADR.W encoding, and a forced 16-bit
     encoding, depending on syntax mode and requested size.  */
  unsigned Rd;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  if (unified_syntax && inst.size_req == 0 && Rd <= 7)
    {
      /* Defer to section relaxation.  */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd << 4;
    }
  else if (unified_syntax && inst.size_req != 2)
    {
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.reloc.pc_rel = 1;
    }
  else
    {
      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
      inst.reloc.exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.reloc.pc_rel = 1;
      inst.instruction |= Rd << 4;
    }

  /* A defined Thumb function symbol gets the low (Thumb) bit set in
     the computed address.  */
  if (inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_number += 1;
}
10849
/* Arithmetic instructions for which there is just one 16-bit
   instruction encoding, and it allows only two low registers.
   For maximal compatibility with ARM syntax, we allow three register
   operands even when Thumb-32 instructions are not available, as long
   as the first two are identical.  For instance, both "sbc r0,r1" and
   "sbc r0,r0,r1" are allowed.  */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  /* The narrow form requires three low registers, no shift,
	     and no explicit ".w" size request.  */
	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
10938
/* Similarly, but for instructions where the arithmetic operation is
   commutative, so we can allow either of them to be different from
   the destination operand in a 16-bit instruction.  For instance, all
   three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
   accepted.  */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  /* The narrow form requires three low registers, no shift,
	     and no explicit ".w" size request.  */
	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow)
	    {
	      /* Commutative: the destination may match either source.  */
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
11039
11040 static void
11041 do_t_bfc (void)
11042 {
11043 unsigned Rd;
11044 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
11045 constraint (msb > 32, _("bit-field extends past end of register"));
11046 /* The instruction encoding stores the LSB and MSB,
11047 not the LSB and width. */
11048 Rd = inst.operands[0].reg;
11049 reject_bad_reg (Rd);
11050 inst.instruction |= Rd << 8;
11051 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
11052 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
11053 inst.instruction |= msb - 1;
11054 }
11055
11056 static void
11057 do_t_bfi (void)
11058 {
11059 int Rd, Rn;
11060 unsigned int msb;
11061
11062 Rd = inst.operands[0].reg;
11063 reject_bad_reg (Rd);
11064
11065 /* #0 in second position is alternative syntax for bfc, which is
11066 the same instruction but with REG_PC in the Rm field. */
11067 if (!inst.operands[1].isreg)
11068 Rn = REG_PC;
11069 else
11070 {
11071 Rn = inst.operands[1].reg;
11072 reject_bad_reg (Rn);
11073 }
11074
11075 msb = inst.operands[2].imm + inst.operands[3].imm;
11076 constraint (msb > 32, _("bit-field extends past end of register"));
11077 /* The instruction encoding stores the LSB and MSB,
11078 not the LSB and width. */
11079 inst.instruction |= Rd << 8;
11080 inst.instruction |= Rn << 16;
11081 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
11082 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
11083 inst.instruction |= msb - 1;
11084 }
11085
11086 static void
11087 do_t_bfx (void)
11088 {
11089 unsigned Rd, Rn;
11090
11091 Rd = inst.operands[0].reg;
11092 Rn = inst.operands[1].reg;
11093
11094 reject_bad_reg (Rd);
11095 reject_bad_reg (Rn);
11096
11097 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
11098 _("bit-field extends past end of register"));
11099 inst.instruction |= Rd << 8;
11100 inst.instruction |= Rn << 16;
11101 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
11102 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
11103 inst.instruction |= inst.operands[3].imm - 1;
11104 }
11105
11106 /* ARM V5 Thumb BLX (argument parse)
11107 BLX <target_addr> which is BLX(1)
11108 BLX <Rm> which is BLX(2)
11109 Unfortunately, there are two different opcodes for this mnemonic.
11110 So, the insns[].value is not used, and the code here zaps values
11111 into inst.instruction.
11112
11113 ??? How to take advantage of the additional two bits of displacement
11114 available in Thumb32 mode? Need new relocation? */
11115
11116 static void
11117 do_t_blx (void)
11118 {
11119 set_it_insn_type_last ();
11120
11121 if (inst.operands[0].isreg)
11122 {
11123 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
11124 /* We have a register, so this is BLX(2). */
11125 inst.instruction |= inst.operands[0].reg << 3;
11126 }
11127 else
11128 {
11129 /* No register. This must be BLX(1). */
11130 inst.instruction = 0xf000e800;
11131 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
11132 }
11133 }
11134
static void
do_t_branch (void)
{
  /* Thumb B/Bcc: select the 16- or 32-bit encoding and the matching
     PC-relative branch relocation; 16-bit forms may be left for
     section relaxation.  */
  int opcode;
  int cond;
  bfd_reloc_code_real_type reloc;

  cond = inst.cond;
  set_it_insn_type (IF_INSIDE_IT_LAST_INSN);

  if (in_it_block ())
    {
      /* Conditional branches inside IT blocks are encoded as unconditional
	 branches.  */
      cond = COND_ALWAYS;
    }
  else
    cond = inst.cond;

  if (cond != COND_ALWAYS)
    opcode = T_MNEM_bcond;
  else
    opcode = inst.instruction;

  /* Use the wide encoding when a 4-byte size is requested, or when no
     2-byte size was forced and the target is a relocated symbol or a
     known constant.  */
  if (unified_syntax
      && (inst.size_req == 4
	  || (inst.size_req != 2
	      && (inst.operands[0].hasreloc
		  || inst.reloc.exp.X_op == O_constant))))
    {
      inst.instruction = THUMB_OP32(opcode);
      if (cond == COND_ALWAYS)
	reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
      else
	{
	  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
		      _("selected architecture does not support "
			"wide conditional branch instruction"));

	  gas_assert (cond != 0xF);
	  /* The condition occupies bits 22-25 of the wide encoding.  */
	  inst.instruction |= cond << 22;
	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
    }
  else
    {
      inst.instruction = THUMB_OP16(opcode);
      if (cond == COND_ALWAYS)
	reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
      else
	{
	  inst.instruction |= cond << 8;
	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
	}
      /* Allow section relaxation.  */
      if (unified_syntax && inst.size_req != 2)
	inst.relax = opcode;
    }
  inst.reloc.type = reloc;
  inst.reloc.pc_rel = 1;
}
11196
11197 /* Actually do the work for Thumb state bkpt and hlt. The only difference
11198 between the two is the maximum immediate allowed - which is passed in
11199 RANGE. */
11200 static void
11201 do_t_bkpt_hlt1 (int range)
11202 {
11203 constraint (inst.cond != COND_ALWAYS,
11204 _("instruction is always unconditional"));
11205 if (inst.operands[0].present)
11206 {
11207 constraint (inst.operands[0].imm > range,
11208 _("immediate value out of range"));
11209 inst.instruction |= inst.operands[0].imm;
11210 }
11211
11212 set_it_insn_type (NEUTRAL_IT_INSN);
11213 }
11214
static void
do_t_hlt (void)
{
  /* HLT takes at most a 6-bit immediate (0..63).  */
  do_t_bkpt_hlt1 (63);
}
11220
static void
do_t_bkpt (void)
{
  /* BKPT takes at most an 8-bit immediate (0..255).  */
  do_t_bkpt_hlt1 (255);
}
11226
/* Thumb BL / BLX with an immediate target (23-bit branch encoding).  */
static void
do_t_branch23 (void)
{
  set_it_insn_type_last ();
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);

  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
     this file.  We used to simply ignore the PLT reloc type here --
     the branch encoding is now needed to deal with TLSCALL relocs.
     So if we see a PLT reloc now, put it back to how it used to be to
     keep the preexisting behaviour.  */
  if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
    inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	 inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);
#endif
}
11254
/* Thumb BX (branch exchange): the target register goes in bits <6:3>.  */
static void
do_t_bx (void)
{
  set_it_insn_type_last ();
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
     should cause the alignment to be checked once it is known.  This is
     because BX PC only works if the instruction is word aligned.  */
}
11264
11265 static void
11266 do_t_bxj (void)
11267 {
11268 int Rm;
11269
11270 set_it_insn_type_last ();
11271 Rm = inst.operands[0].reg;
11272 reject_bad_reg (Rm);
11273 inst.instruction |= Rm << 16;
11274 }
11275
11276 static void
11277 do_t_clz (void)
11278 {
11279 unsigned Rd;
11280 unsigned Rm;
11281
11282 Rd = inst.operands[0].reg;
11283 Rm = inst.operands[1].reg;
11284
11285 reject_bad_reg (Rd);
11286 reject_bad_reg (Rm);
11287
11288 inst.instruction |= Rd << 8;
11289 inst.instruction |= Rm << 16;
11290 inst.instruction |= Rm;
11291 }
11292
/* Thumb CSDB barrier: no operands to encode; merely record that the
   instruction must sit outside any IT block.  */
static void
do_t_csdb (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
}
11298
/* Thumb CPS (mode-only form): OR the immediate operand straight into
   the opcode.  Not permitted inside an IT block.  */
static void
do_t_cps (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction |= inst.operands[0].imm;
}
11305
/* Thumb CPSIE/CPSID (interrupt-flags form).  Uses the wide encoding
   when unified syntax supplies a mode operand or a 32-bit size was
   requested; otherwise falls back to the narrow encoding.  */
static void
do_t_cpsi (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* Wide encoding: carry over the imod bits from the narrow
	 template, then rebuild on the T32 CPS base opcode.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	/* Bit 8 flags that a mode change accompanies the mask change.  */
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
11333
11334 /* THUMB CPY instruction (argument parse). */
11335
11336 static void
11337 do_t_cpy (void)
11338 {
11339 if (inst.size_req == 4)
11340 {
11341 inst.instruction = THUMB_OP32 (T_MNEM_mov);
11342 inst.instruction |= inst.operands[0].reg << 8;
11343 inst.instruction |= inst.operands[1].reg;
11344 }
11345 else
11346 {
11347 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
11348 inst.instruction |= (inst.operands[0].reg & 0x7);
11349 inst.instruction |= inst.operands[1].reg << 3;
11350 }
11351 }
11352
11353 static void
11354 do_t_cbz (void)
11355 {
11356 set_it_insn_type (OUTSIDE_IT_INSN);
11357 constraint (inst.operands[0].reg > 7, BAD_HIREG);
11358 inst.instruction |= inst.operands[0].reg;
11359 inst.reloc.pc_rel = 1;
11360 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
11361 }
11362
/* Thumb DBG hint: the option immediate is OR-ed into the low bits.  */
static void
do_t_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
11368
11369 static void
11370 do_t_div (void)
11371 {
11372 unsigned Rd, Rn, Rm;
11373
11374 Rd = inst.operands[0].reg;
11375 Rn = (inst.operands[1].present
11376 ? inst.operands[1].reg : Rd);
11377 Rm = inst.operands[2].reg;
11378
11379 reject_bad_reg (Rd);
11380 reject_bad_reg (Rn);
11381 reject_bad_reg (Rm);
11382
11383 inst.instruction |= Rd << 8;
11384 inst.instruction |= Rn << 16;
11385 inst.instruction |= Rm;
11386 }
11387
11388 static void
11389 do_t_hint (void)
11390 {
11391 if (unified_syntax && inst.size_req == 4)
11392 inst.instruction = THUMB_OP32 (inst.instruction);
11393 else
11394 inst.instruction = THUMB_OP16 (inst.instruction);
11395 }
11396
/* Encode the IT instruction and record its parameters in now_it so the
   following instructions can be validated against the IT block.  */
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_it_insn_type (IT_INSN);
  now_it.mask = (inst.instruction & 0xf) | 0x10;
  now_it.cc = cond;
  now_it.warn_deprecated = FALSE;

  /* If the condition is a negative condition, invert the mask.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      /* The position of the lowest set mask bit determines the block
	 length; the bits above it are the then/else selectors that
	 must be flipped for a negated condition.  */
      if ((mask & 0x7) == 0)
	{
	  /* No conversion needed.  */
	  now_it.block_length = 1;
	}
      else if ((mask & 0x3) == 0)
	{
	  mask ^= 0x8;
	  now_it.block_length = 2;
	}
      else if ((mask & 0x1) == 0)
	{
	  mask ^= 0xC;
	  now_it.block_length = 3;
	}
      else
	{
	  mask ^= 0xE;
	  now_it.block_length = 4;
	}

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
11439
/* Helper function used for both push/pop and ldm/stm.  Validates the
   register list in MASK against BASE/WRITEBACK rules and emits either
   a real LDM/STM or, for a single-register list, an equivalent
   LDR/STR with the appropriate addressing mode.  */
static void
encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
{
  bfd_boolean load;

  /* Bit 20 of the template distinguishes loads from stores.  */
  load = (inst.instruction & (1 << 20)) != 0;

  if (mask & (1 << 13))
    inst.error = _("SP not allowed in register list");

  if ((mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
		   "using write back is UNPREDICTABLE");

  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    /* Loading PC branches, so this must be the last insn of
	       any enclosing IT block.  */
	    set_it_insn_type_last ();
	}
    }
  else
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");
    }

  if ((mask & (mask - 1)) == 0)
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* The lone register's number (index of the set bit) becomes the
	 LDR/STR transfer register field.  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  inst.instruction |= base << 16;
}
11503
/* Thumb LDM/STM (and LDMDB/STMDB).  Tries hard to select a 16-bit
   encoding (plain LDMIA/STMIA, a single-register LDR/STR, or PUSH/POP
   for SP) before falling back to the 32-bit form.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  /* Bit for the base register within the register list.  */
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7)
	    {
	      /* 16-bit stmia always writes back; 16-bit ldmia writes
		 back exactly when the base is absent from the list.  */
	      if (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask)))
		{
		  if (inst.instruction == T_MNEM_stmia
		      && (inst.operands[1].imm & mask)
		      && (inst.operands[1].imm & (mask - 1)))
		    as_warn (_("value stored for r%d is UNKNOWN"),
			     inst.operands[0].reg);

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= inst.operands[0].reg << 8;
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* This means 1 register in reg list one of 3 situations:
		     1. Instruction is stmia, but without writeback.
		     2. ldmia without writeback, but with Rn not in
			reglist.
		     3. ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     case 1 and 2 which can be converted into a 16-bit
		     str or ldr.  The SP cases are handled below.  */
		  unsigned long opcode;
		  /* First, record an error for Case 3.  */
		  if (inst.operands[1].imm & mask
		      && inst.operands[0].writeback)
		    inst.error =
			_("having the base register in the register list when "
			  "using write back is UNPREDICTABLE");

		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
			    : T_MNEM_ldr);
		  inst.instruction = THUMB_OP16 (opcode);
		  inst.instruction |= inst.operands[0].reg << 3;
		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
		  narrow = TRUE;
		}
	    }
	  else if (inst.operands[0] .reg == REG_SP)
	    {
	      /* SP base: use PUSH/POP with writeback, or an SP-relative
		 STR/LDR for a single-register list without it.  */
	      if (inst.operands[0].writeback)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
			            ? T_MNEM_push : T_MNEM_pop);
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
		  narrow = TRUE;
		}
	    }
	}

      if (!narrow)
	{
	  /* 32-bit form needed after all.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
				inst.operands[0].writeback);
	}
    }
  else
    {
      /* Pre-unified syntax: only narrow LDMIA/STMIA with low
	 registers is expressible.  */
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNKNOWN"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
11631
11632 static void
11633 do_t_ldrex (void)
11634 {
11635 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
11636 || inst.operands[1].postind || inst.operands[1].writeback
11637 || inst.operands[1].immisreg || inst.operands[1].shifted
11638 || inst.operands[1].negative,
11639 BAD_ADDR_MODE);
11640
11641 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
11642
11643 inst.instruction |= inst.operands[0].reg << 12;
11644 inst.instruction |= inst.operands[1].reg << 16;
11645 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
11646 }
11647
11648 static void
11649 do_t_ldrexd (void)
11650 {
11651 if (!inst.operands[1].present)
11652 {
11653 constraint (inst.operands[0].reg == REG_LR,
11654 _("r14 not allowed as first register "
11655 "when second register is omitted"));
11656 inst.operands[1].reg = inst.operands[0].reg + 1;
11657 }
11658 constraint (inst.operands[0].reg == inst.operands[1].reg,
11659 BAD_OVERLAP);
11660
11661 inst.instruction |= inst.operands[0].reg << 12;
11662 inst.instruction |= inst.operands[1].reg << 8;
11663 inst.instruction |= inst.operands[2].reg << 16;
11664 }
11665
/* Thumb single-register load/store (LDR/STR and byte/halfword/signed
   variants).  Selects a 16-bit encoding where one exists, otherwise a
   32-bit one; handles literal-pool loads and records relaxation
   opportunities.  */
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  /* A load into PC is a branch: must end any enclosing IT block.  */
  if (inst.operands[0].isreg
      && !inst.operands[0].preind
      && inst.operands[0].reg == REG_PC)
    set_it_insn_type_last ();

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  /* Literal/constant operand: may be turned into a MOV or a
	     literal-pool load.  */
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Rik] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
		reject_bad_reg (inst.operands[1].imm);
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* PC- or SP-relative forms use dedicated opcodes.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.reloc.pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		/* Leave the door open for relaxation to 32 bits.  */
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */

      /* Warning for Erratum 752419.  */
      if (opcode == T_MNEM_ldr
	  && inst.operands[0].reg == REG_SP
	  && inst.operands[1].writeback == 1
	  && !inst.operands[1].immisreg)
	{
	  if (no_cpu_selected ()
	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
	    as_warn (_("This instruction may be unpredictable "
		       "if executed on M-profile cores "
		       "with interrupts enabled."));
	}

      /* Do some validations regarding addressing modes.  */
      if (inst.operands[1].immisreg)
	reject_bad_reg (inst.operands[1].imm);

      constraint (inst.operands[1].writeback == 1
		  && inst.operands[0].reg == inst.operands[1].reg,
		  BAD_OVERLAP);

      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      check_ldr_r15_aligned ();
      return;
    }

  /* Pre-unified (classic Thumb) syntax from here on.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      /* PC/SP base: only word-sized, immediate-offset accesses.  */
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

 op16:
  /* Convert the immediate-offset opcode to its register-offset twin.  */
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
11852
/* Thumb-2 LDRD/STRD.  A missing second transfer register defaults to
   the one after the first.  */
static void
do_t_ldstd (void)
{
  if (!inst.operands[1].present)
    {
      inst.operands[1].reg = inst.operands[0].reg + 1;
      constraint (inst.operands[0].reg == REG_LR,
		  _("r14 not allowed here"));
      constraint (inst.operands[0].reg == REG_R12,
		  _("r12 not allowed here"));
    }

  if (inst.operands[2].writeback
      && (inst.operands[0].reg == inst.operands[2].reg
	  || inst.operands[1].reg == inst.operands[2].reg))
    as_warn (_("base register written back, and overlaps "
	       "one of transfer registers"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
}
11875
/* Thumb-2 unprivileged load/store (LDRT/STRT family): encode the
   transfer register, then the T-form address.  */
static void
do_t_ldstt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
}
11882
11883 static void
11884 do_t_mla (void)
11885 {
11886 unsigned Rd, Rn, Rm, Ra;
11887
11888 Rd = inst.operands[0].reg;
11889 Rn = inst.operands[1].reg;
11890 Rm = inst.operands[2].reg;
11891 Ra = inst.operands[3].reg;
11892
11893 reject_bad_reg (Rd);
11894 reject_bad_reg (Rn);
11895 reject_bad_reg (Rm);
11896 reject_bad_reg (Ra);
11897
11898 inst.instruction |= Rd << 8;
11899 inst.instruction |= Rn << 16;
11900 inst.instruction |= Rm;
11901 inst.instruction |= Ra << 12;
11902 }
11903
11904 static void
11905 do_t_mlal (void)
11906 {
11907 unsigned RdLo, RdHi, Rn, Rm;
11908
11909 RdLo = inst.operands[0].reg;
11910 RdHi = inst.operands[1].reg;
11911 Rn = inst.operands[2].reg;
11912 Rm = inst.operands[3].reg;
11913
11914 reject_bad_reg (RdLo);
11915 reject_bad_reg (RdHi);
11916 reject_bad_reg (Rn);
11917 reject_bad_reg (Rm);
11918
11919 inst.instruction |= RdLo << 12;
11920 inst.instruction |= RdHi << 8;
11921 inst.instruction |= Rn << 16;
11922 inst.instruction |= Rm;
11923 }
11924
/* Thumb MOV/MOVS/CMP with a register, shifted register, or immediate
   operand.  Chooses between narrow and wide encodings, rewrites
   register-shift forms as shift instructions, and applies the many
   SP/PC legality rules.  */
static void
do_t_mov_cmp (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* A write to PC is a branch: end any enclosing IT block here.  */
  if (Rn == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      /* Destination register field position differs between the MOV
	 and CMP wide encodings.  */
      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean narrow;
      bfd_boolean low_regs;

      low_regs = (Rn <= 7 && Rm <= 7);
      opcode = inst.instruction;
      if (in_it_block ())
	narrow = opcode != T_MNEM_movs;
      else
	narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
	  || inst.operands[1].shifted)
	narrow = FALSE;

      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
	  && !inst.operands[1].shifted
	  && Rn == REG_PC
	  && Rm == REG_LR)
	{
	  inst.instruction = T2_SUBS_PC_LR;
	  return;
	}

      if (opcode == T_MNEM_cmp)
	{
	  constraint (Rn == REG_PC, BAD_PC);
	  if (narrow)
	    {
	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
		 but valid.  */
	      warn_deprecated_sp (Rm);
	      /* R15 was documented as a valid choice for Rm in ARMv6,
		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
		 tools reject R15, so we do too.  */
	      constraint (Rm == REG_PC, BAD_PC);
	    }
	  else
	    reject_bad_reg (Rm);
	}
      else if (opcode == T_MNEM_mov
	       || opcode == T_MNEM_movs)
	{
	  if (inst.operands[1].isreg)
	    {
	      if (opcode == T_MNEM_movs)
		{
		  reject_bad_reg (Rn);
		  reject_bad_reg (Rm);
		}
	      else if (narrow)
		{
		  /* This is mov.n.  */
		  if ((Rn == REG_SP || Rn == REG_PC)
		      && (Rm == REG_SP || Rm == REG_PC))
		    {
		      as_tsktsk (_("Use of r%u as a source register is "
				   "deprecated when r%u is the destination "
				   "register."), Rm, Rn);
		    }
		}
	      else
		{
		  /* This is mov.w.  */
		  constraint (Rn == REG_PC, BAD_PC);
		  constraint (Rm == REG_PC, BAD_PC);
		  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
		    constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
		}
	    }
	  else
	    reject_bad_reg (Rn);
	}

      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand.  */
	  if (!in_it_block () && opcode == T_MNEM_mov)
	    narrow = 0;
	  if (low_regs && narrow)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= Rn << 8;
	      if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		  || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		{
		  if (inst.size_req == 2)
		    inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
		  else
		    inst.relax = opcode;
		}
	    }
	  else
	    {
	      /* The Thumb1 group relocs only fit the 16-bit form.  */
	      constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
			  && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
			  THUMB1_RELOC_ONLY);

	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= Rn << r0off;
	      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
	       && (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs))
	{
	  /* Register shifts are encoded as separate shift instructions.  */
	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);

	  if (in_it_block ())
	    narrow = !flags;
	  else
	    narrow = flags;

	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (!low_regs || inst.operands[1].imm > 7)
	    narrow = FALSE;

	  if (Rn != Rm)
	    narrow = FALSE;

	  switch (inst.operands[1].shift_kind)
	    {
	    case SHIFT_LSL:
	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
	      break;
	    case SHIFT_ASR:
	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
	      break;
	    case SHIFT_LSR:
	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
	      break;
	    case SHIFT_ROR:
	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
	      break;
	    default:
	      abort ();
	    }

	  inst.instruction = opcode;
	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= inst.operands[1].imm << 3;
	    }
	  else
	    {
	      if (flags)
		inst.instruction |= CONDS_BIT;

	      inst.instruction |= Rn << 8;
	      inst.instruction |= Rm << 16;
	      inst.instruction |= inst.operands[1].imm;
	    }
	}
      else if (!narrow)
	{
	  /* Some mov with immediate shift have narrow variants.
	     Register shifts are handled above.  */
	  if (low_regs && inst.operands[1].shifted
	      && (inst.instruction == T_MNEM_mov
		  || inst.instruction == T_MNEM_movs))
	    {
	      if (in_it_block ())
		narrow = (inst.instruction == T_MNEM_mov);
	      else
		narrow = (inst.instruction == T_MNEM_movs);
	    }

	  if (narrow)
	    {
	      switch (inst.operands[1].shift_kind)
		{
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		default: narrow = FALSE; break;
		}
	    }

	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
      else
	/* Narrow, unshifted register forms.  */
	switch (inst.instruction)
	  {
	  case T_MNEM_mov:
	    /* In v4t or v5t a move of two lowregs produces unpredictable
	       results.  Don't allow this.  */
	    if (low_regs)
	      {
		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
			    "MOV Rd, Rs with two low registers is not "
			    "permitted on this architecture");
		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
					arm_ext_v6);
	      }

	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (Rn & 0x8) << 4;
	    inst.instruction |= (Rn & 0x7);
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_movs:
	    /* We know we have low registers at this point.
	       Generate LSLS Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_LSL_I;
	    inst.instruction |= Rn;
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_cmp:
	    if (low_regs)
	      {
		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= Rn;
		inst.instruction |= Rm << 3;
	      }
	    else
	      {
		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (Rn & 0x8) << 4;
		inst.instruction |= (Rn & 0x7);
		inst.instruction |= Rm << 3;
	      }
	    break;
	  }
      return;
    }

  /* Pre-unified syntax from here on.  */
  inst.instruction = THUMB_OP16 (inst.instruction);

  /* PR 10443: Do not silently ignore shifted operands.  */
  constraint (inst.operands[1].shifted,
	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));

  if (inst.operands[1].isreg)
    {
      if (Rn < 8 && Rm < 8)
	{
	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
	     since a MOV instruction produces unpredictable results.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_ADD_I3;
	  else
	    inst.instruction = T_OPCODE_CMP_LR;

	  inst.instruction |= Rn;
	  inst.instruction |= Rm << 3;
	}
      else
	{
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_MOV_HR;
	  else
	    inst.instruction = T_OPCODE_CMP_HR;
	  do_t_cpy ();
	}
    }
  else
    {
      constraint (Rn > 7,
		  _("only lo regs allowed with immediate"));
      inst.instruction |= Rn << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
12222
/* Thumb-2 MOVW/MOVT.  Translates :lower16:/:upper16: relocs to their
   Thumb equivalents and, for a resolved constant, scatters the 16-bit
   immediate into the imm4:i:imm3:imm8 fields.  */
static void
do_t_mov16 (void)
{
  unsigned Rd;
  bfd_vma imm;
  bfd_boolean top;

  /* Bit 23 set in the template means this is MOVT.  */
  top = (inst.instruction & 0x00800000) != 0;
  if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
    {
      constraint (top, _(":lower16: not allowed in this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
    }
  else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
    {
      constraint (!top, _(":upper16: not allowed in this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
    }

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  inst.instruction |= Rd << 8;
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      /* Constant already known: encode it now, no fixup needed.  */
      imm = inst.reloc.exp.X_add_number;
      inst.instruction |= (imm & 0xf000) << 4;
      inst.instruction |= (imm & 0x0800) << 15;
      inst.instruction |= (imm & 0x0700) << 4;
      inst.instruction |= (imm & 0x00ff);
    }
}
12255
/* Thumb MVN/MVNS/TST/CMN/CMP-style two-operand instructions: pick the
   narrow or wide encoding and apply the SP/PC restrictions.  */
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      /* First-operand field position differs between the MVN and
	 TST/CMN wide encodings.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn
	       || inst.instruction == T_MNEM_tst)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Pre-unified syntax: narrow encoding with low registers only.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
12335
/* Encode a Thumb MRS instruction (read a special-purpose register into
   a core register).  */
static void
do_t_mrs (void)
{
  unsigned Rd;

  /* VFP system registers are handled by the VFP-specific MRS encoder.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      /* Banked register form (register operand parsed into a composite
	 value carrying the SYSm/R encoding bits).  */
      unsigned br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698: The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bfd_boolean m_profile =
	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
	  constraint ((flags != 0) && m_profile, _("selected processor does "
			"not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      inst.instruction |= 0xf0000;
    }
}
12383
/* Encode a Thumb MSR instruction (write a core register to a
   special-purpose register).  */
static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  /* VFP system registers are handled by the VFP-specific MSR encoder.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  /* The destination is either a banked register (parsed as a register)
     or a PSR-plus-field-mask immediate; both carry their encoding bits
     in the same positions.  */
  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698: The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bfd_boolean m_profile =
	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		    && (bits & ~(PSR_s | PSR_f)) != 0)
		   || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		       && bits != PSR_f)) && m_profile,
		  _("selected processor does not support requested special "
		    "purpose register"));
    }
  else
    constraint ((flags & 0xff) != 0, _("selected processor does not support "
				       "requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
12430
/* Encode a Thumb MUL/MULS instruction, choosing between the 16-bit
   (destination must overlap a source) and 32-bit encodings.  */
static void
do_t_mul (void)
{
  bfd_boolean narrow;
  unsigned Rd, Rn, Rm;

  /* Two-operand form: MUL Rd, Rm means MUL Rd, Rd, Rm.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  if (unified_syntax)
    {
      if (inst.size_req == 4
	  || (Rd != Rn
	      && Rd != Rm)
	  || Rn > 7
	  || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_muls)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
    }
  else
    {
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);
      narrow = TRUE;
    }

  if (narrow)
    {
      /* 16-bit MULS/Conditional MUL.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rn)
	inst.instruction |= Rm << 3;
      else if (Rd == Rm)
	inst.instruction |= Rn << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
  else
    {
      constraint (inst.instruction != T_MNEM_mul,
		  _("Thumb-2 MUL must not set flags"));
      /* 32-bit MUL.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rn << 16;
      inst.instruction |= Rm << 0;

      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
      reject_bad_reg (Rm);
    }
}
12493
12494 static void
12495 do_t_mull (void)
12496 {
12497 unsigned RdLo, RdHi, Rn, Rm;
12498
12499 RdLo = inst.operands[0].reg;
12500 RdHi = inst.operands[1].reg;
12501 Rn = inst.operands[2].reg;
12502 Rm = inst.operands[3].reg;
12503
12504 reject_bad_reg (RdLo);
12505 reject_bad_reg (RdHi);
12506 reject_bad_reg (Rn);
12507 reject_bad_reg (Rm);
12508
12509 inst.instruction |= RdLo << 12;
12510 inst.instruction |= RdHi << 8;
12511 inst.instruction |= Rn << 16;
12512 inst.instruction |= Rm;
12513
12514 if (RdLo == RdHi)
12515 as_tsktsk (_("rdhi and rdlo must be different"));
12516 }
12517
/* Encode a Thumb NOP or NOP-compatible hint instruction.  */
static void
do_t_nop (void)
{
  /* A NOP/hint neither tests nor sets the condition flags.  */
  set_it_insn_type (NEUTRAL_IT_INSN);

  if (unified_syntax)
    {
      /* Hint numbers above 15 only fit the 32-bit encoding.  */
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].imm;
	}
      else
	{
	  /* PR9722: Check for Thumb2 availability before
	     generating a thumb2 nop instruction.  */
	  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= inst.operands[0].imm << 4;
	    }
	  else
	    /* MOV r8, r8 — the traditional Thumb-1 NOP.  */
	    inst.instruction = 0x46c0;
	}
    }
  else
    {
      constraint (inst.operands[0].present,
		  _("Thumb does not support NOP with hints"));
      inst.instruction = 0x46c0;
    }
}
12550
/* Encode a Thumb NEG/NEGS instruction, choosing between the 16-bit and
   32-bit encodings.  */
static void
do_t_neg (void)
{
  if (unified_syntax)
    {
      bfd_boolean narrow;

      /* Inside an IT block only the non-flag-setting form may be narrow;
	 outside, only the flag-setting form may be.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      if (!narrow)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg << 8;
	  inst.instruction |= inst.operands[1].reg << 16;
	}
      else
	{
	  inst.instruction = THUMB_OP16 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
  else
    {
      /* Pre-UAL syntax: low registers and 16-bit encoding only.  */
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
		  BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
}
12591
/* Encode a Thumb-2 ORN instruction (32-bit only; no 16-bit form).  */
static void
do_t_orn (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  /* Two-operand form: ORN Rd, foo means ORN Rd, Rd, foo.  */
  Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;

  reject_bad_reg (Rd);
  /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN.  */
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;

  if (!inst.operands[2].isreg)
    {
      /* Immediate form: switch to the modified-immediate encoding and
	 let the fixup machinery encode the value.  */
      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
    }
  else
    {
      unsigned Rm;

      Rm = inst.operands[2].reg;
      reject_bad_reg (Rm);

      constraint (inst.operands[2].shifted
		  && inst.operands[2].immisreg,
		  _("shift must be constant"));
      encode_thumb32_shifted_operand (2);
    }
}
12625
/* Encode a Thumb-2 PKHBT instruction; also used by do_t_pkhtb after it
   has adjusted the opcode and operands.  */
static void
do_t_pkhbt (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  if (inst.operands[3].present)
    {
      /* Optional shift amount: split across imm3 (<14:12>) and
	 imm2 (<7:6>) as the T32 encoding requires.  */
      unsigned int val = inst.reloc.exp.X_add_number;
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));
      inst.instruction |= (val & 0x1c) << 10;
      inst.instruction |= (val & 0x03) << 6;
    }
}
12651
12652 static void
12653 do_t_pkhtb (void)
12654 {
12655 if (!inst.operands[3].present)
12656 {
12657 unsigned Rtmp;
12658
12659 inst.instruction &= ~0x00000020;
12660
12661 /* PR 10168. Swap the Rm and Rn registers. */
12662 Rtmp = inst.operands[1].reg;
12663 inst.operands[1].reg = inst.operands[2].reg;
12664 inst.operands[2].reg = Rtmp;
12665 }
12666 do_t_pkhbt ();
12667 }
12668
12669 static void
12670 do_t_pld (void)
12671 {
12672 if (inst.operands[0].immisreg)
12673 reject_bad_reg (inst.operands[0].imm);
12674
12675 encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
12676 }
12677
/* Encode a Thumb PUSH or POP instruction, preferring the 16-bit
   encodings where the register list allows.  */
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  /* Only low registers: plain 16-bit encoding.  */
  if (inst.size_req != 4 && (mask & ~0xff) == 0)
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  /* Low registers plus exactly LR (push) or PC (pop): 16-bit encoding
     with the M/P bit set.  */
  else if (inst.size_req != 4
	   && (mask & ~0xff) == (1U << (inst.instruction == T_MNEM_push
				       ? REG_LR : REG_PC)))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      /* Anything else becomes a 32-bit LDM/STM with SP writeback.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_ldmstm (13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
12710
/* Encode a Thumb-2 RBIT instruction.  */
static void
do_t_rbit (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  /* The T32 encoding carries the source register in both the Rn
     (bits <19:16>) and Rm (bits <3:0>) fields.  */
  inst.instruction |= Rm << 16;
  inst.instruction |= Rm;
}
12726
/* Encode a Thumb REV/REV16/REVSH instruction, 16-bit where possible.  */
static void
do_t_rev (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (Rd <= 7 && Rm <= 7
      && inst.size_req != 4)
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      /* As for RBIT, the 32-bit encoding repeats the source register in
	 both the Rn and Rm fields.  */
      inst.instruction |= Rm << 16;
      inst.instruction |= Rm;
    }
  else
    inst.error = BAD_HIREG;
}
12755
12756 static void
12757 do_t_rrx (void)
12758 {
12759 unsigned Rd, Rm;
12760
12761 Rd = inst.operands[0].reg;
12762 Rm = inst.operands[1].reg;
12763
12764 reject_bad_reg (Rd);
12765 reject_bad_reg (Rm);
12766
12767 inst.instruction |= Rd << 8;
12768 inst.instruction |= Rm;
12769 }
12770
/* Encode a Thumb RSB/RSBS instruction.  */
static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Bit 20 is the S (flag-setting) bit of the 32-bit encoding.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      if (inst.reloc.exp.X_op != O_constant
	  || inst.reloc.exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.reloc.type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  /* Modified-immediate form; value encoded by the fixup.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
12825
12826 static void
12827 do_t_setend (void)
12828 {
12829 if (warn_on_deprecated
12830 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
12831 as_tsktsk (_("setend use is deprecated for ARMv8"));
12832
12833 set_it_insn_type (OUTSIDE_IT_INSN);
12834 if (inst.operands[0].imm)
12835 inst.instruction |= 0x8;
12836 }
12837
/* Encode a Thumb shift instruction (ASR, LSL, LSR, ROR, with or without
   flag setting), by register or by immediate, choosing between 16-bit
   and 32-bit encodings.  */
static void
do_t_shift (void)
{
  /* Two-operand form: shift Rd, foo means shift Rd, Rd, foo.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      /* There is no 16-bit ROR-by-immediate encoding.  */
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      /* The 16-bit register-shift form requires Rd == Rn and a low
	 shift register.  */
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* Shift by immediate is encoded as MOV/MOVS with a shifted
		 register operand.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.reloc.type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Pre-UAL syntax: 16-bit encodings and low registers only.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;

	  /* PR 12854: Error on extraneous shifts.  */
	  constraint (inst.operands[2].shifted,
		      _("extraneous shift as part of operand to shift insn"));
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
12985
12986 static void
12987 do_t_simd (void)
12988 {
12989 unsigned Rd, Rn, Rm;
12990
12991 Rd = inst.operands[0].reg;
12992 Rn = inst.operands[1].reg;
12993 Rm = inst.operands[2].reg;
12994
12995 reject_bad_reg (Rd);
12996 reject_bad_reg (Rn);
12997 reject_bad_reg (Rm);
12998
12999 inst.instruction |= Rd << 8;
13000 inst.instruction |= Rn << 16;
13001 inst.instruction |= Rm;
13002 }
13003
13004 static void
13005 do_t_simd2 (void)
13006 {
13007 unsigned Rd, Rn, Rm;
13008
13009 Rd = inst.operands[0].reg;
13010 Rm = inst.operands[1].reg;
13011 Rn = inst.operands[2].reg;
13012
13013 reject_bad_reg (Rd);
13014 reject_bad_reg (Rn);
13015 reject_bad_reg (Rm);
13016
13017 inst.instruction |= Rd << 8;
13018 inst.instruction |= Rn << 16;
13019 inst.instruction |= Rm;
13020 }
13021
/* Encode a Thumb SMC (secure monitor call) instruction.  */
static void
do_t_smc (void)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
	      _("SMC is not permitted on this architecture"));
  constraint (inst.reloc.exp.X_op != O_constant,
	      _("expression too complex"));
  /* Immediate is fully encoded here; no fixup needed.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
  inst.instruction |= (value & 0xf000) >> 12;
  inst.instruction |= (value & 0x0ff0);
  inst.instruction |= (value & 0x000f) << 16;
  /* PR gas/15623: SMC instructions must be last in an IT block.  */
  set_it_insn_type_last ();
}
13037
13038 static void
13039 do_t_hvc (void)
13040 {
13041 unsigned int value = inst.reloc.exp.X_add_number;
13042
13043 inst.reloc.type = BFD_RELOC_UNUSED;
13044 inst.instruction |= (value & 0x0fff);
13045 inst.instruction |= (value & 0xf000) << 4;
13046 }
13047
/* Shared encoder for Thumb-2 SSAT and USAT.  BIAS is subtracted from the
   saturation position operand (1 for SSAT, whose position is 1-based;
   0 for USAT).  */
static void
do_t_ssat_usat (int bias)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  if (inst.operands[3].present)
    {
      offsetT shift_amount = inst.reloc.exp.X_add_number;

      inst.reloc.type = BFD_RELOC_UNUSED;

      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit.  */

	  /* Shift amount is split across imm3 (<14:12>) and imm2 (<7:6>).  */
	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
13085
/* Encode a Thumb-2 SSAT: the saturation position is 1-based, hence
   bias 1.  */
static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
13091
13092 static void
13093 do_t_ssat16 (void)
13094 {
13095 unsigned Rd, Rn;
13096
13097 Rd = inst.operands[0].reg;
13098 Rn = inst.operands[2].reg;
13099
13100 reject_bad_reg (Rd);
13101 reject_bad_reg (Rn);
13102
13103 inst.instruction |= Rd << 8;
13104 inst.instruction |= inst.operands[1].imm - 1;
13105 inst.instruction |= Rn << 16;
13106 }
13107
/* Encode a Thumb-2 STREX instruction.  */
static void
do_t_strex (void)
{
  /* Only a plain [Rn, #imm] (pre-indexed immediate, no writeback)
     address is legal.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  /* The offset is encoded by the fixup (unsigned 8-bit, scaled).  */
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
13124
13125 static void
13126 do_t_strexd (void)
13127 {
13128 if (!inst.operands[2].present)
13129 inst.operands[2].reg = inst.operands[1].reg + 1;
13130
13131 constraint (inst.operands[0].reg == inst.operands[1].reg
13132 || inst.operands[0].reg == inst.operands[2].reg
13133 || inst.operands[0].reg == inst.operands[3].reg,
13134 BAD_OVERLAP);
13135
13136 inst.instruction |= inst.operands[0].reg;
13137 inst.instruction |= inst.operands[1].reg << 12;
13138 inst.instruction |= inst.operands[2].reg << 8;
13139 inst.instruction |= inst.operands[3].reg << 16;
13140 }
13141
13142 static void
13143 do_t_sxtah (void)
13144 {
13145 unsigned Rd, Rn, Rm;
13146
13147 Rd = inst.operands[0].reg;
13148 Rn = inst.operands[1].reg;
13149 Rm = inst.operands[2].reg;
13150
13151 reject_bad_reg (Rd);
13152 reject_bad_reg (Rn);
13153 reject_bad_reg (Rm);
13154
13155 inst.instruction |= Rd << 8;
13156 inst.instruction |= Rn << 16;
13157 inst.instruction |= Rm;
13158 inst.instruction |= inst.operands[3].imm << 4;
13159 }
13160
/* Encode a Thumb SXTH/SXTB/UXTH/UXTB instruction, 16-bit where the
   registers are low and no rotation is requested.  */
static void
do_t_sxth (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      /* Optional rotation goes into bits <5:4>.  */
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
13196
13197 static void
13198 do_t_swi (void)
13199 {
13200 inst.reloc.type = BFD_RELOC_ARM_SWI;
13201 }
13202
/* Encode a Thumb-2 TBB/TBH (table branch) instruction.  */
static void
do_t_tb (void)
{
  unsigned Rn, Rm;
  int half;

  /* Bit 4 distinguishes TBH (halfword table) from TBB.  */
  half = (inst.instruction & 0x10) != 0;
  /* A table branch must be the last instruction in an IT block.  */
  set_it_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  /* ARMv8 relaxed the SP restriction on the base register.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  /* TBH requires (and its parser supplies) an LSL #1 index; TBB must
     not have any shift.  */
  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
13225
/* Encode a Thumb UDF (permanently undefined) instruction.  */
static void
do_t_udf (void)
{
  /* The immediate operand defaults to zero.  */
  if (!inst.operands[0].present)
    inst.operands[0].imm = 0;

  if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
    {
      constraint (inst.size_req == 2,
		  _("immediate value out of range"));
      /* 32-bit encoding: immediate split across bits <19:16> and <11:0>.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
      inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
    }
  else
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].imm;
    }

  /* UDF neither tests nor sets the condition flags.  */
  set_it_insn_type (NEUTRAL_IT_INSN);
}
13248
13249
/* Encode a Thumb-2 USAT: the saturation position is 0-based, hence
   bias 0.  */
static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
13255
13256 static void
13257 do_t_usat16 (void)
13258 {
13259 unsigned Rd, Rn;
13260
13261 Rd = inst.operands[0].reg;
13262 Rn = inst.operands[2].reg;
13263
13264 reject_bad_reg (Rd);
13265 reject_bad_reg (Rn);
13266
13267 inst.instruction |= Rd << 8;
13268 inst.instruction |= inst.operands[1].imm;
13269 inst.instruction |= Rn << 16;
13270 }
13271
13272 /* Neon instruction encoder helpers. */
13273
13274 /* Encodings for the different types for various Neon opcodes. */
13275
/* An "invalid" code for the following tables.  */
#define N_INV -1u

/* One row of the Neon encoding table: the three alternative base
   encodings an overloaded mnemonic can take (N_INV where a variant
   does not exist).  */
struct neon_tab_entry
{
  unsigned integer;        /* Integer variant (also interleave/ARM-reg).  */
  unsigned float_or_poly;  /* Float or polynomial variant (also lane).  */
  unsigned scalar_or_imm;  /* Scalar or immediate variant (also dup).  */
};
13285
/* Map overloaded Neon opcodes to their respective encodings.  Each X()
   entry is (mnemonic, integer, float_or_poly, scalar_or_imm) matching
   the fields of struct neon_tab_entry; the table generates both the
   N_MNEM_* enum and the neon_enc_tab array below.  */
#define NEON_ENC_TAB							\
  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */	\
  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
  X(vfma,	N_INV, 0x0000c10, N_INV),		\
  X(vfms,	N_INV, 0x0200c10, N_INV),		\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
  X(vqrdmlah,   0x3000b10, N_INV,     0x0800e40),	\
  X(vqrdmlsh,   0x3000c10, N_INV,     0x0800f40),	\
  X(vshl,	0x0000400, N_INV,     0x0800510),	\
  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
  X(vand,	0x0000110, N_INV,     0x0800030),	\
  X(vbic,	0x0100110, N_INV,     0x0800030),	\
  X(veor,	0x1000110, N_INV,     N_INV),		\
  X(vorn,	0x0300110, N_INV,     0x0800010),	\
  X(vorr,	0x0200110, N_INV,     0x0800010),	\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),		\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
  X(vst2,	0x0000100, 0x0800100, N_INV),		\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
  X(vst3,	0x0000200, 0x0800200, N_INV),		\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
  X(vst4,	0x0000300, 0x0800300, N_INV),		\
  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
  X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
  X(vnmla,      0xe100a40, 0xe100b40, N_INV),		\
  X(vnmls,      0xe100a00, 0xe100b00, N_INV),		\
  X(vfnma,      0xe900a40, 0xe900b40, N_INV),		\
  X(vfnms,      0xe900a00, 0xe900b00, N_INV),		\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
  X(vcmpez,	0xeb50ac0, 0xeb50bc0, N_INV),		\
  X(vseleq,	0xe000a00, N_INV,     N_INV),		\
  X(vselvs,	0xe100a00, N_INV,     N_INV),		\
  X(vselge,	0xe200a00, N_INV,     N_INV),		\
  X(vselgt,	0xe300a00, N_INV,     N_INV),		\
  X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),		\
  X(vminnm,	0xe800a40, 0x3200f10, N_INV),		\
  X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),		\
  X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),		\
  X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),		\
  X(aes,	0x3b00300, N_INV,     N_INV),		\
  X(sha3op,	0x2000c00, N_INV,     N_INV),		\
  X(sha1h,	0x3b902c0, N_INV,     N_INV),		\
  X(sha2op,     0x3ba0380, N_INV,     N_INV)
13363
/* One N_MNEM_* value per NEON_ENC_TAB row, used to index neon_enc_tab.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};

/* The encoding table itself; rows parallel the enum above.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};
13377
/* Do not use these macros; instead, use NEON_ENCODE defined below.
   Each accessor selects one column of neon_enc_tab for the instruction's
   N_MNEM_* value (held in the low 28 bits of X); several accessors alias
   the same column because the columns are overloaded per mnemonic.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
/* The SINGLE/DOUBLE/FPV8 variants preserve the condition bits held in
   the top nibble of X.  */
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
#define NEON_ENC_FPV8_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))

/* Replace inst.instruction with the encoding selected by TYPE (one of
   the suffixes above) and mark the instruction as Neon.  */
#define NEON_ENCODE(type, inst)					\
  do								\
    {								\
      inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
      inst.is_neon = 1;						\
    }								\
  while (0)
13402
/* Diagnose a Neon type suffix (e.g. ".s32") that was parsed but never
   consumed by an encoder (inst.is_neon left clear).  Expands to a statement
   that issues an error and returns from the calling encode function.  */
#define check_neon_suffixes \
  do \
    { \
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
	{ \
	  as_bad (_("invalid neon suffix for non neon instruction")); \
	  return; \
	} \
    } \
  while (0)
13413
/* Define shapes for instruction operands. The following mnemonic characters
   are used in this table:

     F - VFP S<n> register
     H - VFP S<n> register used as a half-precision (16-bit) value
     D - Neon D<n> register
     Q - Neon Q<n> register
     I - Immediate
     S - Scalar
     R - ARM register
     L - D<n> register list

   This table is used to generate various data:
     - enumerations of the form NS_DDR to be used as arguments to
       neon_select_shape.
     - a table classifying shapes into single, double, quad, mixed.
     - a table used to drive neon_select_shape.  */

#define NEON_SHAPE_DEF			\
  X(3, (D, D, D), DOUBLE),		\
  X(3, (Q, Q, Q), QUAD),		\
  X(3, (D, D, I), DOUBLE),		\
  X(3, (Q, Q, I), QUAD),		\
  X(3, (D, D, S), DOUBLE),		\
  X(3, (Q, Q, S), QUAD),		\
  X(2, (D, D), DOUBLE),			\
  X(2, (Q, Q), QUAD),			\
  X(2, (D, S), DOUBLE),			\
  X(2, (Q, S), QUAD),			\
  X(2, (D, R), DOUBLE),			\
  X(2, (Q, R), QUAD),			\
  X(2, (D, I), DOUBLE),			\
  X(2, (Q, I), QUAD),			\
  X(3, (D, L, D), DOUBLE),		\
  X(2, (D, Q), MIXED),			\
  X(2, (Q, D), MIXED),			\
  X(3, (D, Q, I), MIXED),		\
  X(3, (Q, D, I), MIXED),		\
  X(3, (Q, D, D), MIXED),		\
  X(3, (D, Q, Q), MIXED),		\
  X(3, (Q, Q, D), MIXED),		\
  X(3, (Q, D, S), MIXED),		\
  X(3, (D, Q, S), MIXED),		\
  X(4, (D, D, D, I), DOUBLE),		\
  X(4, (Q, Q, Q, I), QUAD),		\
  X(4, (D, D, S, I), DOUBLE),		\
  X(4, (Q, Q, S, I), QUAD),		\
  X(2, (F, F), SINGLE),			\
  X(3, (F, F, F), SINGLE),		\
  X(2, (F, I), SINGLE),			\
  X(2, (F, D), MIXED),			\
  X(2, (D, F), MIXED),			\
  X(3, (F, F, I), MIXED),		\
  X(4, (R, R, F, F), SINGLE),		\
  X(4, (F, F, R, R), SINGLE),		\
  X(3, (D, R, R), DOUBLE),		\
  X(3, (R, R, D), DOUBLE),		\
  X(2, (S, R), SINGLE),			\
  X(2, (R, S), SINGLE),			\
  X(2, (F, R), SINGLE),			\
  X(2, (R, F), SINGLE),			\
/* Half float shape supported so far. */\
  X (2, (H, D), MIXED),			\
  X (2, (D, H), MIXED),			\
  X (2, (H, F), MIXED),			\
  X (2, (F, H), MIXED),			\
  X (2, (H, H), HALF),			\
  X (2, (H, R), HALF),			\
  X (2, (R, H), HALF),			\
  X (2, (H, I), HALF),			\
  X (3, (H, H, H), HALF),		\
  X (3, (H, F, I), MIXED),		\
  X (3, (F, H, I), MIXED),		\
  X (3, (D, H, H), MIXED),		\
  X (3, (D, H, S), MIXED)

/* Paste helpers used to turn an operand list like (D, D, I) into a single
   identifier NS_DDI.  */
#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D

#define X(N, L, C) S##N L

/* One enumerator per shape (NS_DDD, NS_QQI, ...), plus NS_NULL as the
   list terminator / failure value for neon_select_shape.  */
enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4
13505
/* Broad classification of a shape: which register class dominates the
   operation (the third column of NEON_SHAPE_DEF).  */
enum neon_shape_class
{
  SC_HALF,
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};
13514
#define X(N, L, C) SC_##C

/* Classification of each shape, generated from NEON_SHAPE_DEF and indexed
   by enum neon_shape (used e.g. by neon_quad below).  */
static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X
13523
/* The kind of a single operand within a shape; one enumerator per mnemonic
   character of NEON_SHAPE_DEF.  */
enum neon_shape_el
{
  SE_H,		/* Half-precision view of an S<n> register.  */
  SE_F,		/* VFP S<n> register.  */
  SE_D,		/* Neon D<n> register.  */
  SE_Q,		/* Neon Q<n> register.  */
  SE_I,		/* Immediate.  */
  SE_S,		/* Scalar.  */
  SE_R,		/* ARM core register.  */
  SE_L		/* D<n> register list.  */
};
13535
/* Register widths of above, in bits; indexed by enum neon_shape_el.
   Zero means "no meaningful width" (immediates and register lists).  */
static unsigned neon_shape_el_size[] =
{
  16,	/* SE_H.  */
  32,	/* SE_F.  */
  64,	/* SE_D.  */
  128,	/* SE_Q.  */
  0,	/* SE_I.  */
  32,	/* SE_S.  */
  32,	/* SE_R.  */
  0	/* SE_L.  */
};
13548
/* Full description of one shape: how many operands it has and the kind of
   each one.  */
struct neon_shape_info
{
  unsigned els;				/* Number of operand elements.  */
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];	/* Kind of each operand.  */
};

/* Paste helpers turning an operand list like (D, D, I) into an initializer
   { SE_D, SE_D, SE_I }.  */
#define S2(A,B)       { SE_##A, SE_##B }
#define S3(A,B,C)     { SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)   { SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

/* Per-shape operand descriptions, generated from NEON_SHAPE_DEF and indexed
   by enum neon_shape; this is the table that drives neon_select_shape.  */
static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
13570
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.  */

enum neon_type_mask
{
  /* Absolute types: signed, unsigned, sign-insensitive integer, size-only,
     polynomial and float, at each element width.  */
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_P64  = 0x0200000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
  /* Modifier bits, only meaningful when N_EQK is set; they reuse the low
     bit values, which is safe because an N_EQK mask carries no absolute
     type bits.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_P64
};

/* All N_EQK modifier bits.  */
#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

/* Convenience unions of the absolute type bits above.  */
#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_S_32     (N_S8 | N_S16 | N_S32)
#define N_F_16_32  (N_F16 | N_F32)
#define N_SUF_32   (N_SU_32 | N_F_16_32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
#define N_F_ALL    (N_F16 | N_F32 | N_F64)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
13631
/* Select a "shape" for the current instruction (describing register types or
   sizes) from a list of alternatives. Return NS_NULL if the current instruction
   doesn't fit. For non-polymorphic shapes, checking is usually done as a
   function of operand parsing, so this function doesn't need to be called.
   Shapes should be listed in order of decreasing length.

   The candidate shapes are passed as a NS_NULL-terminated vararg list; the
   parsed operands are read from the global `inst'.  Note the side effect:
   a missing optional operand 1 is filled in from operand 0.  */

static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Try each candidate shape in turn until one matches every operand.  */
  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  switch (neon_shape_tab[shape].el[j])
	    {
	      /* If a  .f16,  .16,  .u16,  .s16 type specifier is given over
		 a VFP single precision register operand, it's essentially
		 means only half of the register is used.

		 If the type specifier is given after the mnemonics, the
		 information is stored in inst.vectype.  If the type specifier
		 is given after register operand, the information is stored
		 in inst.operands[].vectype.

		 When there is only one type specifier, and all the register
		 operands are the same type of hardware register, the type
		 specifier applies to all register operands.

		 If no type specifier is given, the shape is inferred from
		 operand information.

		 for example:
		 vadd.f16 s0, s1, s2:		NS_HHH
		 vabs.f16 s0, s1:		NS_HH
		 vmov.f16 s0, r1:		NS_HR
		 vmov.f16 r0, s1:		NS_RH
		 vcvt.f16 r0, s1:		NS_RH
		 vcvt.f16.s32	s2, s2, #29:	NS_HFI
		 vcvt.f16.s32	s2, s2:		NS_HF
	      */
	    case SE_H:
	      /* An S register counts as H only when a 16-bit type was given
		 somewhere (after the mnemonic or after the operand).  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1
			 && inst.vectype.el[0].size == 16)
			|| (inst.vectype.elems > 1
			    && inst.vectype.el[j].size == 16)
			|| (inst.vectype.elems == 0
			    && inst.operands[j].vectype.type != NT_invtype
			    && inst.operands[j].vectype.size == 16))))
		matches = 0;
	      break;

	    case SE_F:
	      /* A plain S register: any 32-bit type, or no type at all.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1 && inst.vectype.el[0].size == 32)
			|| (inst.vectype.elems > 1 && inst.vectype.el[j].size == 32)
			|| (inst.vectype.elems == 0
			    && (inst.operands[j].vectype.size == 32
				|| inst.operands[j].vectype.type == NT_invtype)))))
		matches = 0;
	      break;

	    case SE_D:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_L:
	      /* Register lists always match here; they were validated during
		 operand parsing.  */
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
	/* We've matched all the entries in the shape table, and we don't
	   have any left over operands which have not been matched.  */
	break;
    }

  va_end (ap);

  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
13774
13775 /* True if SHAPE is predominantly a quadword operation (most of the time, this
13776 means the Q bit should be set). */
13777
13778 static int
13779 neon_quad (enum neon_shape shape)
13780 {
13781 return neon_shape_class[shape] == SC_QUAD;
13782 }
13783
13784 static void
13785 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
13786 unsigned *g_size)
13787 {
13788 /* Allow modification to be made to types which are constrained to be
13789 based on the key element, based on bits set alongside N_EQK. */
13790 if ((typebits & N_EQK) != 0)
13791 {
13792 if ((typebits & N_HLF) != 0)
13793 *g_size /= 2;
13794 else if ((typebits & N_DBL) != 0)
13795 *g_size *= 2;
13796 if ((typebits & N_SGN) != 0)
13797 *g_type = NT_signed;
13798 else if ((typebits & N_UNS) != 0)
13799 *g_type = NT_unsigned;
13800 else if ((typebits & N_INT) != 0)
13801 *g_type = NT_integer;
13802 else if ((typebits & N_FLT) != 0)
13803 *g_type = NT_float;
13804 else if ((typebits & N_SIZ) != 0)
13805 *g_type = NT_untyped;
13806 }
13807 }
13808
13809 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
13810 operand type, i.e. the single type specified in a Neon instruction when it
13811 is the only one given. */
13812
13813 static struct neon_type_el
13814 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
13815 {
13816 struct neon_type_el dest = *key;
13817
13818 gas_assert ((thisarg & N_EQK) != 0);
13819
13820 neon_modify_type_size (thisarg, &dest.type, &dest.size);
13821
13822 return dest;
13823 }
13824
13825 /* Convert Neon type and size into compact bitmask representation. */
13826
13827 static enum neon_type_mask
13828 type_chk_of_el_type (enum neon_el_type type, unsigned size)
13829 {
13830 switch (type)
13831 {
13832 case NT_untyped:
13833 switch (size)
13834 {
13835 case 8: return N_8;
13836 case 16: return N_16;
13837 case 32: return N_32;
13838 case 64: return N_64;
13839 default: ;
13840 }
13841 break;
13842
13843 case NT_integer:
13844 switch (size)
13845 {
13846 case 8: return N_I8;
13847 case 16: return N_I16;
13848 case 32: return N_I32;
13849 case 64: return N_I64;
13850 default: ;
13851 }
13852 break;
13853
13854 case NT_float:
13855 switch (size)
13856 {
13857 case 16: return N_F16;
13858 case 32: return N_F32;
13859 case 64: return N_F64;
13860 default: ;
13861 }
13862 break;
13863
13864 case NT_poly:
13865 switch (size)
13866 {
13867 case 8: return N_P8;
13868 case 16: return N_P16;
13869 case 64: return N_P64;
13870 default: ;
13871 }
13872 break;
13873
13874 case NT_signed:
13875 switch (size)
13876 {
13877 case 8: return N_S8;
13878 case 16: return N_S16;
13879 case 32: return N_S32;
13880 case 64: return N_S64;
13881 default: ;
13882 }
13883 break;
13884
13885 case NT_unsigned:
13886 switch (size)
13887 {
13888 case 8: return N_U8;
13889 case 16: return N_U16;
13890 case 32: return N_U32;
13891 case 64: return N_U64;
13892 default: ;
13893 }
13894 break;
13895
13896 default: ;
13897 }
13898
13899 return N_UTYP;
13900 }
13901
13902 /* Convert compact Neon bitmask type representation to a type and size. Only
13903 handles the case where a single bit is set in the mask. */
13904
13905 static int
13906 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
13907 enum neon_type_mask mask)
13908 {
13909 if ((mask & N_EQK) != 0)
13910 return FAIL;
13911
13912 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
13913 *size = 8;
13914 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
13915 *size = 16;
13916 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
13917 *size = 32;
13918 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
13919 *size = 64;
13920 else
13921 return FAIL;
13922
13923 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
13924 *type = NT_signed;
13925 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
13926 *type = NT_unsigned;
13927 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
13928 *type = NT_integer;
13929 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
13930 *type = NT_untyped;
13931 else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
13932 *type = NT_poly;
13933 else if ((mask & (N_F_ALL)) != 0)
13934 *type = NT_float;
13935 else
13936 return FAIL;
13937
13938 return SUCCESS;
13939 }
13940
13941 /* Modify a bitmask of allowed types. This is only needed for type
13942 relaxation. */
13943
13944 static unsigned
13945 modify_types_allowed (unsigned allowed, unsigned mods)
13946 {
13947 unsigned size;
13948 enum neon_el_type type;
13949 unsigned destmask;
13950 int i;
13951
13952 destmask = 0;
13953
13954 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
13955 {
13956 if (el_type_of_type_chk (&type, &size,
13957 (enum neon_type_mask) (allowed & i)) == SUCCESS)
13958 {
13959 neon_modify_type_size (mods, &type, &size);
13960 destmask |= type_chk_of_el_type (type, size);
13961 }
13962 }
13963
13964 return destmask;
13965 }
13966
/* Check type and return type classification.
   The manual states (paraphrase): If one datatype is given, it indicates the
   type given in:
    - the second operand, if there is one
    - the operand, if there is no second operand
    - the result, if there are no operands.
   This isn't quite good enough though, so we use a concept of a "key" datatype
   which is set on a per-instruction basis, which is the one which matters when
   only one data type is written.
   Note: this function has side-effects (e.g. filling in missing operands). All
   Neon instructions should call it before performing bit encoding.

   ELS is the number of operands; NS the already-selected shape; the varargs
   are one neon_type_mask per operand.  Returns the key operand's type, or a
   badtype value (type NT_invtype) after reporting an error.  */

static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  /* Returned on any failure; NT_invtype marks it invalid.  */
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  /* A type after the mnemonic and a type after an operand are mutually
     exclusive.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Pass 0 only records the key element's type; pass 1 checks every
     operand against its (possibly key-relative) allowed types.  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if (((types_allowed & N_UNT) == 0)
	      && ((g_size == 8 && (types_allowed & N_8) != 0)
		  || (g_size == 16 && (types_allowed & N_16) != 0)
		  || (g_size == 32 && (types_allowed & N_32) != 0)
		  || (g_size == 64 && (types_allowed & N_64) != 0)))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;

		  /* Check architecture constraint on FP16 extension.  */
		  if (k_size == 16
		      && k_type == NT_float
		      && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
		    {
		      inst.error = _(BAD_FP16);
		      return badtype;
		    }
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape;
		  unsigned regwidth, match;

		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
		  if (ns == NS_NULL)
		    {
		      first_error (_("invalid instruction shape"));
		      return badtype;
		    }
		  regshape = neon_shape_tab[ns].el[i];
		  regwidth = neon_shape_el_size[regshape];

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  /* FP16 will use a single precision register.  */
		  if (regwidth == 32 && match == 16)
		    {
		      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
			match = regwidth;
		      else
			{
			  inst.error = _(BAD_FP16);
			  return badtype;
			}
		    }

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (_("bad type in Neon instruction"));
		      return badtype;
		    }
		}
	      else
		{
		  /* Key-relative operand: apply the modifiers to the key
		     type and demand an exact match.  */
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}
14176
14177 /* Neon-style VFP instruction forwarding. */
14178
14179 /* Thumb VFP instructions have 0xE in the condition field. */
14180
14181 static void
14182 do_vfp_cond_or_thumb (void)
14183 {
14184 inst.is_neon = 1;
14185
14186 if (thumb_mode)
14187 inst.instruction |= 0xe0000000;
14188 else
14189 inst.instruction |= inst.cond << 28;
14190 }
14191
/* Look up and encode a simple mnemonic, for use as a helper function for the
   Neon-style VFP syntax.  This avoids duplication of bits of the insns table,
   etc. It is assumed that operand parsing has already been done, and that the
   operands are in the form expected by the given opcode (this isn't necessarily
   the same as the form in which they were parsed, hence some massaging must
   take place before this function is called).
   Checks current arch version against that in the looked-up opcode.  */

static void
do_vfp_nsyn_opcode (const char *opname)
{
  const struct asm_opcode *opcode;

  /* OPNAME must name an existing entry in the opcode hash table; callers
     pass literal mnemonics, so a miss is a programming error.  */
  opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);

  if (!opcode)
    abort ();

  /* Reject the instruction if the selected CPU lacks the FPU feature
     required by the current (ARM or Thumb) encoding.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
		thumb_mode ? *opcode->tvariant : *opcode->avariant),
	      _(BAD_FPU));

  inst.is_neon = 1;

  if (thumb_mode)
    {
      inst.instruction = opcode->tvalue;
      opcode->tencode ();
    }
  else
    {
      /* ARM encodings carry the condition in the top four bits.  */
      inst.instruction = (inst.cond << 28) | opcode->avalue;
      opcode->aencode ();
    }
}
14227
14228 static void
14229 do_vfp_nsyn_add_sub (enum neon_shape rs)
14230 {
14231 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
14232
14233 if (rs == NS_FFF || rs == NS_HHH)
14234 {
14235 if (is_add)
14236 do_vfp_nsyn_opcode ("fadds");
14237 else
14238 do_vfp_nsyn_opcode ("fsubs");
14239
14240 /* ARMv8.2 fp16 instruction. */
14241 if (rs == NS_HHH)
14242 do_scalar_fp16_v82_encode ();
14243 }
14244 else
14245 {
14246 if (is_add)
14247 do_vfp_nsyn_opcode ("faddd");
14248 else
14249 do_vfp_nsyn_opcode ("fsubd");
14250 }
14251 }
14252
14253 /* Check operand types to see if this is a VFP instruction, and if so call
14254 PFN (). */
14255
14256 static int
14257 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
14258 {
14259 enum neon_shape rs;
14260 struct neon_type_el et;
14261
14262 switch (args)
14263 {
14264 case 2:
14265 rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14266 et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14267 break;
14268
14269 case 3:
14270 rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14271 et = neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14272 N_F_ALL | N_KEY | N_VFP);
14273 break;
14274
14275 default:
14276 abort ();
14277 }
14278
14279 if (et.type != NT_invtype)
14280 {
14281 pfn (rs);
14282 return SUCCESS;
14283 }
14284
14285 inst.error = NULL;
14286 return FAIL;
14287 }
14288
14289 static void
14290 do_vfp_nsyn_mla_mls (enum neon_shape rs)
14291 {
14292 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
14293
14294 if (rs == NS_FFF || rs == NS_HHH)
14295 {
14296 if (is_mla)
14297 do_vfp_nsyn_opcode ("fmacs");
14298 else
14299 do_vfp_nsyn_opcode ("fnmacs");
14300
14301 /* ARMv8.2 fp16 instruction. */
14302 if (rs == NS_HHH)
14303 do_scalar_fp16_v82_encode ();
14304 }
14305 else
14306 {
14307 if (is_mla)
14308 do_vfp_nsyn_opcode ("fmacd");
14309 else
14310 do_vfp_nsyn_opcode ("fnmacd");
14311 }
14312 }
14313
14314 static void
14315 do_vfp_nsyn_fma_fms (enum neon_shape rs)
14316 {
14317 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
14318
14319 if (rs == NS_FFF || rs == NS_HHH)
14320 {
14321 if (is_fma)
14322 do_vfp_nsyn_opcode ("ffmas");
14323 else
14324 do_vfp_nsyn_opcode ("ffnmas");
14325
14326 /* ARMv8.2 fp16 instruction. */
14327 if (rs == NS_HHH)
14328 do_scalar_fp16_v82_encode ();
14329 }
14330 else
14331 {
14332 if (is_fma)
14333 do_vfp_nsyn_opcode ("ffmad");
14334 else
14335 do_vfp_nsyn_opcode ("ffnmad");
14336 }
14337 }
14338
14339 static void
14340 do_vfp_nsyn_mul (enum neon_shape rs)
14341 {
14342 if (rs == NS_FFF || rs == NS_HHH)
14343 {
14344 do_vfp_nsyn_opcode ("fmuls");
14345
14346 /* ARMv8.2 fp16 instruction. */
14347 if (rs == NS_HHH)
14348 do_scalar_fp16_v82_encode ();
14349 }
14350 else
14351 do_vfp_nsyn_opcode ("fmuld");
14352 }
14353
14354 static void
14355 do_vfp_nsyn_abs_neg (enum neon_shape rs)
14356 {
14357 int is_neg = (inst.instruction & 0x80) != 0;
14358 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_VFP | N_KEY);
14359
14360 if (rs == NS_FF || rs == NS_HH)
14361 {
14362 if (is_neg)
14363 do_vfp_nsyn_opcode ("fnegs");
14364 else
14365 do_vfp_nsyn_opcode ("fabss");
14366
14367 /* ARMv8.2 fp16 instruction. */
14368 if (rs == NS_HH)
14369 do_scalar_fp16_v82_encode ();
14370 }
14371 else
14372 {
14373 if (is_neg)
14374 do_vfp_nsyn_opcode ("fnegd");
14375 else
14376 do_vfp_nsyn_opcode ("fabsd");
14377 }
14378 }
14379
14380 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14381 insns belong to Neon, and are handled elsewhere. */
14382
14383 static void
14384 do_vfp_nsyn_ldm_stm (int is_dbmode)
14385 {
14386 int is_ldm = (inst.instruction & (1 << 20)) != 0;
14387 if (is_ldm)
14388 {
14389 if (is_dbmode)
14390 do_vfp_nsyn_opcode ("fldmdbs");
14391 else
14392 do_vfp_nsyn_opcode ("fldmias");
14393 }
14394 else
14395 {
14396 if (is_dbmode)
14397 do_vfp_nsyn_opcode ("fstmdbs");
14398 else
14399 do_vfp_nsyn_opcode ("fstmias");
14400 }
14401 }
14402
14403 static void
14404 do_vfp_nsyn_sqrt (void)
14405 {
14406 enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14407 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14408
14409 if (rs == NS_FF || rs == NS_HH)
14410 {
14411 do_vfp_nsyn_opcode ("fsqrts");
14412
14413 /* ARMv8.2 fp16 instruction. */
14414 if (rs == NS_HH)
14415 do_scalar_fp16_v82_encode ();
14416 }
14417 else
14418 do_vfp_nsyn_opcode ("fsqrtd");
14419 }
14420
14421 static void
14422 do_vfp_nsyn_div (void)
14423 {
14424 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14425 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14426 N_F_ALL | N_KEY | N_VFP);
14427
14428 if (rs == NS_FFF || rs == NS_HHH)
14429 {
14430 do_vfp_nsyn_opcode ("fdivs");
14431
14432 /* ARMv8.2 fp16 instruction. */
14433 if (rs == NS_HHH)
14434 do_scalar_fp16_v82_encode ();
14435 }
14436 else
14437 do_vfp_nsyn_opcode ("fdivd");
14438 }
14439
14440 static void
14441 do_vfp_nsyn_nmul (void)
14442 {
14443 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14444 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14445 N_F_ALL | N_KEY | N_VFP);
14446
14447 if (rs == NS_FFF || rs == NS_HHH)
14448 {
14449 NEON_ENCODE (SINGLE, inst);
14450 do_vfp_sp_dyadic ();
14451
14452 /* ARMv8.2 fp16 instruction. */
14453 if (rs == NS_HHH)
14454 do_scalar_fp16_v82_encode ();
14455 }
14456 else
14457 {
14458 NEON_ENCODE (DOUBLE, inst);
14459 do_vfp_dp_rd_rn_rm ();
14460 }
14461 do_vfp_cond_or_thumb ();
14462
14463 }
14464
/* Encode vcmp/vcmpe.  Two forms: register-register compare, and compare
   against an immediate zero, for which the mnemonic enum is rewritten to
   the corresponding "z" variant before encoding.  */

static void
do_vfp_nsyn_cmp (void)
{
  enum neon_shape rs;
  if (inst.operands[1].isreg)
    {
      /* Register-register form: vcmp{e} Sd,Sm / Dd,Dm.  */
      rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);

      if (rs == NS_FF || rs == NS_HH)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_monadic ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd_rm ();
	}
    }
  else
    {
      /* Compare-with-zero form: shift the mnemonic enum to the "z"
	 variant so NEON_ENCODE picks the right base encoding.  */
      rs = neon_select_shape (NS_HI, NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F_ALL | N_KEY | N_VFP, N_EQK);

      switch (inst.instruction & 0x0fffffff)
	{
	case N_MNEM_vcmp:
	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
	  break;
	case N_MNEM_vcmpe:
	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
	  break;
	default:
	  abort ();
	}

      if (rs == NS_FI || rs == NS_HI)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_compare_z ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd ();
	}
    }
  do_vfp_cond_or_thumb ();

  /* ARMv8.2 fp16 instruction.  */
  if (rs == NS_HI || rs == NS_HH)
    do_scalar_fp16_v82_encode ();
}
14519
14520 static void
14521 nsyn_insert_sp (void)
14522 {
14523 inst.operands[1] = inst.operands[0];
14524 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
14525 inst.operands[0].reg = REG_SP;
14526 inst.operands[0].isreg = 1;
14527 inst.operands[0].writeback = 1;
14528 inst.operands[0].present = 1;
14529 }
14530
14531 static void
14532 do_vfp_nsyn_push (void)
14533 {
14534 nsyn_insert_sp ();
14535
14536 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
14537 _("register list must contain at least 1 and at most 16 "
14538 "registers"));
14539
14540 if (inst.operands[1].issingle)
14541 do_vfp_nsyn_opcode ("fstmdbs");
14542 else
14543 do_vfp_nsyn_opcode ("fstmdbd");
14544 }
14545
14546 static void
14547 do_vfp_nsyn_pop (void)
14548 {
14549 nsyn_insert_sp ();
14550
14551 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
14552 _("register list must contain at least 1 and at most 16 "
14553 "registers"));
14554
14555 if (inst.operands[1].issingle)
14556 do_vfp_nsyn_opcode ("fldmias");
14557 else
14558 do_vfp_nsyn_opcode ("fldmiad");
14559 }
14560
14561 /* Fix up Neon data-processing instructions, ORing in the correct bits for
14562 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
14563
14564 static void
14565 neon_dp_fixup (struct arm_it* insn)
14566 {
14567 unsigned int i = insn->instruction;
14568 insn->is_neon = 1;
14569
14570 if (thumb_mode)
14571 {
14572 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
14573 if (i & (1 << 24))
14574 i |= 1 << 28;
14575
14576 i &= ~(1 << 24);
14577
14578 i |= 0xef000000;
14579 }
14580 else
14581 i |= 0xf2000000;
14582
14583 insn->instruction = i;
14584 }
14585
/* Map an element size in bits (8, 16, 32, 64) to the log2 of that
   size minus three (0, 1, 2, 3), as used in Neon size fields.  */

static unsigned
neon_logbits (unsigned x)
{
  /* ffs returns the 1-based index of the lowest set bit, so a
     power-of-two size of 2^n yields n + 1.  */
  int lowest_set = ffs (x);
  return (unsigned) (lowest_set - 4);
}
14594
/* Split a 5-bit register number into the low four bits and the high
   (fifth) bit, as Neon encodings store them in separate fields.  */
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
14597
14598 /* Encode insns with bit pattern:
14599
14600 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
14601 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
14602
14603 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
14604 different meaning for some instruction. */
14605
static void
neon_three_same (int isquad, int ubit, int size)
{
  /* Rd in bits 15:12 with D in bit 22.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  /* Rn in bits 19:16 with N in bit 7.  */
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  /* Rm in bits 3:0 with M in bit 5.  */
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
14622
14623 /* Encode instructions of the form:
14624
14625 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
14626 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
14627
14628 Don't write size if SIZE == -1. */
14629
static void
neon_two_same (int qbit, int ubit, int size)
{
  /* Rd in bits 15:12 with D in bit 22; Rm in bits 3:0 with M in bit 5.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (qbit != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;

  /* Size goes in bits 19:18 here (cf. bits 21:20 in neon_three_same).  */
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 18;

  neon_dp_fixup (&inst);
}
14645
14646 /* Neon instruction encoders, in approximate order of appearance. */
14647
14648 static void
14649 do_neon_dyadic_i_su (void)
14650 {
14651 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14652 struct neon_type_el et = neon_check_type (3, rs,
14653 N_EQK, N_EQK, N_SU_32 | N_KEY);
14654 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14655 }
14656
14657 static void
14658 do_neon_dyadic_i64_su (void)
14659 {
14660 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14661 struct neon_type_el et = neon_check_type (3, rs,
14662 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14663 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14664 }
14665
/* Encode a Neon immediate-shift instruction: destination and source
   registers, Q bit, the immediate, the size-derived L bit and high
   imm bits from ET, and (when WRITE_UBIT) the U bit from UVAL.  */

static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
		unsigned immbits)
{
  /* SIZE is the element size in bytes; its top bit becomes the L bit
     (bit 7) and the remaining bits the upper part of the size field.  */
  unsigned size = et.size >> 3;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= immbits << 16;
  inst.instruction |= (size >> 3) << 7;
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  neon_dp_fixup (&inst);
}
14684
/* Encode VSHL, in either the immediate form (Dd, Dm, #imm) or the
   three-register form (Dd, Dm, Dn).  */

static void
do_neon_shl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
      int imm = inst.operands[2].imm;

      /* Valid shift counts are 0 .. element size - 1.  */
      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* VSHL/VQSHL 3-register variants have syntax such as:
	   vshl.xx Dd, Dm, Dn
	 whereas other 3-register operations encoded by neon_three_same have
	 syntax like:
	   vadd.xx Dd, Dn, Dm
	 (i.e. with Dn & Dm reversed).  Swap operands[1].reg and
	 operands[2].reg here.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
14720
/* Encode VQSHL (saturating shift left), by immediate or by register.  */

static void
do_neon_qshl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
      int imm = inst.operands[2].imm;

      /* Valid shift counts are 0 .. element size - 1.  */
      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* See note in do_neon_shl_imm.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
14750
14751 static void
14752 do_neon_rshl (void)
14753 {
14754 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14755 struct neon_type_el et = neon_check_type (3, rs,
14756 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14757 unsigned int tmp;
14758
14759 tmp = inst.operands[2].reg;
14760 inst.operands[2].reg = inst.operands[1].reg;
14761 inst.operands[1].reg = tmp;
14762 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14763 }
14764
/* Compute the cmode field (and the 8-bit immediate *IMMBITS) encoding
   the value IMMEDIATE for a Neon bitwise-immediate operation (VBIC/
   VORR) on elements of SIZE bits.  Returns the cmode on success, or
   FAIL if the value is not representable.  */

static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Handle .I8 pseudo-instructions.  */
  if (size == 8)
    {
      /* Unfortunately, this will make everything apart from zero out-of-range.
	 FIXME is this the intended semantics? There doesn't seem much point in
	 accepting .I8 if so.  */
      immediate |= immediate << 8;
      size = 16;
    }

  if (size >= 32)
    {
      /* 32-bit cmodes: a single nonzero byte in any of four positions.  */
      if (immediate == (immediate & 0x000000ff))
	{
	  *immbits = immediate;
	  return 0x1;
	}
      else if (immediate == (immediate & 0x0000ff00))
	{
	  *immbits = immediate >> 8;
	  return 0x3;
	}
      else if (immediate == (immediate & 0x00ff0000))
	{
	  *immbits = immediate >> 16;
	  return 0x5;
	}
      else if (immediate == (immediate & 0xff000000))
	{
	  *immbits = immediate >> 24;
	  return 0x7;
	}
      /* Fall back to a 16-bit cmode when both halfwords are equal.  */
      if ((immediate & 0xffff) != (immediate >> 16))
	goto bad_immediate;
      immediate &= 0xffff;
    }

  /* 16-bit cmodes: a single nonzero byte in either position.  */
  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return 0x9;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return 0xb;
    }

  bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}
14820
/* Encode the Neon bitwise operations VAND/VBIC/VORR/VORN/VEOR, in
   either the three-register form or the immediate form (VAND and VORN
   being pseudo-instructions for VBIC and VORR with the immediate
   inverted).  */

static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      /* Immediate form; VBIC/VORR accept the destination repeated as a
	 second operand, VMOV/VMVN-style forms do not.  */
      const int three_ops_form = (inst.operands[2].present
				  && !inst.operands[2].isreg);
      const int immoperand = (three_ops_form ? 2 : 1);
      enum neon_shape rs = (three_ops_form
			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
      struct neon_type_el et = neon_check_type (2, rs,
	N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
	return;

      if (three_ops_form)
	constraint (inst.operands[0].reg != inst.operands[1].reg,
		    _("first and second operands shall be the same register"));

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[immoperand].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern.  */
	  if (immbits != (inst.operands[immoperand].regisimm ?
			  inst.operands[immoperand].reg : 0))
	    {
	      /* Set immbits to an invalid constant.  */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
14906
14907 static void
14908 do_neon_bitfield (void)
14909 {
14910 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14911 neon_check_type (3, rs, N_IGNORE_TYPE);
14912 neon_three_same (neon_quad (rs), 0, -1);
14913 }
14914
/* Common dyadic-operation encoder.  TYPES is the set of permitted
   element types, UBIT_MEANING the type class that sets the U bit, and
   DESTBITS extra type-check bits for the destination operand.  */

static void
neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
		  unsigned destbits)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
					    types | N_KEY);
  if (et.type == NT_float)
    {
      NEON_ENCODE (FLOAT, inst);
      /* Only fp16 writes the size field; fp32 comes from the bitmask.  */
      neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
    }
  else
    {
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
    }
}
14933
/* Dyadic operation on signed/unsigned integer or float types.  */
static void
do_neon_dyadic_if_su (void)
{
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
14939
/* As do_neon_dyadic_if_su, for instructions limited to D registers.  */
static void
do_neon_dyadic_if_su_d (void)
{
  /* This version only allow D registers, but that constraint is enforced during
     operand parsing so we don't need to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
14947
/* Dyadic operation on integer or float types (D registers only).  */
static void
do_neon_dyadic_if_i_d (void)
{
  /* The "untyped" case can't happen.  Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
14955
/* Which checks vfp_or_neon_is_neon should perform (may be OR'd).  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,	/* Reject (or rewrite) condition codes.  */
  NEON_CHECK_ARCH = 2,	/* Require base Neon support.  */
  NEON_CHECK_ARCH8 = 4	/* Require ARMv8 Neon support.  */
};
14962
14963 /* Call this function if an instruction which may have belonged to the VFP or
14964 Neon instruction sets, but turned out to be a Neon instruction (due to the
14965 operand types involved, etc.). We have to check and/or fix-up a couple of
14966 things:
14967
14968 - Make sure the user hasn't attempted to make a Neon instruction
14969 conditional.
14970 - Alter the value in the condition code field if necessary.
14971 - Make sure that the arch supports Neon instructions.
14972
14973 Which of these operations take place depends on bits from enum
14974 vfp_or_neon_is_neon_bits.
14975
14976 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
14977 current instruction's condition is COND_ALWAYS, the condition field is
14978 changed to inst.uncond_value. This is necessary because instructions shared
14979 between VFP and Neon may be conditional for the VFP variants only, and the
14980 unconditional Neon version must have, e.g., 0xF in the condition field. */
14981
static int
vfp_or_neon_is_neon (unsigned check)
{
  /* Conditions are always legal in Thumb mode (IT blocks).  */
  if (!thumb_mode && (check & NEON_CHECK_CC))
    {
      if (inst.cond != COND_ALWAYS)
	{
	  first_error (_(BAD_COND));
	  return FAIL;
	}
      /* Force the unconditional encoding (e.g. 0xF condition field) --
	 see the side-effect warning in the comment above.  */
      if (inst.uncond_value != -1)
	inst.instruction |= inst.uncond_value << 28;
    }

  /* Architecture checks; mark_feature_used also records the feature
     for object attribute purposes.  */
  if ((check & NEON_CHECK_ARCH)
      && !mark_feature_used (&fpu_neon_ext_v1))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  if ((check & NEON_CHECK_ARCH8)
      && !mark_feature_used (&fpu_neon_ext_armv8))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  return SUCCESS;
}
15013
15014 static void
15015 do_neon_addsub_if_i (void)
15016 {
15017 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
15018 return;
15019
15020 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15021 return;
15022
15023 /* The "untyped" case can't happen. Do this to stop the "U" bit being
15024 affected if we specify unsigned args. */
15025 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
15026 }
15027
15028 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
15029 result to be:
15030 V<op> A,B (A is operand 0, B is operand 2)
15031 to mean:
15032 V<op> A,B,A
15033 not:
15034 V<op> A,B,B
15035 so handle that case specially. */
15036
15037 static void
15038 neon_exchange_operands (void)
15039 {
15040 if (inst.operands[1].present)
15041 {
15042 void *scratch = xmalloc (sizeof (inst.operands[0]));
15043
15044 /* Swap operands[1] and operands[2]. */
15045 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
15046 inst.operands[1] = inst.operands[2];
15047 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
15048 free (scratch);
15049 }
15050 else
15051 {
15052 inst.operands[1] = inst.operands[2];
15053 inst.operands[2] = inst.operands[0];
15054 }
15055 }
15056
/* Encode a Neon comparison: the register form goes through
   neon_dyadic_misc (with operands exchanged when INVERT encodes e.g.
   VCLT as VCGT), while the immediate form is a compare against #0.  */

static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
	neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK | N_SIZ, immtypes | N_KEY);

      NEON_ENCODE (IMMED, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* F bit (10) and size (19:18) for the compare-with-zero form.  */
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
}
15084
/* VCGE/VCGT: register compare, or signed/float compare against #0.  */
static void
do_neon_cmp (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, FALSE);
}
15090
/* VCLE/VCLT: encoded as the inverse compare with operands swapped.  */
static void
do_neon_cmp_inv (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, TRUE);
}
15096
/* VCEQ: integer or float equality compare, register or #0 form.  */
static void
do_neon_ceq (void)
{
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
15102
15103 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
15104 scalars, which are encoded in 5 bits, M : Rm.
15105 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
15106 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
15107 index in M.
15108
15109 Dot Product instructions are similar to multiply instructions except elsize
15110 should always be 32.
15111
15112 This function translates SCALAR, which is GAS's internal encoding of indexed
15113 scalar register, to raw encoding. There is also register and index range
15114 check based on ELSIZE. */
15115
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  /* 16-bit scalars: register in Rm[2:0], index in M:Rm[3].  */
  if (elsize == 16 && regno <= 7 && elno <= 3)
    return regno | (elno << 3);

  /* 32-bit scalars: register in Rm[3:0], index in M.  */
  if (elsize == 32 && regno <= 15 && elno <= 1)
    return regno | (elno << 4);

  /* Anything else (bad size, register, or index) is an error.  */
  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
15141
15142 /* Encode multiply / multiply-accumulate scalar instructions. */
15143
static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  /* Translate GAS's scalar representation to the raw M:Rm encoding,
     with register/index range checks against the element size.  */
  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  /* F bit (8), size (21:20) and U/Q bit (24).  */
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  neon_dp_fixup (&inst);
}
15166
/* Encode VMLA/VMLS: as a VFP instruction, as a Neon multiply-accumulate
   by scalar, or as a plain three-register Neon operation.  */

static void
do_neon_mac_maybe_scalar (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_I16 | N_I32 | N_F_16_32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      /* The "untyped" case can't happen.  Do this to stop the "U" bit being
	 affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}
15191
15192 static void
15193 do_neon_fmac (void)
15194 {
15195 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
15196 return;
15197
15198 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15199 return;
15200
15201 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
15202 }
15203
15204 static void
15205 do_neon_tst (void)
15206 {
15207 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15208 struct neon_type_el et = neon_check_type (3, rs,
15209 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
15210 neon_three_same (neon_quad (rs), 0, et.size);
15211 }
15212
15213 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
15214 same types as the MAC equivalents. The polynomial type for this instruction
15215 is encoded the same as the integer type. */
15216
static void
do_neon_mul (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    /* By-scalar multiply shares the MAC scalar encoding path.  */
    do_neon_mac_maybe_scalar ();
  else
    neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F16 | N_F32 | N_P8, 0);
}
15231
/* Encode VQDMULH/VQRDMULH, with a scalar or a register as the final
   operand.  */

static void
do_neon_qdmulh (void)
{
  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
15253
/* Encode VQRDMLAH/VQRDMLSH (ARMv8.1-A AdvSIMD), scalar or register
   form, after checking/recording the required architecture features.  */

static void
do_neon_qrdmlah (void)
{
  /* Check we're on the correct architecture.  */
  if (!mark_feature_used (&fpu_neon_ext_armv8))
    inst.error =
      _("instruction form not available on this architecture.");
  else if (!mark_feature_used (&fpu_neon_ext_v8_1))
    {
      as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
      record_feature_use (&fpu_neon_ext_v8_1);
    }

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
15285
15286 static void
15287 do_neon_fcmp_absolute (void)
15288 {
15289 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15290 struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
15291 N_F_16_32 | N_KEY);
15292 /* Size field comes from bit mask. */
15293 neon_three_same (neon_quad (rs), 1, et.size == 16 ? (int) et.size : -1);
15294 }
15295
/* VACLE/VACLT: the inverse absolute compares, encoded as VACGE/VACGT
   with the source operands exchanged.  */
static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
15302
15303 static void
15304 do_neon_step (void)
15305 {
15306 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15307 struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
15308 N_F_16_32 | N_KEY);
15309 neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
15310 }
15311
/* Encode VABS/VNEG, either as a VFP instruction or as the Neon
   two-register form.  */

static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S_32 | N_F_16_32 | N_KEY);

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  /* F bit (10) selects the float variant; size goes in bits 19:18.  */
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  neon_dp_fixup (&inst);
}
15337
15338 static void
15339 do_neon_sli (void)
15340 {
15341 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15342 struct neon_type_el et = neon_check_type (2, rs,
15343 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
15344 int imm = inst.operands[2].imm;
15345 constraint (imm < 0 || (unsigned)imm >= et.size,
15346 _("immediate out of range for insert"));
15347 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
15348 }
15349
15350 static void
15351 do_neon_sri (void)
15352 {
15353 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15354 struct neon_type_el et = neon_check_type (2, rs,
15355 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
15356 int imm = inst.operands[2].imm;
15357 constraint (imm < 1 || (unsigned)imm > et.size,
15358 _("immediate out of range for insert"));
15359 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
15360 }
15361
/* Encode VQSHLU (signed-to-unsigned saturating shift left; immediate
   form only).  */

static void
do_neon_qshlu_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
15378
15379 static void
15380 do_neon_qmovn (void)
15381 {
15382 struct neon_type_el et = neon_check_type (2, NS_DQ,
15383 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
15384 /* Saturating move where operands can be signed or unsigned, and the
15385 destination has the same signedness. */
15386 NEON_ENCODE (INTEGER, inst);
15387 if (et.type == NT_unsigned)
15388 inst.instruction |= 0xc0;
15389 else
15390 inst.instruction |= 0x80;
15391 neon_two_same (0, 1, et.size / 2);
15392 }
15393
15394 static void
15395 do_neon_qmovun (void)
15396 {
15397 struct neon_type_el et = neon_check_type (2, NS_DQ,
15398 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
15399 /* Saturating move with unsigned results. Operands must be signed. */
15400 NEON_ENCODE (INTEGER, inst);
15401 neon_two_same (0, 1, et.size / 2);
15402 }
15403
/* Encode VQSHRN/VQRSHRN (saturating shift right and narrow).  A zero
   shift count is rewritten as VQMOVN.  */

static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
15430
/* Encode VQSHRUN/VQRSHRUN (saturating shift right and narrow, signed
   input to unsigned result).  A zero shift count is rewritten as
   VQMOVUN.  */

static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
15460
15461 static void
15462 do_neon_movn (void)
15463 {
15464 struct neon_type_el et = neon_check_type (2, NS_DQ,
15465 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
15466 NEON_ENCODE (INTEGER, inst);
15467 neon_two_same (0, 1, et.size / 2);
15468 }
15469
/* Encode VSHRN/VRSHRN (shift right and narrow).  A zero shift count
   is rewritten as VMOVN.  */

static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm>  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for narrowing operation"));
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
15494
/* Encode VSHLL (shift left long).  The maximum-shift form (shift count
   equal to the element size) has a dedicated encoding; smaller shifts
   use the immediate-shift encoding.  */

static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.  */
      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
	N_EQK | N_DBL, N_SU_32 | N_KEY);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
15524
/* Check the various types for the VCVT instruction, and return which version
   the current instruction is.  */

/* X-macro table of every conversion flavour.  Each entry is:
     CVT_VAR (suffix, to-type-bits, from-type-bits, extra-shape-bits,
	      bitshift-opcode, plain-opcode, round-to-zero-opcode)
   The three opcode strings are the VFP "nsyn" mnemonics used by
   do_vfp_nsyn_cvt / do_vfp_nsyn_cvtz; NULL means the flavour has no such
   form.  The identifiers `whole_reg' and `key' refer to locals of
   get_neon_cvt_flavour, which is where this table is expanded with a
   type-checking CVT_VAR.  */
#define CVT_FLAVOUR_VAR \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
  /* Half-precision conversions.  */			      \
  CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
  /* New VCVT instructions introduced by ARMv8.2 fp16 extension.	\
     Compared with single/double precision variants, only the co-processor \
     field is different, so the encoding flow is reused here.  */	\
  CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL) \
  CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL) \
  CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
  CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
  /* VFP instructions.  */				      \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
  /* VFP instructions with bitshift.  */		      \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)

/* Expand each table entry to an enumerator neon_cvt_flavour_<suffix>.  */
#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  The enumerators appear
   in the same order as the CVT_FLAVOUR_VAR entries, so the value can be
   used to index the opcode tables built from the same macro.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  /* First flavour handled by the VFP (rather than Neon) paths.  */
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
15576
/* Work out which conversion flavour the current instruction is, for the
   operand shape RS.  CVT_FLAVOUR_VAR is expanded with a CVT_VAR that runs
   neon_check_type once per flavour, returning the first flavour whose
   types match; any errors left by failed attempts are cleared.  Returns
   neon_cvt_flavour_invalid if nothing matches.  */

static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
  if (et.type != NT_invtype)				\
    {							\
      inst.error = NULL;				\
      return (neon_cvt_flavour_##C);			\
    }

  struct neon_type_el et;
  /* Single-register VFP shapes want the N_VFP qualifier on both types.  */
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register.  Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}
15602
/* Rounding/behaviour selector passed to do_neon_cvt_1 and friends.  Each
   enumerator corresponds to a mnemonic suffix: VCVTA/VCVTN/VCVTP/VCVTM use
   modes a/n/p/m (encoded as rm = 0..3 in do_vfp_nsyn_cvt_fpv8), plain VCVT
   maps to mode z and VCVTR to mode x (see do_neon_cvt / do_neon_cvtr
   below).  Mode r is not used in this part of the file.  */
enum neon_cvt_mode
{
  neon_cvt_mode_a,
  neon_cvt_mode_n,
  neon_cvt_mode_p,
  neon_cvt_mode_m,
  neon_cvt_mode_z,
  neon_cvt_mode_x,
  neon_cvt_mode_r
};
15613
15614 /* Neon-syntax VFP conversions. */
15615
/* Emit the VFP "nsyn" opcode for conversion flavour FLAVOUR with operand
   shape RS.  Shapes that carry an immediate (fixed-point bitshift
   conversions) use the bitshift-opcode column of CVT_FLAVOUR_VAR and fold
   the immediate (operand 2) down into operand 1; all other shapes use the
   plain-opcode column.  A NULL table entry means the flavour has no such
   form, in which case no opcode is emitted here.  */

static void
do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
{
  const char *opname = 0;

  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI
      || rs == NS_FHI || rs == NS_HFI)
    {
      /* Conversions with immediate bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	{
	  opname = enc[flavour];
	  /* The bitshift forms are "in place": source and destination must
	     be the same register, and the immediate becomes operand 1.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("operands 0 and 1 must be the same register"));
	  inst.operands[1] = inst.operands[2];
	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
	}
    }
  else
    {
      /* Conversions without bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	opname = enc[flavour];
    }

  if (opname)
    do_vfp_nsyn_opcode (opname);

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16
      || flavour == neon_cvt_flavour_f16_u32
      || flavour == neon_cvt_flavour_f16_s32)
    do_scalar_fp16_v82_encode ();
}
15667
/* Encode the round-towards-zero VCVT variants using the round-to-zero
   opcode column of CVT_FLAVOUR_VAR.  Flavours with no round-to-zero form
   (NULL entries) are silently ignored.  */

static void
do_vfp_nsyn_cvtz (void)
{
  enum neon_shape rs = neon_select_shape (NS_FH, NS_FF, NS_FD, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
  const char *enc[] =
    {
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
      CVT_FLAVOUR_VAR
      NULL
#undef CVT_VAR
    };

  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
15684
/* Encode the FP v8 directed-rounding conversions (modes a/n/p/m) for
   flavour FLAVOUR.  The sz bit (bit 8) selects double vs single/half
   source, op (bit 7) selects signed vs unsigned result, and rm
   (bits [17:16]) encodes the rounding mode.  Rejects flavours and modes
   this encoding cannot express.  */

static void
do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
		      enum neon_cvt_mode mode)
{
  int sz, op;
  int rm;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (flavour == neon_cvt_flavour_s32_f64
      || flavour == neon_cvt_flavour_u32_f64)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
		_(BAD_FP16));

  set_it_insn_type (OUTSIDE_IT_INSN);

  /* sz: 1 for f64 source, 0 otherwise; op: 1 for signed result.  */
  switch (flavour)
    {
    case neon_cvt_flavour_s32_f64:
      sz = 1;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f32:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f16:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_u32_f64:
      sz = 1;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f32:
      sz = 0;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f16:
      sz = 0;
      op = 0;
      break;
    default:
      first_error (_("invalid instruction shape"));
      return;
    }

  switch (mode)
    {
    case neon_cvt_mode_a: rm = 0; break;
    case neon_cvt_mode_n: rm = 1; break;
    case neon_cvt_mode_p: rm = 2; break;
    case neon_cvt_mode_m: rm = 3; break;
    default: first_error (_("invalid rounding mode")); return;
    }

  NEON_ENCODE (FPV8, inst);
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= sz << 8;

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      ||flavour == neon_cvt_flavour_u32_f16)
    do_scalar_fp16_v82_encode ();
  inst.instruction |= op << 7;
  inst.instruction |= rm << 16;
  /* These encodings are unconditional (cond field = 0xf).  */
  inst.instruction |= 0xf0000000;
  inst.is_neon = TRUE;
}
15760
/* Shared worker for all the VCVT-family mnemonics.  MODE is the
   rounding/behaviour selector derived from the mnemonic suffix (see enum
   neon_cvt_mode).  Dispatches between the VFP encodings (round-to-zero,
   nsyn, FP v8 directed rounding) and the pure Neon encodings, depending
   on the operand shape and conversion flavour.  */

static void
do_neon_cvt_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
					  NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ,
					  NS_FH, NS_HF, NS_FHI, NS_HFI,
					  NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

  if (flavour == neon_cvt_flavour_invalid)
    return;

  /* PR11109: Handle round-to-zero for VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
      && (flavour == neon_cvt_flavour_s16_f16
	  || flavour == neon_cvt_flavour_u16_f16
	  || flavour == neon_cvt_flavour_s32_f32
	  || flavour == neon_cvt_flavour_u32_f32
	  || flavour == neon_cvt_flavour_s32_f64
	  || flavour == neon_cvt_flavour_u32_f64)
      && (rs == NS_FD || rs == NS_FF))
    {
      do_vfp_nsyn_cvtz ();
      return;
    }

  /* ARMv8.2 fp16 VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16)
      && (flavour == neon_cvt_flavour_s32_f16
	  || flavour == neon_cvt_flavour_u32_f16)
      && (rs == NS_FH))
    {
      do_vfp_nsyn_cvtz ();
      do_scalar_fp16_v82_encode ();
      return;
    }

  /* VFP rather than Neon conversions.  */
  if (flavour >= neon_cvt_flavour_first_fp)
    {
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);

      return;
    }

  switch (rs)
    {
    case NS_DDI:
    case NS_QQI:
      {
	/* Neon fixed-point conversion (with immediate shift count).  */
	unsigned immbits;
	/* Per-flavour opcode bits, indexed by enum neon_cvt_flavour.  */
	unsigned enctab[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
			     0x0000100, 0x1000100, 0x0, 0x1000000};

	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion.  */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	NEON_ENCODE (IMMED, inst);
	if (flavour != neon_cvt_flavour_invalid)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	if (flavour < neon_cvt_flavour_s16_f16)
	  {
	    /* 32-bit elements: immediate is encoded as 64 - shift.
	       NOTE(review): bit 21 was already set just above, so this
	       first OR is redundant but harmless.  */
	    inst.instruction |= 1 << 21;
	    immbits = 32 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	  }
	else
	  {
	    /* Half-precision (16-bit) elements.  */
	    inst.instruction |= 3 << 20;
	    immbits = 16 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	    inst.instruction &= ~(1 << 9);
	  }

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DD:
    case NS_QQ:
      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
	{
	  /* Directed-rounding Neon VCVT{A,N,P,M} (ARMv8).  */
	  NEON_ENCODE (FLOAT, inst);
	  set_it_insn_type (OUTSIDE_IT_INSN);

	  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	    return;

	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg);
	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	  inst.instruction |= neon_quad (rs) << 6;
	  /* Bit 7 selects the unsigned result variants.  */
	  inst.instruction |= (flavour == neon_cvt_flavour_u16_f16
			       || flavour == neon_cvt_flavour_u32_f32) << 7;
	  inst.instruction |= mode << 8;
	  if (flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_s16_f16)
	    /* Mask off the original size bits and reencode them.  */
	    inst.instruction = ((inst.instruction & 0xfff3ffff) | (1 << 18));

	  if (thumb_mode)
	    inst.instruction |= 0xfc000000;
	  else
	    inst.instruction |= 0xf0000000;
	}
      else
	{
    int_encode:
	  {
	    /* Plain integer <-> float conversion.  Per-flavour opcode
	       bits, indexed by enum neon_cvt_flavour.  */
	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080,
				  0x100, 0x180, 0x0, 0x080};

	    NEON_ENCODE (INTEGER, inst);

	    if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	      return;

	    if (flavour != neon_cvt_flavour_invalid)
	      inst.instruction |= enctab[flavour];

	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	    inst.instruction |= LOW4 (inst.operands[1].reg);
	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	    inst.instruction |= neon_quad (rs) << 6;
	    if (flavour >= neon_cvt_flavour_s16_f16
		&& flavour <= neon_cvt_flavour_f16_u16)
	      /* Half precision.  */
	      inst.instruction |= 1 << 18;
	    else
	      inst.instruction |= 2 << 18;

	    neon_dp_fixup (&inst);
	  }
	}
      break;

    /* Half-precision conversions for Advanced SIMD -- neon.  */
    case NS_QD:
    case NS_DQ:

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      /* Base opcodes for VCVT f32<->f16 (narrowing vs widening).  */
      if (rs == NS_DQ)
	inst.instruction = 0x3b60600;
      else
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
    }
}
15952
/* VCVTR: convert using mode x (see enum neon_cvt_mode).  */
static void
do_neon_cvtr (void)
{
  do_neon_cvt_1 (neon_cvt_mode_x);
}
15958
/* Plain VCVT: convert using mode z (round towards zero).  */
static void
do_neon_cvt (void)
{
  do_neon_cvt_1 (neon_cvt_mode_z);
}
15964
/* VCVTA: directed-rounding convert, mode a.  */
static void
do_neon_cvta (void)
{
  do_neon_cvt_1 (neon_cvt_mode_a);
}
15970
/* VCVTN: directed-rounding convert, mode n.  */
static void
do_neon_cvtn (void)
{
  do_neon_cvt_1 (neon_cvt_mode_n);
}
15976
/* VCVTP: directed-rounding convert, mode p.  */
static void
do_neon_cvtp (void)
{
  do_neon_cvt_1 (neon_cvt_mode_p);
}
15982
/* VCVTM: directed-rounding convert, mode m.  */
static void
do_neon_cvtm (void)
{
  do_neon_cvt_1 (neon_cvt_mode_m);
}
15988
/* Encode VCVTB/VCVTT.  T selects the top (VCVTT) vs bottom (VCVTB) half,
   TO means converting towards half precision, and IS_DOUBLE means the
   wide operand is an F64 D register (requires the ARMv8 VFP extension).  */

static void
do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
{
  if (is_double)
    mark_feature_used (&fpu_vfp_ext_armv8);

  /* The wide side uses a D register when IS_DOUBLE, an S register
     otherwise; which side is wide depends on the direction TO.  */
  encode_arm_vfp_reg (inst.operands[0].reg,
		      (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg,
		      (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= to ? 0x10000 : 0;
  inst.instruction |= t ? 0x80 : 0;
  inst.instruction |= is_double ? 0x100 : 0;
  do_vfp_cond_or_thumb ();
}
16004
/* Shared worker for VCVTB (T = FALSE) and VCVTT (T = TRUE).  Probes the
   four supported type combinations in turn (f16<->f32, f16<->f64) and
   forwards to do_neon_cvttb_2 with the matching direction/width flags;
   failed probes leave an error which is cleared before encoding.  */

static void
do_neon_cvttb_1 (bfd_boolean t)
{
  enum neon_shape rs = neon_select_shape (NS_HF, NS_HD, NS_FH, NS_FF, NS_FD,
					  NS_DF, NS_DH, NS_NULL);

  if (rs == NS_NULL)
    return;
  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
    {
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
    {
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
    }
  else
    return;
}
16046
/* VCVTB: convert using the bottom half of the half-precision register.  */
static void
do_neon_cvtb (void)
{
  do_neon_cvttb_1 (FALSE);
}
16052
16053
/* VCVTT: convert using the top half of the half-precision register.  */
static void
do_neon_cvtt (void)
{
  do_neon_cvttb_1 (TRUE);
}
16059
/* Encode the immediate forms of VMOV/VMVN.  Tries to find a cmode/op
   encoding for the 64-bit immediate; if none exists, inverts the
   immediate and flips between VMOV and VMVN before trying once more.  */

static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  /* The 64-bit immediate arrives split across imm (low 32 bits) and,
     when regisimm is set, reg (high 32 bits).  */
  immlo = inst.operands[1].imm;
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV.  Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Rewrite the op bit with the (possibly flipped) value.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
16111
16112 static void
16113 do_neon_mvn (void)
16114 {
16115 if (inst.operands[1].isreg)
16116 {
16117 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16118
16119 NEON_ENCODE (INTEGER, inst);
16120 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16121 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16122 inst.instruction |= LOW4 (inst.operands[1].reg);
16123 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16124 inst.instruction |= neon_quad (rs) << 6;
16125 }
16126 else
16127 {
16128 NEON_ENCODE (IMMED, inst);
16129 neon_move_immediate ();
16130 }
16131
16132 neon_dp_fixup (&inst);
16133 }
16134
16135 /* Encode instructions of form:
16136
16137 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
16138 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
16139
/* Encode a mixed-length (long/wide/narrow) three-register operation.
   ET supplies the signedness (U bit, bit 24) and SIZE the element size
   (log2 encoded in bits [21:20]); register fields follow the layout in
   the comment above.  */
static void
neon_mixed_length (struct neon_type_el et, unsigned size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (et.type == NT_unsigned) << 24;
  inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
16154
/* Encode a lengthening dyadic operation (Q = D op D).  */
static void
do_neon_dyadic_long (void)
{
  /* FIXME: Type checking for lengthening op.  */
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
16163
/* Encode VABAL (absolute difference and accumulate, long).  */
static void
do_neon_abal (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
16171
/* Encode a long multiply-accumulate whose third operand may be either a
   scalar index or a D register.  REGTYPES gives the element types allowed
   by the scalar-index form and SCALARTYPES those allowed by the register
   form.  NOTE(review): the parameter names appear swapped relative to
   their use (REGTYPES constrains the scalar branch) -- confirm against
   callers such as do_neon_mac_maybe_scalar_long below.  */
static void
neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
{
  if (inst.operands[2].isscalar)
    {
      struct neon_type_el et = neon_check_type (3, NS_QDS,
	N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, et.type == NT_unsigned);
    }
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      neon_mixed_length (et, et.size);
    }
}
16190
/* VMLAL/VMLSL and friends: scalar form takes S16/S32/U16/U32 elements,
   register form takes any of S8/S16/S32/U8/U16/U32.  */
static void
do_neon_mac_maybe_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
16196
16197 /* Like neon_scalar_for_mul, this function generate Rm encoding from GAS's
16198 internal SCALAR. QUAD_P is 1 if it's for Q format, otherwise it's 0. */
16199
/* Like neon_scalar_for_mul, this function generate Rm encoding from GAS's
   internal SCALAR.  QUAD_P is 1 if it's for Q format, otherwise it's 0.
   On an out-of-range scalar, report an error and return 0.  */

static unsigned
neon_scalar_for_fmac_fp16_long (unsigned scalar, unsigned quad_p)
{
  unsigned reg = NEON_SCALAR_REG (scalar);
  unsigned idx = NEON_SCALAR_INDEX (scalar);

  if (quad_p)
    {
      /* Q form: register in bits [2:0], index split across bits 3 and 5.  */
      if (reg <= 7 && idx <= 3)
	return ((reg & 0x7)
		| ((idx & 0x1) << 3)
		| (((idx >> 1) & 0x1) << 5));
    }
  else if (reg <= 15 && idx <= 1)
    /* D form: register split across bits [2:0] and 5, index in bit 3.  */
    return (((reg & 0x1) << 5)
	    | ((reg >> 1) & 0x7)
	    | ((idx & 0x1) << 3));

  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
16229
/* Shared encoder for VFMAL (SUBTYPE = 0) and VFMSL (SUBTYPE = 1), the
   FP16 fused multiply-accumulate long instructions.  Handles both the
   three-same D/Q register form and the scalar-index form, including the
   non-standard Vn/Vm field layout used by the D-register variants.  */

static void
do_neon_fmac_maybe_scalar_long (int subtype)
{
  enum neon_shape rs;
  int high8;
  /* NOTE: vfmal/vfmsl use slightly different NEON three-same encoding.  The
     "size" field (bits[21:20]) has different meaning.  For scalar index variant,
     it's used to differentiate add and subtract, otherwise it's with fixed value
     0x2.  */
  int size = -1;

  if (inst.cond != COND_ALWAYS)
    as_warn (_("vfmal/vfmsl with FP16 type cannot be conditional, the "
	       "behaviour is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16_fml),
	      _(BAD_FP16));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* vfmal/vfmsl are in three-same D/Q register format or the third operand can
     be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      high8 = 0xfe000000;
      if (subtype)
	size = 16;
      rs = neon_select_shape (NS_DHS, NS_QDS, NS_NULL);
    }
  else
    {
      high8 = 0xfc000000;
      size = 32;
      if (subtype)
	inst.instruction |= (0x1 << 23);
      rs = neon_select_shape (NS_DHH, NS_QDD, NS_NULL);
    }

  neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_F16);

  /* "opcode" from template has included "ubit", so simply pass 0 here.  Also,
     the "S" bit in size field has been reused to differentiate vfmal and vfmsl,
     so we simply pass -1 as size.  */
  unsigned quad_p = (rs == NS_QDD || rs == NS_QDS);
  neon_three_same (quad_p, 0, size);

  /* Undo neon_dp_fixup.  Redo the high eight bits.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;

#define LOW1(R) ((R) & 0x1)
#define HI4(R) (((R) >> 1) & 0xf)
  /* Unlike usually NEON three-same, encoding for Vn and Vm will depend on
     whether the instruction is in Q form and whether Vm is a scalar indexed
     operand.  */
  if (inst.operands[2].isscalar)
    {
      /* Replace the Rm field with the special scalar-index encoding.  */
      unsigned rm
	= neon_scalar_for_fmac_fp16_long (inst.operands[2].reg, quad_p);
      inst.instruction &= 0xffffffd0;
      inst.instruction |= rm;

      if (!quad_p)
	{
	  /* Redo Rn as well.  */
	  inst.instruction &= 0xfff0ff7f;
	  inst.instruction |= HI4 (inst.operands[1].reg) << 16;
	  inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
	}
    }
  else if (!quad_p)
    {
      /* Redo Rn and Rm.  */
      inst.instruction &= 0xfff0ff50;
      inst.instruction |= HI4 (inst.operands[1].reg) << 16;
      inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
      inst.instruction |= HI4 (inst.operands[2].reg);
      inst.instruction |= LOW1 (inst.operands[2].reg) << 5;
    }
}
16311
/* VFMAL: encode via the shared vfmal/vfmsl handler (subtype 0 selects
   the add form).  */
static void
do_neon_vfmal (void)
{
  do_neon_fmac_maybe_scalar_long (0);
}
16317
/* VFMSL: encode via the shared vfmal/vfmsl handler (subtype 1 selects
   the subtract form).  */
static void
do_neon_vfmsl (void)
{
  do_neon_fmac_maybe_scalar_long (1);
}
16323
/* Encode a widening dyadic operation (Q = Q op D).  */
static void
do_neon_dyadic_wide (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QQD,
    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
16331
/* Encode a narrowing dyadic operation (D = Q op Q); note the size
   passed on is that of the narrow (halved) elements.  */
static void
do_neon_dyadic_narrow (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
  /* Operand sign is unimportant, and the U bit is part of the opcode,
     so force the operand type to integer.  */
  et.type = NT_integer;
  neon_mixed_length (et, et.size / 2);
}
16342
/* Saturating long multiplies (scalar or register third operand);
   signed 16/32-bit elements only.  */
static void
do_neon_mul_sat_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
16348
/* Encode VMULL.  The scalar-index form is shared with VMLAL/VMLSL;
   the register form additionally accepts polynomial types (P8, and on
   ARMv8 crypto targets P64).  */
static void
do_neon_vmull (void)
{
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);

      if (et.type == NT_poly)
	NEON_ENCODE (POLY, inst);
      else
	NEON_ENCODE (INTEGER, inst);

      /* For polynomial encoding the U bit must be zero, and the size must
	 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
	 obviously, as 0b10).  */
      if (et.size == 64)
	{
	  /* Check we're on the correct architecture.  */
	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
	    inst.error =
	      _("Instruction form not available on this architecture.");

	  /* Pretend the size is 32 so neon_logbits yields the 0b10 field.  */
	  et.size = 32;
	}

      neon_mixed_length (et, et.size);
    }
}
16380
/* Encode VEXT (vector extract).  The element-count immediate is scaled
   to a byte offset and must fit in the register width.  */
static void
do_neon_ext (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  /* Convert the element-count immediate into a byte offset.  */
  unsigned imm = (inst.operands[3].imm * et.size) / 8;

  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
	      _("shift out of range"));
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= imm << 8;

  neon_dp_fixup (&inst);
}
16402
/* Encode VREV16/VREV32/VREV64.  The reversal-region width is implied by
   the opcode bits already present in the instruction template.  */
static void
do_neon_rev (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask.  We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction.  */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
  gas_assert (elsize != 0);
  constraint (et.size >= elsize,
	      _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
16419
/* Encode VDUP: either replicate one scalar lane across a vector, or
   replicate an ARM core register across a vector (the latter shares its
   encoding between ARM and Thumb except for the condition field).  */
static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      /* Scalar-lane source.  */
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      /* Lane index, pre-shifted into position above the size bits.  */
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector.  */
      NEON_ENCODE (ARMREG, inst);
      switch (et.size)
	{
	case 8: inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
16470
16471 /* VMOV has particularly many variations. It can be one of:
16472 0. VMOV<c><q> <Qd>, <Qm>
16473 1. VMOV<c><q> <Dd>, <Dm>
16474 (Register operations, which are VORR with Rm = Rn.)
16475 2. VMOV<c><q>.<dt> <Qd>, #<imm>
16476 3. VMOV<c><q>.<dt> <Dd>, #<imm>
16477 (Immediate loads.)
16478 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
16479 (ARM register to scalar.)
16480 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
16481 (Two ARM registers to vector.)
16482 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
16483 (Scalar to ARM register.)
16484 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
16485 (Vector to two ARM registers.)
16486 8. VMOV.F32 <Sd>, <Sm>
16487 9. VMOV.F64 <Dd>, <Dm>
16488 (VFP register moves.)
16489 10. VMOV.F32 <Sd>, #imm
16490 11. VMOV.F64 <Dd>, #imm
16491 (VFP float immediate load.)
16492 12. VMOV <Rd>, <Sm>
16493 (VFP single to ARM reg.)
16494 13. VMOV <Sd>, <Rm>
16495 (ARM reg to VFP single.)
16496 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
16497 (Two ARM regs to two VFP singles.)
16498 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
16499 (Two VFP singles to two ARM regs.)
16500
16501 These cases can be disambiguated using neon_select_shape, except cases 1/9
16502 and 3/11 which depend on the operand type too.
16503
16504 All the encoded bits are hardcoded by this function.
16505
16506 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
16507 Cases 5, 7 may be used with VFPv2 and above.
16508
16509 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
16510 can specify a type where it doesn't make sense to, and is ignored). */
16511
static void
do_neon_mov (void)
{
  /* Work out which of the 15 syntactic forms (documented in the comment
     above) we have been given, purely from the operand shapes.  */
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
					  NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR,
					  NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
					  NS_HR, NS_RH, NS_HI, NS_NULL);
  struct neon_type_el et;
  /* Pseudo-mnemonic ("fconsts"/"fconstd") for the immediate-load cases.  */
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD: /* case 1/9. */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given. */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* Case 9: VMOV.F64 is a plain VFP register copy.  */
	  do_vfp_nsyn_opcode ("fcpyd");
	  break;
	}
      /* fall through. */

    case NS_QQ: /* case 0/1. */
      {
	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;
	/* The architecture manual I have doesn't explicitly state which
	   value the U bit should have for register->register moves, but
	   the equivalent VORR instruction has U = 0, so do that. */
	inst.instruction = 0x0200110;
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	/* VORR Vd, Vm, Vm: the source goes into both Vm and Vn fields.  */
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
	inst.instruction |= neon_quad (rs) << 6;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DI: /* case 3/11. */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 11 (fconstd). */
	  ldconst = "fconstd";
	  goto encode_fconstd;
	}
      /* fall through. */

    case NS_QI: /* case 2/3. */
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      neon_dp_fixup (&inst);
      break;

    case NS_SR: /* case 4. */
      {
	unsigned bcdebits = 0;
	int logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

	/* .<size> is optional here, defaulting to .32. */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	/* Only 32-bit transfers are available without full Neon.  */
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Base opc1/opc2 pattern selecting the element size; the lane
	   index is OR'd in below (shifted by log2 of the element size).  */
	switch (et.size)
	  {
	  case 8: bcdebits = 0x8; break;
	  case 16: bcdebits = 0x1; break;
	  case 32: bcdebits = 0x0; break;
	  default: ;
	  }

	bcdebits |= x << logsize;

	inst.instruction = 0xe000b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[1].reg << 12;
	/* Split the combined field across the opc2 (bits 6:5) and opc1
	   (bits 22:21) positions.  */
	inst.instruction |= (bcdebits & 3) << 5;
	inst.instruction |= (bcdebits >> 2) << 21;
      }
      break;

    case NS_DRR: /* case 5 (fmdrr). */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS: /* case 6. */
      {
	unsigned logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
	unsigned abcdebits = 0;

	/* .<dt> is optional here, defaulting to .32. */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL,
			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* As for case 4, but sub-32-bit transfers also encode signedness
	   (bit 4 of the pattern) since the scalar is extended.  */
	switch (et.size)
	  {
	  case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
	  case 32: abcdebits = 0x00; break;
	  default: ;
	  }

	abcdebits |= x << logsize;
	inst.instruction = 0xe100b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[0].reg << 12;
	inst.instruction |= (abcdebits & 3) << 5;
	inst.instruction |= (abcdebits >> 2) << 21;
      }
      break;

    case NS_RRD: /* case 7 (fmrrd). */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF: /* case 8 (fcpys). */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_HI:
    case NS_FI: /* case 10 (fconsts). */
      ldconst = "fconsts";
    encode_fconstd:
      if (!inst.operands[1].immisfloat)
	{
	  unsigned new_imm;
	  /* Immediate has to fit in 8 bits so float is enough. */
	  float imm = (float) inst.operands[1].imm;
	  memcpy (&new_imm, &imm, sizeof (float));
	  /* But the assembly may have been written to provide an integer
	     bit pattern that equates to a float, so check that the
	     conversion has worked. */
	  if (is_quarter_float (new_imm))
	    {
	      if (is_quarter_float (inst.operands[1].imm))
		as_warn (_("immediate constant is valid both as a bit-pattern and a floating point value (using the fp value)"));

	      inst.operands[1].imm = new_imm;
	      inst.operands[1].immisfloat = 1;
	    }
	}

      if (is_quarter_float (inst.operands[1].imm))
	{
	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
	  do_vfp_nsyn_opcode (ldconst);

	  /* ARMv8.2 fp16 vmov.f16 instruction. */
	  if (rs == NS_HI)
	    do_scalar_fp16_v82_encode ();
	}
      else
	first_error (_("immediate out of range"));
      break;

    case NS_RH:
    case NS_RF: /* case 12 (fmrs). */
      do_vfp_nsyn_opcode ("fmrs");
      /* ARMv8.2 fp16 vmov.f16 instruction. */
      if (rs == NS_RH)
	do_scalar_fp16_v82_encode ();
      break;

    case NS_HR:
    case NS_FR: /* case 13 (fmsr). */
      do_vfp_nsyn_opcode ("fmsr");
      /* ARMv8.2 fp16 vmov.f16 instruction. */
      if (rs == NS_HR)
	do_scalar_fp16_v82_encode ();
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect. */
    case NS_RRFF: /* case 14 (fmrrs). */
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR: /* case 15 (fmsrr). */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    case NS_NULL:
      /* neon_select_shape has determined that the instruction
	 shape is wrong and has already set the error message. */
      break;

    default:
      abort ();
    }
}
16779
16780 static void
16781 do_neon_rshift_round_imm (void)
16782 {
16783 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
16784 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
16785 int imm = inst.operands[2].imm;
16786
16787 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
16788 if (imm == 0)
16789 {
16790 inst.operands[2].present = 0;
16791 do_neon_mov ();
16792 return;
16793 }
16794
16795 constraint (imm < 1 || (unsigned)imm > et.size,
16796 _("immediate out of range for shift"));
16797 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
16798 et.size - imm);
16799 }
16800
16801 static void
16802 do_neon_movhf (void)
16803 {
16804 enum neon_shape rs = neon_select_shape (NS_HH, NS_NULL);
16805 constraint (rs != NS_HH, _("invalid suffix"));
16806
16807 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
16808 _(BAD_FPU));
16809
16810 if (inst.cond != COND_ALWAYS)
16811 {
16812 if (thumb_mode)
16813 {
16814 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
16815 " the behaviour is UNPREDICTABLE"));
16816 }
16817 else
16818 {
16819 inst.error = BAD_COND;
16820 return;
16821 }
16822 }
16823
16824 do_vfp_sp_monadic ();
16825
16826 inst.is_neon = 1;
16827 inst.instruction |= 0xf0000000;
16828 }
16829
16830 static void
16831 do_neon_movl (void)
16832 {
16833 struct neon_type_el et = neon_check_type (2, NS_QD,
16834 N_EQK | N_DBL, N_SU_32 | N_KEY);
16835 unsigned sizebits = et.size >> 3;
16836 inst.instruction |= sizebits << 19;
16837 neon_two_same (0, et.type == NT_unsigned, -1);
16838 }
16839
16840 static void
16841 do_neon_trn (void)
16842 {
16843 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16844 struct neon_type_el et = neon_check_type (2, rs,
16845 N_EQK, N_8 | N_16 | N_32 | N_KEY);
16846 NEON_ENCODE (INTEGER, inst);
16847 neon_two_same (neon_quad (rs), 1, et.size);
16848 }
16849
16850 static void
16851 do_neon_zip_uzp (void)
16852 {
16853 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16854 struct neon_type_el et = neon_check_type (2, rs,
16855 N_EQK, N_8 | N_16 | N_32 | N_KEY);
16856 if (rs == NS_DD && et.size == 32)
16857 {
16858 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
16859 inst.instruction = N_MNEM_vtrn;
16860 do_neon_trn ();
16861 return;
16862 }
16863 neon_two_same (neon_quad (rs), 1, et.size);
16864 }
16865
16866 static void
16867 do_neon_sat_abs_neg (void)
16868 {
16869 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16870 struct neon_type_el et = neon_check_type (2, rs,
16871 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16872 neon_two_same (neon_quad (rs), 1, et.size);
16873 }
16874
16875 static void
16876 do_neon_pair_long (void)
16877 {
16878 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16879 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
16880 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
16881 inst.instruction |= (et.type == NT_unsigned) << 7;
16882 neon_two_same (neon_quad (rs), 1, et.size);
16883 }
16884
16885 static void
16886 do_neon_recip_est (void)
16887 {
16888 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16889 struct neon_type_el et = neon_check_type (2, rs,
16890 N_EQK | N_FLT, N_F_16_32 | N_U32 | N_KEY);
16891 inst.instruction |= (et.type == NT_float) << 8;
16892 neon_two_same (neon_quad (rs), 1, et.size);
16893 }
16894
16895 static void
16896 do_neon_cls (void)
16897 {
16898 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16899 struct neon_type_el et = neon_check_type (2, rs,
16900 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16901 neon_two_same (neon_quad (rs), 1, et.size);
16902 }
16903
16904 static void
16905 do_neon_clz (void)
16906 {
16907 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16908 struct neon_type_el et = neon_check_type (2, rs,
16909 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
16910 neon_two_same (neon_quad (rs), 1, et.size);
16911 }
16912
16913 static void
16914 do_neon_cnt (void)
16915 {
16916 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16917 struct neon_type_el et = neon_check_type (2, rs,
16918 N_EQK | N_INT, N_8 | N_KEY);
16919 neon_two_same (neon_quad (rs), 1, et.size);
16920 }
16921
16922 static void
16923 do_neon_swp (void)
16924 {
16925 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16926 neon_two_same (neon_quad (rs), 1, -1);
16927 }
16928
16929 static void
16930 do_neon_tbl_tbx (void)
16931 {
16932 unsigned listlenbits;
16933 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
16934
16935 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
16936 {
16937 first_error (_("bad list length for table lookup"));
16938 return;
16939 }
16940
16941 listlenbits = inst.operands[1].imm - 1;
16942 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16943 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16944 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16945 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16946 inst.instruction |= LOW4 (inst.operands[2].reg);
16947 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16948 inst.instruction |= listlenbits << 8;
16949
16950 neon_dp_fixup (&inst);
16951 }
16952
16953 static void
16954 do_neon_ldm_stm (void)
16955 {
16956 /* P, U and L bits are part of bitmask. */
16957 int is_dbmode = (inst.instruction & (1 << 24)) != 0;
16958 unsigned offsetbits = inst.operands[1].imm * 2;
16959
16960 if (inst.operands[1].issingle)
16961 {
16962 do_vfp_nsyn_ldm_stm (is_dbmode);
16963 return;
16964 }
16965
16966 constraint (is_dbmode && !inst.operands[0].writeback,
16967 _("writeback (!) must be used for VLDMDB and VSTMDB"));
16968
16969 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
16970 _("register list must contain at least 1 and at most 16 "
16971 "registers"));
16972
16973 inst.instruction |= inst.operands[0].reg << 16;
16974 inst.instruction |= inst.operands[0].writeback << 21;
16975 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
16976 inst.instruction |= HI1 (inst.operands[1].reg) << 22;
16977
16978 inst.instruction |= offsetbits;
16979
16980 do_vfp_cond_or_thumb ();
16981 }
16982
16983 static void
16984 do_neon_ldr_str (void)
16985 {
16986 int is_ldr = (inst.instruction & (1 << 20)) != 0;
16987
16988 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
16989 And is UNPREDICTABLE in thumb mode. */
16990 if (!is_ldr
16991 && inst.operands[1].reg == REG_PC
16992 && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
16993 {
16994 if (thumb_mode)
16995 inst.error = _("Use of PC here is UNPREDICTABLE");
16996 else if (warn_on_deprecated)
16997 as_tsktsk (_("Use of PC here is deprecated"));
16998 }
16999
17000 if (inst.operands[0].issingle)
17001 {
17002 if (is_ldr)
17003 do_vfp_nsyn_opcode ("flds");
17004 else
17005 do_vfp_nsyn_opcode ("fsts");
17006
17007 /* ARMv8.2 vldr.16/vstr.16 instruction. */
17008 if (inst.vectype.el[0].size == 16)
17009 do_scalar_fp16_v82_encode ();
17010 }
17011 else
17012 {
17013 if (is_ldr)
17014 do_vfp_nsyn_opcode ("fldd");
17015 else
17016 do_vfp_nsyn_opcode ("fstd");
17017 }
17018 }
17019
17020 /* "interleave" version also handles non-interleaving register VLD1/VST1
17021 instructions. */
17022
static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL,
					    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries. */
  const int typetable[] =
    {
      0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
       -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
       -1, -1, -1, -1, 0x4, 0x5, -1, -1,  /* VLD3 / VST3. */
       -1, -1, -1, -1, -1, -1, 0x0, 0x1   /* VLD4 / VST4. */
    };
  int typebits;

  /* neon_check_type has already recorded an error message.  */
  if (et.type == NT_invtype)
    return;

  /* Validate the @<align> qualifier, if any, against the list length,
     and translate it to the 2-bit align field.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 2;
	break;
      case 256:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 3;
	break;
      default:
      bad_alignment:
	first_error (_("bad alignment"));
	return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6].  We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask.  Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back. */
  idx = ((inst.operands[0].imm >> 4) & 7)
	| (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));
  /* 64-bit elements are only permitted for VLD1/VST1 (n == 0).  */
  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
	      _("bad element type for instruction"));

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
17088
17089 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
17090 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
17091 otherwise. The variable arguments are a list of pairs of legal (size, align)
17092 values, terminated with -1. */
17093
17094 static int
17095 neon_alignment_bit (int size, int align, int *do_alignment, ...)
17096 {
17097 va_list ap;
17098 int result = FAIL, thissize, thisalign;
17099
17100 if (!inst.operands[1].immisalign)
17101 {
17102 *do_alignment = 0;
17103 return SUCCESS;
17104 }
17105
17106 va_start (ap, do_alignment);
17107
17108 do
17109 {
17110 thissize = va_arg (ap, int);
17111 if (thissize == -1)
17112 break;
17113 thisalign = va_arg (ap, int);
17114
17115 if (size == thissize && align == thisalign)
17116 result = SUCCESS;
17117 }
17118 while (result != SUCCESS);
17119
17120 va_end (ap);
17121
17122 if (result == SUCCESS)
17123 *do_alignment = 1;
17124 else
17125 first_error (_("unsupported alignment for instruction"));
17126
17127 return result;
17128 }
17129
/* Encode single n-element structure to one lane VLD<n>/VST<n>
   instructions.  */

static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;
  int logsize = neon_logbits (et.size);
  int align = inst.operands[1].imm >> 8;
  /* <n> minus one, from bits [9:8] of the initial bitmask.  */
  int n = (inst.instruction >> 8) & 3;
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  /* Each <n> has its own set of legal alignments and its own layout of
     the index_align field, so handle them separately.  */
  switch (n)
    {
    case 0: /* VLD1 / VST1. */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1: /* VLD2 / VST2. */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 16,
				       16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	inst.instruction |= 1 << 4;
      break;

    case 2: /* VLD3 / VST3. */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3: /* VLD4 / VST4. */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
17214
17215 /* Encode single n-element structure to all lanes VLD<n> instructions. */
17216
static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;

  if (et.type == NT_invtype)
    return;

  /* Dispatch on <n> minus one, from bits [9:8] of the initial bitmask.  */
  switch ((inst.instruction >> 8) & 3)
    {
    case 0: /* VLD1. */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      /* VLD1 to all lanes accepts one or two registers; bit 5 is the T
	 (two-register) flag.  */
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1: /* VLD2. */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 8, 16, 16, 32, 32, 64,
				       -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2: /* VLD3. */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3: /* VLD4. */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* The 32-bit/128-bit-aligned combination uses a special size
	   encoding.  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  /* The a bit (alignment present) lives in bit 4.  */
  inst.instruction |= do_alignment << 4;
}
17289
/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   apart from bits [11:4]).  */
17292
17293 static void
17294 do_neon_ldx_stx (void)
17295 {
17296 if (inst.operands[1].isreg)
17297 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
17298
17299 switch (NEON_LANE (inst.operands[0].imm))
17300 {
17301 case NEON_INTERLEAVE_LANES:
17302 NEON_ENCODE (INTERLV, inst);
17303 do_neon_ld_st_interleave ();
17304 break;
17305
17306 case NEON_ALL_LANES:
17307 NEON_ENCODE (DUP, inst);
17308 if (inst.instruction == N_INV)
17309 {
17310 first_error ("only loads support such operands");
17311 break;
17312 }
17313 do_neon_ld_dup ();
17314 break;
17315
17316 default:
17317 NEON_ENCODE (LANE, inst);
17318 do_neon_ld_st_lane ();
17319 }
17320
17321 /* L bit comes from bit mask. */
17322 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17323 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17324 inst.instruction |= inst.operands[1].reg << 16;
17325
17326 if (inst.operands[1].postind)
17327 {
17328 int postreg = inst.operands[1].imm & 0xf;
17329 constraint (!inst.operands[1].immisreg,
17330 _("post-index must be a register"));
17331 constraint (postreg == 0xd || postreg == 0xf,
17332 _("bad register for post-index"));
17333 inst.instruction |= postreg;
17334 }
17335 else
17336 {
17337 constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
17338 constraint (inst.reloc.exp.X_op != O_constant
17339 || inst.reloc.exp.X_add_number != 0,
17340 BAD_ADDR_MODE);
17341
17342 if (inst.operands[1].writeback)
17343 {
17344 inst.instruction |= 0xd;
17345 }
17346 else
17347 inst.instruction |= 0xf;
17348 }
17349
17350 if (thumb_mode)
17351 inst.instruction |= 0xf9000000;
17352 else
17353 inst.instruction |= 0xf4000000;
17354 }
17355
17356 /* FP v8. */
17357 static void
17358 do_vfp_nsyn_fpv8 (enum neon_shape rs)
17359 {
17360 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
17361 D register operands. */
17362 if (neon_shape_class[rs] == SC_DOUBLE)
17363 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
17364 _(BAD_FPU));
17365
17366 NEON_ENCODE (FPV8, inst);
17367
17368 if (rs == NS_FFF || rs == NS_HHH)
17369 {
17370 do_vfp_sp_dyadic ();
17371
17372 /* ARMv8.2 fp16 instruction. */
17373 if (rs == NS_HHH)
17374 do_scalar_fp16_v82_encode ();
17375 }
17376 else
17377 do_vfp_dp_rd_rn_rm ();
17378
17379 if (rs == NS_DDD)
17380 inst.instruction |= 0x100;
17381
17382 inst.instruction |= 0xf0000000;
17383 }
17384
17385 static void
17386 do_vsel (void)
17387 {
17388 set_it_insn_type (OUTSIDE_IT_INSN);
17389
17390 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
17391 first_error (_("invalid instruction shape"));
17392 }
17393
17394 static void
17395 do_vmaxnm (void)
17396 {
17397 set_it_insn_type (OUTSIDE_IT_INSN);
17398
17399 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
17400 return;
17401
17402 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
17403 return;
17404
17405 neon_dyadic_misc (NT_untyped, N_F_16_32, 0);
17406 }
17407
/* Common encoder for the VRINT family; MODE selects the rounding
   behaviour.  Tries the scalar VFP encoding first, then the Neon
   vector encoding.  */

static void
do_vrint_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et;

  if (rs == NS_NULL)
    return;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands. */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY
			| N_VFP);
  if (et.type != NT_invtype)
    {
      /* VFP encodings. */
      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
	set_it_insn_type (OUTSIDE_IT_INSN);

      NEON_ENCODE (FPV8, inst);
      if (rs == NS_FF || rs == NS_HH)
	do_vfp_sp_monadic ();
      else
	do_vfp_dp_rd_rm ();

      /* Pick the opcode bits for the requested rounding mode.  */
      switch (mode)
	{
	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
	default: abort ();
	}

      inst.instruction |= (rs == NS_DD) << 8;
      do_vfp_cond_or_thumb ();

      /* ARMv8.2 fp16 vrint instruction. */
      if (rs == NS_HH)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      /* Neon encodings (or something broken...). */
      inst.error = NULL;
      et = neon_check_type (2, rs, N_EQK, N_F_16_32 | N_KEY);

      if (et.type == NT_invtype)
	return;

      set_it_insn_type (OUTSIDE_IT_INSN);
      NEON_ENCODE (FLOAT, inst);

      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	return;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Mask off the original size bits and reencode them. */
      inst.instruction = ((inst.instruction & 0xfff3ffff)
			  | neon_logbits (et.size) << 18);

      /* The rounding mode goes in bits [9:7] for the Neon form.  */
      switch (mode)
	{
	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
	default: abort ();
	}

      if (thumb_mode)
	inst.instruction |= 0xfc000000;
      else
	inst.instruction |= 0xf0000000;
    }
}
17499
/* VRINTX: round using the FPSCR rounding mode, "exact" variant.  */

static void
do_vrintx (void)
{
  do_vrint_1 (neon_cvt_mode_x);
}
17505
/* VRINTZ: round towards zero.  */

static void
do_vrintz (void)
{
  do_vrint_1 (neon_cvt_mode_z);
}
17511
/* VRINTR: round using the FPSCR rounding mode.  */

static void
do_vrintr (void)
{
  do_vrint_1 (neon_cvt_mode_r);
}
17517
/* VRINTA: round to nearest, ties away from zero.  */

static void
do_vrinta (void)
{
  do_vrint_1 (neon_cvt_mode_a);
}
17523
/* VRINTN: round to nearest, ties to even.  */

static void
do_vrintn (void)
{
  do_vrint_1 (neon_cvt_mode_n);
}
17529
/* VRINTP: round towards plus infinity.  */

static void
do_vrintp (void)
{
  do_vrint_1 (neon_cvt_mode_p);
}
17535
/* VRINTM: round towards minus infinity.  */

static void
do_vrintm (void)
{
  do_vrint_1 (neon_cvt_mode_m);
}
17541
/* Validate the scalar operand OPND of a VCMLA against element size
   ELSIZE and return its encoded register/index form; reports an error
   and returns 0 when the scalar is out of range.  */

static unsigned
neon_scalar_for_vcmla (unsigned opnd, unsigned elsize)
{
  unsigned reg = NEON_SCALAR_REG (opnd);
  unsigned idx = NEON_SCALAR_INDEX (opnd);

  if (elsize == 16)
    {
      /* fp16: index 0-1, registers D0-D15; index goes in bit 4.  */
      if (idx < 2 && reg < 16)
	return reg | (idx << 4);
    }
  else if (elsize == 32 && idx == 0)
    /* fp32: only index 0 is valid.  */
    return reg;

  first_error (_("scalar out of range"));
  return 0;
}
17556
static void
do_vcmla (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));
  constraint (inst.reloc.exp.X_op != O_constant, _("expression too complex"));
  /* The rotation is given in degrees and encoded as 0-3.  */
  unsigned rot = inst.reloc.exp.X_add_number;
  constraint (rot != 0 && rot != 90 && rot != 180 && rot != 270,
	      _("immediate out of range"));
  rot /= 90;
  if (inst.operands[2].isscalar)
    {
      /* By-scalar form.  */
      enum neon_shape rs = neon_select_shape (NS_DDSI, NS_QQSI, NS_NULL);
      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      unsigned m = neon_scalar_for_vcmla (inst.operands[2].reg, size);
      inst.is_neon = 1;
      inst.instruction = 0xfe000800;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
      inst.instruction |= HI1 (inst.operands[1].reg) << 7;
      inst.instruction |= LOW4 (m);
      inst.instruction |= HI1 (m) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= rot << 20;
      inst.instruction |= (size == 32) << 23;
    }
  else
    {
      /* Vector-by-vector form.  */
      enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      neon_three_same (neon_quad (rs), 0, -1);
      inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup. */
      inst.instruction |= 0xfc200800;
      inst.instruction |= rot << 23;
      inst.instruction |= (size == 32) << 20;
    }
}
17597
/* Encode VCADD (complex add with rotation).  Only rotations of 90 and
   270 degrees exist; the rotation selector goes in bit 24.  */
static void
do_vcadd (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));
  constraint (inst.reloc.exp.X_op != O_constant, _("expression too complex"));
  unsigned rot = inst.reloc.exp.X_add_number;
  constraint (rot != 90 && rot != 270, _("immediate out of range"));
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				   N_KEY | N_F16 | N_F32).size;
  /* Place the registers via the generic three-same encoder, then
     install the VCADD-specific top byte.  */
  neon_three_same (neon_quad (rs), 0, -1);
  inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
  inst.instruction |= 0xfc800800;
  inst.instruction |= (rot == 270) << 24;
  inst.instruction |= (size == 32) << 20;
}
17615
/* Dot Product instructions encoding support.  */

/* Encode a Dot Product instruction; UNSIGNED_P selects the unsigned
   variant (bit 4).  Operand 2 may be a full vector or an indexed
   scalar.  */
static void
do_neon_dotproduct (int unsigned_p)
{
  enum neon_shape rs;
  unsigned scalar_oprd2 = 0;
  int high8;

  if (inst.cond != COND_ALWAYS)
    as_warn (_("Dot Product instructions cannot be conditional, the behaviour "
	       "is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* Dot Product instructions are in three-same D/Q register format or the third
     operand can be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      scalar_oprd2 = neon_scalar_for_mul (inst.operands[2].reg, 32);
      high8 = 0xfe000000;
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
    }
  else
    {
      high8 = 0xfc000000;
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
    }

  if (unsigned_p)
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_U8);
  else
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_S8);

  /* The "U" bit in traditional Three Same encoding is fixed to 0 for Dot
     Product instruction, so we pass 0 as the "ubit" parameter.  And the
     "Size" field are fixed to 0x2, so we pass 32 as the "size" parameter.  */
  neon_three_same (neon_quad (rs), 0, 32);

  /* Undo neon_dp_fixup.  Dot Product instructions are using a slightly
     different NEON three-same encoding.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;
  /* Encode 'U' bit which indicates signedness.  */
  inst.instruction |= (unsigned_p ? 1 : 0) << 4;
  /* Re-encode operand2 if it's indexed scalar operand.  What has been encoded
     from inst.operand[2].reg in neon_three_same is GAS's internal encoding, not
     the instruction encoding.  */
  if (inst.operands[2].isscalar)
    {
      /* Clear the Vm/M fields placed by neon_three_same, then install
	 the scalar's real encoding.  */
      inst.instruction &= 0xffffffd0;
      inst.instruction |= LOW4 (scalar_oprd2);
      inst.instruction |= HI1 (scalar_oprd2) << 5;
    }
}
17672
/* Dot Product instructions for signed integer.  */

static void
do_neon_dotproduct_s (void)
{
  /* ISO C forbids `return' with an expression in a function returning
     void (C99 6.8.6.4), so call the worker and fall off the end.  */
  do_neon_dotproduct (0);
}
17680
/* Dot Product instructions for unsigned integer.  */

static void
do_neon_dotproduct_u (void)
{
  /* ISO C forbids `return' with an expression in a function returning
     void (C99 6.8.6.4), so call the worker and fall off the end.  */
  do_neon_dotproduct (1);
}
17688
/* Crypto v1 instructions.  */

/* Encode a two-register crypto instruction.  ELTTYPE is the element
   type the operands must carry; OP, if not -1, is placed in bits
   <7:6>.  */
static void
do_crypto_2op_1 (unsigned elttype, int op)
{
  /* Crypto instructions may not appear inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
      == NT_invtype)
    return;

  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  if (op != -1)
    inst.instruction |= op << 6;

  /* Select the Thumb or ARM top byte of the encoding.  */
  if (thumb_mode)
    inst.instruction |= 0xfc000000;
  else
    inst.instruction |= 0xf0000000;
}
17714
/* Encode a three-register crypto instruction.  U is the value for the
   "U" bit and OP selects the size field (8 << OP) passed to the
   generic three-same encoder.  */
static void
do_crypto_3op_1 (int u, int op)
{
  /* Crypto instructions may not appear inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
		       N_32 | N_UNT | N_KEY).type == NT_invtype)
    return;

  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  neon_three_same (1, u, 8 << op);
}
17729
/* Per-mnemonic crypto encoders: each forwards to do_crypto_2op_1 or
   do_crypto_3op_1 with the element type / U bit / opcode selector for
   that instruction.  */

static void
do_aese (void)
{
  do_crypto_2op_1 (N_8, 0);
}

static void
do_aesd (void)
{
  do_crypto_2op_1 (N_8, 1);
}

static void
do_aesmc (void)
{
  do_crypto_2op_1 (N_8, 2);
}

static void
do_aesimc (void)
{
  do_crypto_2op_1 (N_8, 3);
}

static void
do_sha1c (void)
{
  do_crypto_3op_1 (0, 0);
}

static void
do_sha1p (void)
{
  do_crypto_3op_1 (0, 1);
}

static void
do_sha1m (void)
{
  do_crypto_3op_1 (0, 2);
}

static void
do_sha1su0 (void)
{
  do_crypto_3op_1 (0, 3);
}

static void
do_sha256h (void)
{
  do_crypto_3op_1 (1, 0);
}

static void
do_sha256h2 (void)
{
  do_crypto_3op_1 (1, 1);
}

static void
do_sha256su1 (void)
{
  do_crypto_3op_1 (1, 2);
}

static void
do_sha1h (void)
{
  /* -1: leave the size field untouched.  */
  do_crypto_2op_1 (N_32, -1);
}

static void
do_sha1su1 (void)
{
  do_crypto_2op_1 (N_32, 0);
}

static void
do_sha256su0 (void)
{
  do_crypto_2op_1 (N_32, 1);
}
17813
/* Encode a CRC32 instruction.  POLY selects the polynomial variant
   (1 for the CRC32C forms) and SZ the operand size field
   (0 = byte, 1 = halfword, 2 = word; see the wrappers below).
   The bit positions of Rd, the size and the poly bit differ between
   the ARM and Thumb encodings.  */
static void
do_crc32_1 (unsigned int poly, unsigned int sz)
{
  unsigned int Rd = inst.operands[0].reg;
  unsigned int Rn = inst.operands[1].reg;
  unsigned int Rm = inst.operands[2].reg;

  /* CRC32 may not appear inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
  inst.instruction |= LOW4 (Rn) << 16;
  inst.instruction |= LOW4 (Rm);
  inst.instruction |= sz << (thumb_mode ? 4 : 21);
  inst.instruction |= poly << (thumb_mode ? 20 : 9);

  /* Using the PC in any operand position is unpredictable.  */
  if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
    as_warn (UNPRED_REG ("r15"));
}
17831
/* CRC32 mnemonic wrappers: poly 0 = CRC32, poly 1 = CRC32C;
   size 0 = byte, 1 = halfword, 2 = word.  */

static void
do_crc32b (void)
{
  do_crc32_1 (0, 0);
}

static void
do_crc32h (void)
{
  do_crc32_1 (0, 1);
}

static void
do_crc32w (void)
{
  do_crc32_1 (0, 2);
}

static void
do_crc32cb (void)
{
  do_crc32_1 (1, 0);
}

static void
do_crc32ch (void)
{
  do_crc32_1 (1, 1);
}

static void
do_crc32cw (void)
{
  do_crc32_1 (1, 2);
}
17867
/* Encode VJCVT (convert double to signed 32-bit integer); requires
   the ARMv8 VFP extension and an S,D register pair.  */
static void
do_vjcvt (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
	      _(BAD_FPU));
  neon_check_type (2, NS_FD, N_S32, N_F64);
  do_vfp_sp_dp_cvt ();
  do_vfp_cond_or_thumb ();
}
17877
17878 \f
17879 /* Overall per-instruction processing. */
17880
17881 /* We need to be able to fix up arbitrary expressions in some statements.
17882 This is so that we can handle symbols that are an arbitrary distance from
17883 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
17884 which returns part of an address in a form which will be valid for
17885 a data instruction. We do this by pushing the expression into a symbol
17886 in the expr_section, and creating a fix for that. */
17887
/* Create a fix-up for the current instruction.  FRAG, WHERE and SIZE
   locate the field to be patched, EXP is the value expression, PC_REL
   is non-zero for pc-relative fixes, and RELOC is the BFD relocation
   code to apply.  */
static void
fix_new_arm (fragS *	frag,
	     int	where,
	     short int	size,
	     expressionS * exp,
	     int	pc_rel,
	     int	reloc)
{
  fixS *	   new_fix;

  switch (exp->X_op)
    {
    case O_constant:
      if (pc_rel)
	{
	  /* Create an absolute valued symbol, so we have something to
	     refer to in the object file.  Unfortunately for us, gas's
	     generic expression parsing will already have folded out
	     any use of .set foo/.type foo %function that may have
	     been used to set type information of the target location,
	     that's being specified symbolically.  We have to presume
	     the user knows what they are doing.  */
	  char name[16 + 8];
	  symbolS *symbol;

	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);

	  symbol = symbol_find_or_make (name);
	  S_SET_SEGMENT (symbol, absolute_section);
	  symbol_set_frag (symbol, &zero_address_frag);
	  S_SET_VALUE (symbol, exp->X_add_number);
	  /* Rewrite the expression to refer to the new symbol.  */
	  exp->X_op = O_symbol;
	  exp->X_add_symbol = symbol;
	  exp->X_add_number = 0;
	}
      /* FALLTHROUGH */
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
			     (enum bfd_reloc_code_real) reloc);
      break;

    default:
      /* Anything more complex gets pushed into an expression symbol.  */
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
				  pc_rel, (enum bfd_reloc_code_real) reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
17941
/* Create a frag for an instruction requiring relaxation.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  /* Split the relocation expression into the symbol/offset pair that
     frag_var wants.  */
  switch (inst.reloc.exp.X_op)
    {
    case O_symbol:
      sym = inst.reloc.exp.X_add_symbol;
      offset = inst.reloc.exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.reloc.exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.reloc.exp);
      offset = 0;
      break;
    }
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
17973
17974 /* Write a 32-bit thumb instruction to buf. */
17975 static void
17976 put_thumb32_insn (char * buf, unsigned long insn)
17977 {
17978 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
17979 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
17980 }
17981
/* Emit the instruction held in inst to the output, after checking for
   errors and handling relaxation.  STR is the source line, used only
   for diagnostics.  */
static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  if (inst.relax)
    {
      /* Relaxable instructions go through a variant frag instead.  */
      output_relax_insn ();
      return;
    }
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      /* 32-bit Thumb: two halfwords, high halfword first.  */
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* Double-length ARM encoding: the same word emitted twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    fix_new_arm (frag_now, to - frag_now->fr_literal,
		 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
		 inst.reloc.type);

  dwarf2_emit_insn (inst.size);
}
18028
18029 static char *
18030 output_it_inst (int cond, int mask, char * to)
18031 {
18032 unsigned long instruction = 0xbf00;
18033
18034 mask &= 0xf;
18035 instruction |= mask;
18036 instruction |= cond << 4;
18037
18038 if (to == NULL)
18039 {
18040 to = frag_more (2);
18041 #ifdef OBJ_ELF
18042 dwarf2_emit_insn (2);
18043 #endif
18044 }
18045
18046 md_number_to_chars (to, instruction, 2);
18047
18048 return to;
18049 }
18050
/* Tag values used in struct asm_opcode's tag field.  They describe
   how a mnemonic combines with a conditional affix; opcode_lookup
   uses them to decide where (if anywhere) a condition may appear.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
18084
18085 /* Subroutine of md_assemble, responsible for looking up the primary
18086 opcode from the mnemonic the user wrote. STR points to the
18087 beginning of the mnemonic.
18088
18089 This is not simply a hash table lookup, because of conditional
18090 variants. Most instructions have conditional variants, which are
18091 expressed with a _conditional affix_ to the mnemonic. If we were
18092 to encode each conditional variant as a literal string in the opcode
18093 table, it would have approximately 20,000 entries.
18094
18095 Most mnemonics take this affix as a suffix, and in unified syntax,
18096 'most' is upgraded to 'all'. However, in the divided syntax, some
18097 instructions take the affix as an infix, notably the s-variants of
18098 the arithmetic instructions. Of those instructions, all but six
18099 have the infix appear after the third character of the mnemonic.
18100
18101 Accordingly, the algorithm for looking up primary opcodes given
18102 an identifier is:
18103
18104 1. Look up the identifier in the opcode table.
18105 If we find a match, go to step U.
18106
18107 2. Look up the last two characters of the identifier in the
18108 conditions table. If we find a match, look up the first N-2
18109 characters of the identifier in the opcode table. If we
18110 find a match, go to step CE.
18111
18112 3. Look up the fourth and fifth characters of the identifier in
18113 the conditions table. If we find a match, extract those
18114 characters from the identifier, and look up the remaining
18115 characters in the opcode table. If we find a match, go
18116 to step CM.
18117
18118 4. Fail.
18119
18120 U. Examine the tag field of the opcode structure, in case this is
18121 one of the six instructions with its conditional infix in an
18122 unusual place. If it is, the tag tells us where to find the
18123 infix; look it up in the conditions table and set inst.cond
18124 accordingly. Otherwise, this is an unconditional instruction.
18125 Again set inst.cond accordingly. Return the opcode structure.
18126
18127 CE. Examine the tag field to make sure this is an instruction that
18128 should receive a conditional suffix. If it is not, fail.
18129 Otherwise, set inst.cond from the suffix we already looked up,
18130 and return the opcode structure.
18131
18132 CM. Examine the tag field to make sure this is an instruction that
18133 should receive a conditional infix after the third character.
18134 If it is not, fail. Otherwise, undo the edits to the current
18135 line of input and proceed as for case CE. */
18136
/* Look up the opcode for the mnemonic starting at *STR, handling
   conditional suffixes/infixes and width/Neon-type suffixes as
   described in the comment above.  On success *STR is advanced past
   the mnemonic, inst.cond is set and the opcode entry is returned;
   on failure NULL is returned.  */
static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || *end == '.')
      break;

  if (end == base)
    return NULL;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
	 use.  */
      if (unified_syntax && end[1] == 'w')
	inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;

      inst.vectype.elems = 0;

      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix (possible in either unified or
	     non-unified ARM syntax mode).  */
	  if (parse_neon_type (&inst.vectype, str) == FAIL)
	    return NULL;
	}
      else if (end[offset] != '\0' && end[offset] != ' ')
	return NULL;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    end - base);
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
	{
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      if (warn_on_deprecated && unified_syntax)
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
      /* The tag encodes the position of the oddly-placed infix.  */
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      gas_assert (cond);

      inst.cond = cond->value;
      return opcode;
    }

  /* A conditional suffix occupies the last two characters, so the
     identifier must be at least three characters long to leave room
     for a base mnemonic.  */
  if (end - base < 3)
    return NULL;

  /* Look for suffixed mnemonic.  */
  affix = end - 2;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    affix - base);
  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
	  break;

	case OT_cinfix3:
	case OT_cinfix3_deprecated:
	case OT_odd_infix_unc:
	  if (!unified_syntax)
	    return NULL;
	  /* Fall through.  */

	case OT_csuffix:
	case OT_csuffixF:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    inst.cond = cond->value;
	  else
	    {
	      /* Delayed diagnostic.  */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return NULL;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return NULL;

  /* Look for infixed mnemonic in the usual position.  */
  affix = base + 3;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return NULL;

  /* Temporarily excise the two infix characters, retry the opcode
     lookup on the shortened mnemonic, then restore the buffer.  */
  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
	  || opcode->tag == OT_cinfix3_deprecated
	  || opcode->tag == OT_csuf_or_in3
	  || opcode->tag == OT_cinfix3_legacy))
    {
      /* Step CM.  */
      if (warn_on_deprecated && unified_syntax
	  && (opcode->tag == OT_cinfix3
	      || opcode->tag == OT_cinfix3_deprecated))
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return NULL;
}
18293
/* This function generates an initial IT instruction, leaving its block
   virtually open for the new instructions.  Eventually,
   the mask will be updated by now_it_add_mask () each time
   a new instruction needs to be included in the IT block.
   Finally, the block is closed with close_automatic_it_block ().
   The block closure can be requested either from md_assemble (),
   a tencode (), or due to a label hook.  */

static void
new_automatic_it_block (int cond)
{
  now_it.state = AUTOMATIC_IT_BLOCK;
  /* 0x18 is the mask of a one-instruction IT block; now_it_add_mask
     rewrites it as instructions are added.  */
  now_it.mask = 0x18;
  now_it.cc = cond;
  now_it.block_length = 1;
  mapping_state (MAP_THUMB);
  /* Remember where the IT instruction was emitted so its mask can be
     patched later.  */
  now_it.insn = output_it_inst (cond, now_it.mask, NULL);
  now_it.warn_deprecated = FALSE;
  now_it.insn_cond = TRUE;
}
18314
/* Close an automatic IT block: reset the mask to the empty-block value
   so no further instructions can join it.
   See comments in new_automatic_it_block ().  */

static void
close_automatic_it_block (void)
{
  now_it.mask = 0x10;
  now_it.block_length = 0;
}
18324
/* Update the mask of the current automatically-generated IT
   instruction to account for one more instruction with condition
   COND, and rewrite the IT instruction in place.
   See comments in new_automatic_it_block ().  */

static void
now_it_add_mask (int cond)
{
#define CLEAR_BIT(value, nbit)  ((value) & ~(1 << (nbit)))
#define SET_BIT_VALUE(value, bitvalue, nbit)  (CLEAR_BIT (value, nbit) \
					      | ((bitvalue) << (nbit)))
  /* The low bit of the condition distinguishes "then" from "else".  */
  const int resulting_bit = (cond & 1);

  now_it.mask &= 0xf;
  /* Record this instruction's then/else bit...  */
  now_it.mask = SET_BIT_VALUE (now_it.mask,
			       resulting_bit,
			      (5 - now_it.block_length));
  /* ...and move the terminating 1 bit down one position.  */
  now_it.mask = SET_BIT_VALUE (now_it.mask,
			       1,
			       ((5 - now_it.block_length) - 1) );
  /* Patch the previously-emitted IT instruction with the new mask.  */
  output_it_inst (now_it.cc, now_it.mask, now_it.insn);

#undef CLEAR_BIT
#undef SET_BIT_VALUE
}
18348
18349 /* The IT blocks handling machinery is accessed through the these functions:
18350 it_fsm_pre_encode () from md_assemble ()
18351 set_it_insn_type () optional, from the tencode functions
18352 set_it_insn_type_last () ditto
18353 in_it_block () ditto
18354 it_fsm_post_encode () from md_assemble ()
18355 force_automatic_it_block_close () from label handling functions
18356
18357 Rationale:
18358 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
18359 initializing the IT insn type with a generic initial value depending
18360 on the inst.condition.
18361 2) During the tencode function, two things may happen:
18362 a) The tencode function overrides the IT insn type by
18363 calling either set_it_insn_type (type) or set_it_insn_type_last ().
18364 b) The tencode function queries the IT block state by
18365 calling in_it_block () (i.e. to determine narrow/not narrow mode).
18366
18367 Both set_it_insn_type and in_it_block run the internal FSM state
18368 handling function (handle_it_state), because: a) setting the IT insn
18369 type may incur in an invalid state (exiting the function),
18370 and b) querying the state requires the FSM to be updated.
18371 Specifically we want to avoid creating an IT block for conditional
18372 branches, so it_fsm_pre_encode is actually a guess and we can't
18373 determine whether an IT block is required until the tencode () routine
18374 has decided what type of instruction this actually it.
18375 Because of this, if set_it_insn_type and in_it_block have to be used,
18376 set_it_insn_type has to be called first.
18377
18378 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
18379 determines the insn IT type depending on the inst.cond code.
18380 When a tencode () routine encodes an instruction that can be
18381 either outside an IT block, or, in the case of being inside, has to be
18382 the last one, set_it_insn_type_last () will determine the proper
18383 IT instruction type based on the inst.cond code. Otherwise,
18384 set_it_insn_type can be called for overriding that logic or
18385 for covering other cases.
18386
18387 Calling handle_it_state () may not transition the IT block state to
18388 OUTSIDE_IT_BLOCK immediately, since the (current) state could be
18389 still queried. Instead, if the FSM determines that the state should
18390 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
18391 after the tencode () function: that's what it_fsm_post_encode () does.
18392
18393 Since in_it_block () calls the state handling function to get an
18394 updated state, an error may occur (due to invalid insns combination).
18395 In that case, inst.error is set.
18396 Therefore, inst.error has to be checked after the execution of
18397 the tencode () routine.
18398
18399 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
18400 any pending state change (if any) that didn't take place in
18401 handle_it_state () as explained above. */
18402
18403 static void
18404 it_fsm_pre_encode (void)
18405 {
18406 if (inst.cond != COND_ALWAYS)
18407 inst.it_insn_type = INSIDE_IT_INSN;
18408 else
18409 inst.it_insn_type = OUTSIDE_IT_INSN;
18410
18411 now_it.state_handled = 0;
18412 }
18413
/* IT state FSM handling function.  Run once per instruction (via
   set_it_insn_type, in_it_block or it_fsm_post_encode) to reconcile
   inst.it_insn_type with the current now_it state.  Returns SUCCESS,
   or FAIL with inst.error set on an invalid combination.  */

static int
handle_it_state (void)
{
  now_it.state_handled = 1;
  now_it.insn_cond = FALSE;

  switch (now_it.state)
    {
    case OUTSIDE_IT_BLOCK:
      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (thumb_mode == 0)
	    {
	      /* ARM mode: conditional execution needs no IT block,
		 but warn when assembling unified-syntax code that may
		 also be built for Thumb.  */
	      if (unified_syntax
		  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		as_tsktsk (_("Warning: conditional outside an IT block"\
			     " for Thumb."));
	    }
	  else
	    {
	      if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		{
		  /* Automatically generate the IT instruction.  */
		  new_automatic_it_block (inst.cond);
		  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
		    close_automatic_it_block ();
		}
	      else
		{
		  inst.error = BAD_OUT_IT;
		  return FAIL;
		}
	    }
	  break;

	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case IT_INSN:
	  /* An explicit IT instruction opens a manual block.  */
	  now_it.state = MANUAL_IT_BLOCK;
	  now_it.block_length = 0;
	  break;
	}
      break;

    case AUTOMATIC_IT_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	 to incompatible conditions or
	 4 insns-length block reached).  */

      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_it_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_it.block_length++;

	  if (now_it.block_length > 4
	      || !now_it_compatible (inst.cond))
	    {
	      /* Block full or condition mismatch: close it, and open
		 a fresh block unless this insn must be last.  */
	      force_automatic_it_block_close ();
	      if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    {
	      now_it.insn_cond = TRUE;
	      now_it_add_mask (inst.cond);
	    }

	  if (now_it.state == AUTOMATIC_IT_BLOCK
	      && (inst.it_insn_type == INSIDE_IT_LAST_INSN
		  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	case NEUTRAL_IT_INSN:
	  now_it.block_length++;
	  now_it.insn_cond = TRUE;

	  if (now_it.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    now_it_add_mask (now_it.cc & 1);
	  break;

	case IT_INSN:
	  /* Explicit IT takes over: end the automatic block.  */
	  close_automatic_it_block ();
	  now_it.state = MANUAL_IT_BLOCK;
	  break;
	}
      break;

    case MANUAL_IT_BLOCK:
      {
	/* Check conditional suffixes.  */
	const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
	int is_last;
	now_it.mask <<= 1;
	now_it.mask &= 0x1f;
	is_last = (now_it.mask == 0x10);
	now_it.insn_cond = TRUE;

	switch (inst.it_insn_type)
	  {
	  case OUTSIDE_IT_INSN:
	    inst.error = BAD_NOT_IT;
	    return FAIL;

	  case INSIDE_IT_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    break;

	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in an IT block.  */
	    break;

	  case IT_INSN:
	    inst.error = BAD_IT_IT;
	    return FAIL;
	  }
      }
      break;
    }

  return SUCCESS;
}
18577
/* A pattern/mask pair identifying a class of instruction encodings,
   with a human-readable description used in diagnostics.  */
struct depr_insn_mask
{
  unsigned long pattern;
  unsigned long mask;
  const char* description;
};
18584
/* List of 16-bit instruction patterns deprecated in an IT block in
   ARMv8.  Terminated by an all-zero sentinel entry.  */
static const struct depr_insn_mask depr_it_insns[] = {
  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
  { 0xa000, 0xb800, N_("ADR") },
  { 0x4800, 0xf800, N_("Literal loads") },
  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
     field in asm_opcode.  'tvalue' is used at the stage this check happen.  */
  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
  { 0, 0, NULL }
};
18599
/* Commit any IT-state change left pending by the tencode routine, emit
   the ARMv8 performance-deprecation warnings for instructions inside
   IT blocks, and leave the IT state machine when the block's last
   instruction has been emitted.  */
static void
it_fsm_post_encode (void)
{
  int is_last;

  if (!now_it.state_handled)
    handle_it_state ();

  /* Warn (once per IT block) about encodings that are performance
     deprecated inside IT blocks on ARMv8-A/R (not M profile).  */
  if (now_it.insn_cond
      && !now_it.warn_deprecated
      && warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)
      && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m))
    {
      if (inst.instruction >= 0x10000)
	{
	  as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
		       "performance deprecated in ARMv8-A and ARMv8-R"));
	  now_it.warn_deprecated = TRUE;
	}
      else
	{
	  /* Check 16-bit encodings against the deprecated classes.  */
	  const struct depr_insn_mask *p = depr_it_insns;

	  while (p->mask != 0)
	    {
	      if ((inst.instruction & p->mask) == p->pattern)
		{
		  as_tsktsk (_("IT blocks containing 16-bit Thumb "
			       "instructions of the following class are "
			       "performance deprecated in ARMv8-A and "
			       "ARMv8-R: %s"), p->description);
		  now_it.warn_deprecated = TRUE;
		  break;
		}

	      ++p;
	    }
	}

      if (now_it.block_length > 1)
	{
	  as_tsktsk (_("IT blocks containing more than one conditional "
		       "instruction are performance deprecated in ARMv8-A and "
		       "ARMv8-R"));
	  now_it.warn_deprecated = TRUE;
	}
    }

  /* Leave the IT machinery once the block's last slot is used up.  */
  is_last = (now_it.mask == 0x10);
  if (is_last)
    {
      now_it.state = OUTSIDE_IT_BLOCK;
      now_it.mask = 0;
    }
}
18656
18657 static void
18658 force_automatic_it_block_close (void)
18659 {
18660 if (now_it.state == AUTOMATIC_IT_BLOCK)
18661 {
18662 close_automatic_it_block ();
18663 now_it.state = OUTSIDE_IT_BLOCK;
18664 now_it.mask = 0;
18665 }
18666 }
18667
18668 static int
18669 in_it_block (void)
18670 {
18671 if (!now_it.state_handled)
18672 handle_it_state ();
18673
18674 return now_it.state != OUTSIDE_IT_BLOCK;
18675 }
18676
18677 /* Whether OPCODE only has T32 encoding. Since this function is only used by
18678 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
18679 here, hence the "known" in the function name. */
18680
18681 static bfd_boolean
18682 known_t32_only_insn (const struct asm_opcode *opcode)
18683 {
18684 /* Original Thumb-1 wide instruction. */
18685 if (opcode->tencode == do_t_blx
18686 || opcode->tencode == do_t_branch23
18687 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
18688 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
18689 return TRUE;
18690
18691 /* Wide-only instruction added to ARMv8-M Baseline. */
18692 if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m_m_only)
18693 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
18694 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
18695 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
18696 return TRUE;
18697
18698 return FALSE;
18699 }
18700
18701 /* Whether wide instruction variant can be used if available for a valid OPCODE
18702 in ARCH. */
18703
18704 static bfd_boolean
18705 t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
18706 {
18707 if (known_t32_only_insn (opcode))
18708 return TRUE;
18709
18710 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
18711 of variant T3 of B.W is checked in do_t_branch. */
18712 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
18713 && opcode->tencode == do_t_branch)
18714 return TRUE;
18715
18716 /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit. */
18717 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
18718 && opcode->tencode == do_t_mov_cmp
18719 /* Make sure CMP instruction is not affected. */
18720 && opcode->aencode == do_mov)
18721 return TRUE;
18722
18723 /* Wide instruction variants of all instructions with narrow *and* wide
18724 variants become available with ARMv6t2. Other opcodes are either
18725 narrow-only or wide-only and are thus available if OPCODE is valid. */
18726 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
18727 return TRUE;
18728
18729 /* OPCODE with narrow only instruction variant or wide variant not
18730 available. */
18731 return FALSE;
18732 }
18733
/* Assemble one source statement.  STR points at the complete line;
   the mnemonic is looked up, operands are parsed and encoded via the
   Thumb or ARM encoder as appropriate, diagnostics are issued, and
   the encoded instruction is finally emitted through output_inst.  */
void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Reset the per-instruction scratch state.  */
  memset (&inst, '\0', sizeof (inst));
  inst.reloc.type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_tsktsk (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  if (opcode->tencode == do_t_swi)
	    as_bad (_("SVC is not permitted on this architecture"));
	  else
	    as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      /* Two things are addressed here:
	 1) Implicit require narrow instructions on Thumb-1.
	    This avoids relaxation accidentally introducing Thumb-2
	    instructions.
	 2) Reject wide instructions in non Thumb-2 cores.

	 Only instructions with narrow and wide variants need to be handled
	 but selecting all non wide-only instructions is easier.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
	  && !t32_insn_ok (variant, opcode))
	{
	  if (inst.size_req == 0)
	    inst.size_req = 2;
	  else if (inst.size_req == 4)
	    {
	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
		as_bad (_("selected processor does not support 32bit wide "
			  "variant of instruction `%s'"), str);
	      else
		as_bad (_("selected processor does not support `%s' in "
			  "Thumb-2 mode"), str);
	      return;
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
	{
	  /* Prepare the it_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  /* 16-bit encodings in the 0xe800-0xffff range would collide
	     with the first halfword of a 32-bit encoding.  */
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  The impact
	 of relaxable instructions will be considered later after we finish all
	 relaxation.  */
      if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
	variant = arm_arch_none;
      else
	variant = cpu_variant;
      if (inst.size == 4 && !t32_insn_ok (variant, opcode))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      /* check_neon_suffixes is a statement-like macro defined elsewhere
	 in this file.  */
      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      /* Unconditional-only instructions carry 0xF in the condition
	 field; everything else gets the parsed condition.  */
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xFU << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  output_inst (str);
}
18926
18927 static void
18928 check_it_blocks_finished (void)
18929 {
18930 #ifdef OBJ_ELF
18931 asection *sect;
18932
18933 for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
18934 if (seg_info (sect)->tc_segment_info_data.current_it.state
18935 == MANUAL_IT_BLOCK)
18936 {
18937 as_warn (_("section '%s' finished with an open IT block."),
18938 sect->name);
18939 }
18940 #else
18941 if (now_it.state == MANUAL_IT_BLOCK)
18942 as_warn (_("file finished with an open IT block."));
18943 #endif
18944 }
18945
18946 /* Various frobbings of labels and their addresses. */
18947
/* Hook run at the start of each input line: forget the label
   remembered from the previous line (see md_assemble's label
   alignment fix-up).  */
void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
18953
/* Hook run whenever label SYM is defined: remember it for
   md_assemble's alignment fix-up, tag it with the current Thumb/ARM
   and interworking state, close any automatic IT block, optionally
   mark it as a Thumb function, and emit DWARF line info for it.  */
void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:   .word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
19012
19013 bfd_boolean
19014 arm_data_in_code (void)
19015 {
19016 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
19017 {
19018 *input_line_pointer = '/';
19019 input_line_pointer += 5;
19020 *input_line_pointer = 0;
19021 return TRUE;
19022 }
19023
19024 return FALSE;
19025 }
19026
19027 char *
19028 arm_canonicalize_symbol_name (char * name)
19029 {
19030 int len;
19031
19032 if (thumb_mode && (len = strlen (name)) > 5
19033 && streq (name + len - 5, "/data"))
19034 *(name + len - 5) = 0;
19035
19036 return name;
19037 }
19038 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.	Some registers
   also have mixed-case names.	*/

/* One register entry: name S, internal number N, type REG_TYPE_T.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
/* Numbered register P<N>, e.g. r0..r15.  */
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* Numbered register whose internal number is twice the name index
   (used below for the Neon Q registers).  */
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
/* Sixteen consecutively numbered registers P0..P15.  */
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
/* Upper half (16..31) of a 32-register set.  */
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
/* Sixteen registers with doubled internal numbers (REGNUM2).  */
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
/* LR/SP/SPSR banked-register entries, in both cases, for mode BANK.  */
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
19069
static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c,  CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* ARM banked registers.  */
  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),

  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),

  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.	*/
  REGSET(s,VFS),  REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.	*/
  REGSET(d,VFD),  REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.	 */
  REGSET2(q,NQ),  REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
  REGDEF(mvfr2,5,VFC), REGDEF(MVFR2,5,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.	 */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid,	0,MMXWC),  REGDEF(wCID,	 0,MMXWC),  REGDEF(WCID,  0,MMXWC),
  REGDEF(wcon,	1,MMXWC),  REGDEF(wCon,	 1,MMXWC),  REGDEF(WCON,  1,MMXWC),
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
#undef REGDEF
#undef REGNUM
#undef REGSET
19188
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.  Every ordering of each subset of the
   f/s/x/c flag letters is listed so lookup is a plain string match.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.	*/
  {"f",	   PSR_f},
  {"c",	   PSR_c},
  {"x",	   PSR_x},
  {"s",	   PSR_s},

  /* Combinations of flags.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
19267
/* Table of V7M psr names.  Entries come in lower/uppercase pairs; the
   _NS variants are the Non-secure views added with Security Extension
   support (0x80 bit set).  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr",	   0x0 }, {"APSR",	   0x0 },
  {"iapsr",	   0x1 }, {"IAPSR",	   0x1 },
  {"eapsr",	   0x2 }, {"EAPSR",	   0x2 },
  {"psr",	   0x3 }, {"PSR",	   0x3 },
  {"xpsr",	   0x3 }, {"XPSR",	   0x3 }, {"xPSR",	  3 },
  {"ipsr",	   0x5 }, {"IPSR",	   0x5 },
  {"epsr",	   0x6 }, {"EPSR",	   0x6 },
  {"iepsr",	   0x7 }, {"IEPSR",	   0x7 },
  {"msp",	   0x8 }, {"MSP",	   0x8 },
  {"psp",	   0x9 }, {"PSP",	   0x9 },
  {"msplim",	   0xa }, {"MSPLIM",	   0xa },
  {"psplim",	   0xb }, {"PSPLIM",	   0xb },
  {"primask",	   0x10}, {"PRIMASK",	   0x10},
  {"basepri",	   0x11}, {"BASEPRI",	   0x11},
  {"basepri_max",  0x12}, {"BASEPRI_MAX",  0x12},
  {"faultmask",	   0x13}, {"FAULTMASK",	   0x13},
  {"control",	   0x14}, {"CONTROL",	   0x14},
  {"msp_ns",	   0x88}, {"MSP_NS",	   0x88},
  {"psp_ns",	   0x89}, {"PSP_NS",	   0x89},
  {"msplim_ns",	   0x8a}, {"MSPLIM_NS",	   0x8a},
  {"psplim_ns",	   0x8b}, {"PSPLIM_NS",	   0x8b},
  {"primask_ns",   0x90}, {"PRIMASK_NS",   0x90},
  {"basepri_ns",   0x91}, {"BASEPRI_NS",   0x91},
  {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
  {"control_ns",   0x94}, {"CONTROL_NS",   0x94},
  {"sp_ns",	   0x98}, {"SP_NS",	   0x98 }
};
19298
/* Table of all shift-in-operand names.  "asl" is accepted as a
   synonym for "lsl".  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
};
19309
/* Table of all explicit relocation names, each in lowercase and
   uppercase form.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},	 { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},	 { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},	 { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
	{ "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
	{ "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
	{ "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ},
  { "gotfuncdesc", BFD_RELOC_ARM_GOTFUNCDESC },
	{ "GOTFUNCDESC", BFD_RELOC_ARM_GOTFUNCDESC },
  { "gotofffuncdesc", BFD_RELOC_ARM_GOTOFFFUNCDESC },
	{ "GOTOFFFUNCDESC", BFD_RELOC_ARM_GOTOFFFUNCDESC },
  { "funcdesc", BFD_RELOC_ARM_FUNCDESC },
	{ "FUNCDESC", BFD_RELOC_ARM_FUNCDESC },
   { "tlsgd_fdpic", BFD_RELOC_ARM_TLS_GD32_FDPIC },      { "TLSGD_FDPIC", BFD_RELOC_ARM_TLS_GD32_FDPIC },
   { "tlsldm_fdpic", BFD_RELOC_ARM_TLS_LDM32_FDPIC },    { "TLSLDM_FDPIC", BFD_RELOC_ARM_TLS_LDM32_FDPIC },
   /* Fixed typo: the uppercase name was previously "GOTTPOFF_FDIC",
      which made the uppercase spelling of this prefix unrecognizable
      while every other entry pair matches case-for-case.  */
   { "gottpoff_fdpic", BFD_RELOC_ARM_TLS_IE32_FDPIC },   { "GOTTPOFF_FDPIC", BFD_RELOC_ARM_TLS_IE32_FDPIC },
};
#endif
19343
/* Table of all conditional affixes.  0xF is not defined as a condition code.
   "hs", "ul" and "lo" are synonyms for "cs"/"cc".  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
19363
/* Expand to a lowercase (L) and an uppercase (U) table entry for a
   barrier option with encoding CODE, gated on feature FEAT.  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }
19367
/* Table of DSB/DMB/ISB barrier option names and their 4-bit option
   encodings; the ld/ishld/nshld/oshld forms require ARMv8.  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
};

#undef UL_BARRIER
19389
/* Table of ARM-format instructions.	*/

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
19414
19415 /* These macros abstract out the exact format of the mnemonic table and
19416 save some repeated characters. */
19417
19418 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
19419 #define TxCE(mnem, op, top, nops, ops, ae, te) \
19420 { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
19421 THUMB_VARIANT, do_##ae, do_##te }
19422
19423 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
19424 a T_MNEM_xyz enumerator. */
19425 #define TCE(mnem, aop, top, nops, ops, ae, te) \
19426 TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
19427 #define tCE(mnem, aop, top, nops, ops, ae, te) \
19428 TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)
19429
19430 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
19431 infix after the third character. */
19432 #define TxC3(mnem, op, top, nops, ops, ae, te) \
19433 { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
19434 THUMB_VARIANT, do_##ae, do_##te }
19435 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
19436 { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
19437 THUMB_VARIANT, do_##ae, do_##te }
19438 #define TC3(mnem, aop, top, nops, ops, ae, te) \
19439 TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
19440 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
19441 TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
19442 #define tC3(mnem, aop, top, nops, ops, ae, te) \
19443 TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
19444 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
19445 TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
19446
19447 /* Mnemonic that cannot be conditionalized. The ARM condition-code
19448 field is still 0xE. Many of the Thumb variants can be executed
19449 conditionally, so this is checked separately. */
19450 #define TUE(mnem, op, top, nops, ops, ae, te) \
19451 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
19452 THUMB_VARIANT, do_##ae, do_##te }
19453
19454 /* Same as TUE but the encoding function for ARM and Thumb modes is the same.
19455 Used by mnemonics that have very minimal differences in the encoding for
19456 ARM and Thumb variants and can be handled in a common function. */
/* Note: en is the single shared encoder suffix — it is pasted into both
   the ARM and the Thumb do_<en> slots.  */
19457 #define TUEc(mnem, op, top, nops, ops, en) \
19458 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
19459 THUMB_VARIANT, do_##en, do_##en }
19460
19461 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
19462 condition code field. */
/* OT_unconditionalF distinguishes these from the TUE entries above,
   whose ARM condition field stays 0xE.  */
19463 #define TUF(mnem, op, top, nops, ops, ae, te) \
19464 { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
19465 THUMB_VARIANT, do_##ae, do_##te }
19466
19467 /* ARM-only variants of all the above. */
/* ARM-only entries put 0/NULL in the Thumb variant and Thumb encoder
   slots.  Note the asymmetry: CE (like TCE above) takes an already
   quoted mnemonic string, whereas C3 takes a bare identifier and
   stringizes it with #mnem — compare the CE("swp", ...) and
   C3(adrl, ...) call sites in the table below.  */
19468 #define CE(mnem, op, nops, ops, ae) \
19469 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
19470
19471 #define C3(mnem, op, nops, ops, ae) \
19472 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
19473
19474 /* Thumb-only variants of TCE and TUE. */
/* These are the converse of CE: the ARM opcode, variant and encoder
   slots are 0/NULL and only the Thumb side is populated.  */
19475 #define ToC(mnem, top, nops, ops, te) \
19476 { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
19477 do_##te }
19478
19479 #define ToU(mnem, top, nops, ops, te) \
19480 { mnem, OPS##nops ops, OT_unconditional, 0x0, 0x##top, 0, THUMB_VARIANT, \
19481 NULL, do_##te }
19482
19483 /* Legacy mnemonics that always have conditional infix after the third
19484 character. */
19485 #define CL(mnem, op, nops, ops, ae) \
19486 { mnem, OPS##nops ops, OT_cinfix3_legacy, \
19487 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
19488
19489 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
/* Because the encodings are isomorphic, the Thumb-2 opcode is formed
   by pasting a leading 0xe nibble onto the ARM opcode digits
   (0xe##op) — i.e. the ARM encoding with its condition field fixed at
   0xE ("always"; cf. the TUE comment above).  Both variant slots and
   both encoder slots reuse the ARM values.  */
19490 #define cCE(mnem, op, nops, ops, ae) \
19491 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
19492
19493 /* Legacy coprocessor instructions where conditional infix and conditional
19494 suffix are ambiguous. For consistency this includes all FPA instructions,
19495 not just the potentially ambiguous ones. */
19496 #define cCL(mnem, op, nops, ops, ae) \
19497 { mnem, OPS##nops ops, OT_cinfix3_legacy, \
19498 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
19499
19500 /* Coprocessor, takes either a suffix or a position-3 infix
19501 (for an FPA corner case). */
19502 #define C3E(mnem, op, nops, ops, ae) \
19503 { mnem, OPS##nops ops, OT_csuf_or_in3, \
19504 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
19505
/* Build one ARM-only table entry for the mnemonic m1<m2>m3, where m1 and
   m3 are string literals and m2 is a bare (possibly empty) condition
   infix, stringized with #m2 and glued in by literal concatenation.
   sizeof (#m2) == 1 holds only for the empty infix (the stringized ""
   contains just the NUL), selecting the uninfixed tag OT_odd_infix_unc;
   otherwise the tag records the infix position as OT_odd_infix_0 plus
   the length of m1 (sizeof (m1) - 1 for a string literal).  */
19506 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
19507 { m1 #m2 m3, OPS##nops ops, \
19508 sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
19509 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
19510
/* Expand to the full set of entries for one mnemonic: the uninfixed
   form plus one entry per condition infix.  The list includes the
   hs/lo aliases and "ul" — presumably matching the condition-name
   table elsewhere in this file; confirm against that table before
   editing.  */
19511 #define CM(m1, m2, op, nops, ops, ae) \
19512 xCM_ (m1, , m2, op, nops, ops, ae), \
19513 xCM_ (m1, eq, m2, op, nops, ops, ae), \
19514 xCM_ (m1, ne, m2, op, nops, ops, ae), \
19515 xCM_ (m1, cs, m2, op, nops, ops, ae), \
19516 xCM_ (m1, hs, m2, op, nops, ops, ae), \
19517 xCM_ (m1, cc, m2, op, nops, ops, ae), \
19518 xCM_ (m1, ul, m2, op, nops, ops, ae), \
19519 xCM_ (m1, lo, m2, op, nops, ops, ae), \
19520 xCM_ (m1, mi, m2, op, nops, ops, ae), \
19521 xCM_ (m1, pl, m2, op, nops, ops, ae), \
19522 xCM_ (m1, vs, m2, op, nops, ops, ae), \
19523 xCM_ (m1, vc, m2, op, nops, ops, ae), \
19524 xCM_ (m1, hi, m2, op, nops, ops, ae), \
19525 xCM_ (m1, ls, m2, op, nops, ops, ae), \
19526 xCM_ (m1, ge, m2, op, nops, ops, ae), \
19527 xCM_ (m1, lt, m2, op, nops, ops, ae), \
19528 xCM_ (m1, gt, m2, op, nops, ops, ae), \
19529 xCM_ (m1, le, m2, op, nops, ops, ae), \
19530 xCM_ (m1, al, m2, op, nops, ops, ae)
19531
/* UE/UF: ARM-only unconditional entries; like C3 these take a bare
   identifier and stringize it (#mnem).  UF sets OT_unconditionalF
   (cond field 0xF), UE plain OT_unconditional.  */
19532 #define UE(mnem, op, nops, ops, ae) \
19533 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
19534
19535 #define UF(mnem, op, nops, ops, ae) \
19536 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
19537
19538 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
19539 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
19540 use the same encoding function for each. */
/* Naming scheme: a capital N means op is a literal hex opcode (0x##op
   pasted into both the ARM and Thumb slots); a lower-case n means op is
   the suffix of an N_MNEM_xyz enumerator, resolved later through
   neon_enc_tab (see the nUF comment below).  */
19541 #define NUF(mnem, op, nops, ops, enc) \
19542 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
19543 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
19544
19545 /* Neon data processing, version which indirects through neon_enc_tab for
19546 the various overloaded versions of opcodes. */
19547 #define nUF(mnem, op, nops, ops, enc) \
19548 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
19549 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
19550
19551 /* Neon insn with conditional suffix for the ARM version, non-overloaded
19552 version. */
/* NCE_tag is the common worker; NCE/NCEF just choose the tag
   (OT_csuffix vs OT_csuffixF).  */
19553 #define NCE_tag(mnem, op, nops, ops, enc, tag) \
19554 { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
19555 THUMB_VARIANT, do_##enc, do_##enc }
19556
19557 #define NCE(mnem, op, nops, ops, enc) \
19558 NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
19559
19560 #define NCEF(mnem, op, nops, ops, enc) \
19561 NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
19562
19563 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
19564 #define nCE_tag(mnem, op, nops, ops, enc, tag) \
19565 { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
19566 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
19567
19568 #define nCE(mnem, op, nops, ops, enc) \
19569 nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
19570
19571 #define nCEF(mnem, op, nops, ops, enc) \
19572 nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
19573
/* Placeholder so that a 0 in an encoder-function argument expands, via
   do_##ae, to do_0 == 0, i.e. a null encoder pointer.  Used by entries
   with no encoder on one side, e.g. TUE("cbnz", 0, b900, ..., 0, t_cbz)
   in the table below.  */
19574 #define do_0 0
19575
19576 static const struct asm_opcode insns[] =
19577 {
19578 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
19579 #define THUMB_VARIANT & arm_ext_v4t
19580 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
19581 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
19582 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
19583 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
19584 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
19585 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
19586 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
19587 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
19588 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
19589 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
19590 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
19591 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
19592 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
19593 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
19594 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
19595 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
19596
19597 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
19598 for setting PSR flag bits. They are obsolete in V6 and do not
19599 have Thumb equivalents. */
19600 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
19601 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
19602 CL("tstp", 110f000, 2, (RR, SH), cmp),
19603 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
19604 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
19605 CL("cmpp", 150f000, 2, (RR, SH), cmp),
19606 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
19607 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
19608 CL("cmnp", 170f000, 2, (RR, SH), cmp),
19609
19610 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
19611 tC3("movs", 1b00000, _movs, 2, (RR, SHG), mov, t_mov_cmp),
19612 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
19613 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
19614
19615 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
19616 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
19617 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
19618 OP_RRnpc),
19619 OP_ADDRGLDR),ldst, t_ldst),
19620 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
19621
19622 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19623 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19624 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19625 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19626 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19627 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19628
19629 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
19630 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
19631
19632 /* Pseudo ops. */
19633 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
19634 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
19635 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
19636 tCE("udf", 7f000f0, _udf, 1, (oIffffb), bkpt, t_udf),
19637
19638 /* Thumb-compatibility pseudo ops. */
19639 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
19640 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
19641 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
19642 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
19643 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
19644 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
19645 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
19646 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
19647 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
19648 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
19649 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
19650 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
19651
19652 /* These may simplify to neg. */
19653 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
19654 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
19655
19656 #undef THUMB_VARIANT
19657 #define THUMB_VARIANT & arm_ext_os
19658
19659 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
19660 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
19661
19662 #undef THUMB_VARIANT
19663 #define THUMB_VARIANT & arm_ext_v6
19664
19665 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
19666
19667 /* V1 instructions with no Thumb analogue prior to V6T2. */
19668 #undef THUMB_VARIANT
19669 #define THUMB_VARIANT & arm_ext_v6t2
19670
19671 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
19672 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
19673 CL("teqp", 130f000, 2, (RR, SH), cmp),
19674
19675 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19676 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19677 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
19678 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19679
19680 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19681 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19682
19683 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19684 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19685
19686 /* V1 instructions with no Thumb analogue at all. */
19687 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
19688 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
19689
19690 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
19691 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
19692 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
19693 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
19694 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
19695 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
19696 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
19697 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
19698
19699 #undef ARM_VARIANT
19700 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
19701 #undef THUMB_VARIANT
19702 #define THUMB_VARIANT & arm_ext_v4t
19703
19704 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
19705 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
19706
19707 #undef THUMB_VARIANT
19708 #define THUMB_VARIANT & arm_ext_v6t2
19709
19710 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
19711 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
19712
19713 /* Generic coprocessor instructions. */
19714 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
19715 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19716 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19717 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19718 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19719 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19720 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
19721
19722 #undef ARM_VARIANT
19723 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
19724
19725 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
19726 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
19727
19728 #undef ARM_VARIANT
19729 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
19730 #undef THUMB_VARIANT
19731 #define THUMB_VARIANT & arm_ext_msr
19732
19733 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
19734 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
19735
19736 #undef ARM_VARIANT
19737 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
19738 #undef THUMB_VARIANT
19739 #define THUMB_VARIANT & arm_ext_v6t2
19740
19741 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19742 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19743 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19744 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19745 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19746 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19747 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19748 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19749
19750 #undef ARM_VARIANT
19751 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
19752 #undef THUMB_VARIANT
19753 #define THUMB_VARIANT & arm_ext_v4t
19754
19755 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19756 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19757 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19758 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19759 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19760 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19761
19762 #undef ARM_VARIANT
19763 #define ARM_VARIANT & arm_ext_v4t_5
19764
19765 /* ARM Architecture 4T. */
19766 /* Note: bx (and blx) are required on V5, even if the processor does
19767 not support Thumb. */
19768 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
19769
19770 #undef ARM_VARIANT
19771 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
19772 #undef THUMB_VARIANT
19773 #define THUMB_VARIANT & arm_ext_v5t
19774
19775 /* Note: blx has 2 variants; the .value coded here is for
19776 BLX(2). Only this variant has conditional execution. */
19777 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
19778 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
19779
19780 #undef THUMB_VARIANT
19781 #define THUMB_VARIANT & arm_ext_v6t2
19782
19783 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
19784 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19785 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19786 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19787 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19788 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
19789 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19790 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19791
19792 #undef ARM_VARIANT
19793 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
19794 #undef THUMB_VARIANT
19795 #define THUMB_VARIANT & arm_ext_v5exp
19796
19797 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19798 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19799 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19800 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19801
19802 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19803 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19804
19805 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19806 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19807 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19808 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19809
19810 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19811 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19812 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19813 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19814
19815 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19816 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19817
19818 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19819 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19820 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19821 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19822
19823 #undef ARM_VARIANT
19824 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
19825 #undef THUMB_VARIANT
19826 #define THUMB_VARIANT & arm_ext_v6t2
19827
19828 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
19829 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
19830 ldrd, t_ldstd),
19831 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
19832 ADDRGLDRS), ldrd, t_ldstd),
19833
19834 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19835 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19836
19837 #undef ARM_VARIANT
19838 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
19839
19840 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
19841
19842 #undef ARM_VARIANT
19843 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
19844 #undef THUMB_VARIANT
19845 #define THUMB_VARIANT & arm_ext_v6
19846
19847 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
19848 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
19849 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19850 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19851 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19852 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19853 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19854 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19855 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19856 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
19857
19858 #undef THUMB_VARIANT
19859 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19860
19861 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
19862 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19863 strex, t_strex),
19864 #undef THUMB_VARIANT
19865 #define THUMB_VARIANT & arm_ext_v6t2
19866
19867 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19868 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19869
19870 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
19871 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
19872
19873 /* ARM V6 not included in V7M. */
19874 #undef THUMB_VARIANT
19875 #define THUMB_VARIANT & arm_ext_v6_notm
19876 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19877 TUF("rfe", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19878 UF(rfeib, 9900a00, 1, (RRw), rfe),
19879 UF(rfeda, 8100a00, 1, (RRw), rfe),
19880 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
19881 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19882 UF(rfefa, 8100a00, 1, (RRw), rfe),
19883 TUF("rfeea", 9100a00, e810c000, 1, (RRw), rfe, rfe),
19884 UF(rfeed, 9900a00, 1, (RRw), rfe),
19885 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19886 TUF("srs", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19887 TUF("srsea", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19888 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
19889 UF(srsfa, 9c00500, 2, (oRRw, I31w), srs),
19890 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
19891 UF(srsed, 8400500, 2, (oRRw, I31w), srs),
19892 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
19893 TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
19894 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
19895
19896 /* ARM V6 not included in V7M (eg. integer SIMD). */
19897 #undef THUMB_VARIANT
19898 #define THUMB_VARIANT & arm_ext_v6_dsp
19899 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
19900 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
19901 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19902 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19903 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19904 /* Old name for QASX. */
19905 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19906 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19907 /* Old name for QSAX. */
19908 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19909 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19910 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19911 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19912 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19913 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19914 /* Old name for SASX. */
19915 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19916 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19917 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19918 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19919 /* Old name for SHASX. */
19920 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19921 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19922 /* Old name for SHSAX. */
19923 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19924 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19925 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19926 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19927 /* Old name for SSAX. */
19928 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19929 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19930 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19931 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19932 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19933 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19934 /* Old name for UASX. */
19935 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19936 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19937 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19938 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19939 /* Old name for UHASX. */
19940 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19941 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19942 /* Old name for UHSAX. */
19943 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19944 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19945 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19946 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19947 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19948 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19949 /* Old name for UQASX. */
19950 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19951 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19952 /* Old name for UQSAX. */
19953 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19954 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19955 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19956 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19957 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19958 /* Old name for USAX. */
19959 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19960 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19961 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19962 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19963 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19964 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19965 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19966 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19967 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19968 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19969 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19970 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19971 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19972 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19973 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19974 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19975 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19976 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19977 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19978 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19979 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19980 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19981 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19982 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19983 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19984 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19985 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19986 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19987 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19988 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
19989 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
19990 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19991 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19992 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
19993
19994 #undef ARM_VARIANT
19995 #define ARM_VARIANT & arm_ext_v6k
19996 #undef THUMB_VARIANT
19997 #define THUMB_VARIANT & arm_ext_v6k
19998
19999 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
20000 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
20001 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
20002 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
20003
20004 #undef THUMB_VARIANT
20005 #define THUMB_VARIANT & arm_ext_v6_notm
20006 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
20007 ldrexd, t_ldrexd),
20008 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
20009 RRnpcb), strexd, t_strexd),
20010
20011 #undef THUMB_VARIANT
20012 #define THUMB_VARIANT & arm_ext_v6t2_v8m
20013 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
20014 rd_rn, rd_rn),
20015 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
20016 rd_rn, rd_rn),
20017 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
20018 strex, t_strexbh),
20019 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
20020 strex, t_strexbh),
20021 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
20022
20023 #undef ARM_VARIANT
20024 #define ARM_VARIANT & arm_ext_sec
20025 #undef THUMB_VARIANT
20026 #define THUMB_VARIANT & arm_ext_sec
20027
20028 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
20029
20030 #undef ARM_VARIANT
20031 #define ARM_VARIANT & arm_ext_virt
20032 #undef THUMB_VARIANT
20033 #define THUMB_VARIANT & arm_ext_virt
20034
20035 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
20036 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
20037
20038 #undef ARM_VARIANT
20039 #define ARM_VARIANT & arm_ext_pan
20040 #undef THUMB_VARIANT
20041 #define THUMB_VARIANT & arm_ext_pan
20042
20043 TUF("setpan", 1100000, b610, 1, (I7), setpan, t_setpan),
20044
20045 #undef ARM_VARIANT
20046 #define ARM_VARIANT & arm_ext_v6t2
20047 #undef THUMB_VARIANT
20048 #define THUMB_VARIANT & arm_ext_v6t2
20049
20050 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
20051 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
20052 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
20053 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
20054
20055 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
20056 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
20057
20058 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
20059 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
20060 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
20061 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
20062
20063 #undef ARM_VARIANT
20064 #define ARM_VARIANT & arm_ext_v3
20065 #undef THUMB_VARIANT
20066 #define THUMB_VARIANT & arm_ext_v6t2
20067
20068 TUE("csdb", 320f014, f3af8014, 0, (), noargs, t_csdb),
20069 TUF("ssbb", 57ff040, f3bf8f40, 0, (), noargs, t_csdb),
20070 TUF("pssbb", 57ff044, f3bf8f44, 0, (), noargs, t_csdb),
20071
20072 #undef ARM_VARIANT
20073 #define ARM_VARIANT & arm_ext_v6t2
20074 #undef THUMB_VARIANT
20075 #define THUMB_VARIANT & arm_ext_v6t2_v8m
20076 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
20077 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
20078
20079 /* Thumb-only instructions. */
20080 #undef ARM_VARIANT
20081 #define ARM_VARIANT NULL
20082 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
20083 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
20084
20085 /* ARM does not really have an IT instruction, so always allow it.
20086 The opcode is copied from Thumb in order to allow warnings in
20087 -mimplicit-it=[never | arm] modes. */
20088 #undef ARM_VARIANT
20089 #define ARM_VARIANT & arm_ext_v1
20090 #undef THUMB_VARIANT
20091 #define THUMB_VARIANT & arm_ext_v6t2
20092
20093 TUE("it", bf08, bf08, 1, (COND), it, t_it),
20094 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
20095 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
20096 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
20097 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
20098 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
20099 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
20100 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
20101 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
20102 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
20103 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
20104 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
20105 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
20106 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
20107 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
20108 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
20109 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
20110 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
20111
20112 /* Thumb2 only instructions. */
20113 #undef ARM_VARIANT
20114 #define ARM_VARIANT NULL
20115
20116 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
20117 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
20118 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
20119 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
20120 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
20121 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
20122
20123 /* Hardware division instructions. */
20124 #undef ARM_VARIANT
20125 #define ARM_VARIANT & arm_ext_adiv
20126 #undef THUMB_VARIANT
20127 #define THUMB_VARIANT & arm_ext_div
20128
20129 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
20130 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
20131
20132 /* ARM V6M/V7 instructions. */
20133 #undef ARM_VARIANT
20134 #define ARM_VARIANT & arm_ext_barrier
20135 #undef THUMB_VARIANT
20136 #define THUMB_VARIANT & arm_ext_barrier
20137
20138 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
20139 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
20140 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
20141
20142 /* ARM V7 instructions. */
20143 #undef ARM_VARIANT
20144 #define ARM_VARIANT & arm_ext_v7
20145 #undef THUMB_VARIANT
20146 #define THUMB_VARIANT & arm_ext_v7
20147
20148 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
20149 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
20150
20151 #undef ARM_VARIANT
20152 #define ARM_VARIANT & arm_ext_mp
20153 #undef THUMB_VARIANT
20154 #define THUMB_VARIANT & arm_ext_mp
20155
20156 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
20157
 20158  /* ARMv8 instructions.  */
20159 #undef ARM_VARIANT
20160 #define ARM_VARIANT & arm_ext_v8
20161
20162 /* Instructions shared between armv8-a and armv8-m. */
20163 #undef THUMB_VARIANT
20164 #define THUMB_VARIANT & arm_ext_atomics
20165
20166 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20167 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20168 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20169 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
20170 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
20171 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
20172 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20173 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
20174 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20175 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
20176 stlex, t_stlex),
20177 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
20178 stlex, t_stlex),
20179 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
20180 stlex, t_stlex),
20181 #undef THUMB_VARIANT
20182 #define THUMB_VARIANT & arm_ext_v8
20183
20184 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
20185 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
20186 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
20187 ldrexd, t_ldrexd),
20188 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
20189 strexd, t_strexd),
20190 /* ARMv8 T32 only. */
20191 #undef ARM_VARIANT
20192 #define ARM_VARIANT NULL
20193 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
20194 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
20195 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
20196
20197 /* FP for ARMv8. */
20198 #undef ARM_VARIANT
20199 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
20200 #undef THUMB_VARIANT
20201 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
20202
20203 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
20204 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
20205 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
20206 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
20207 nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
20208 nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
20209 nUF(vcvta, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvta),
20210 nUF(vcvtn, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtn),
20211 nUF(vcvtp, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtp),
20212 nUF(vcvtm, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtm),
20213 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
20214 nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ), vrintz),
20215 nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ), vrintx),
20216 nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ), vrinta),
20217 nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ), vrintn),
20218 nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ), vrintp),
20219 nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ), vrintm),
20220
20221 /* Crypto v1 extensions. */
20222 #undef ARM_VARIANT
20223 #define ARM_VARIANT & fpu_crypto_ext_armv8
20224 #undef THUMB_VARIANT
20225 #define THUMB_VARIANT & fpu_crypto_ext_armv8
20226
20227 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
20228 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
20229 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
20230 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
20231 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
20232 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
20233 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
20234 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
20235 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
20236 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
20237 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
20238 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
20239 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
20240 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
20241
20242 #undef ARM_VARIANT
20243 #define ARM_VARIANT & crc_ext_armv8
20244 #undef THUMB_VARIANT
20245 #define THUMB_VARIANT & crc_ext_armv8
20246 TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
20247 TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
20248 TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
20249 TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
20250 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
20251 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
20252
20253 /* ARMv8.2 RAS extension. */
20254 #undef ARM_VARIANT
20255 #define ARM_VARIANT & arm_ext_ras
20256 #undef THUMB_VARIANT
20257 #define THUMB_VARIANT & arm_ext_ras
20258 TUE ("esb", 320f010, f3af8010, 0, (), noargs, noargs),
20259
20260 #undef ARM_VARIANT
20261 #define ARM_VARIANT & arm_ext_v8_3
20262 #undef THUMB_VARIANT
20263 #define THUMB_VARIANT & arm_ext_v8_3
20264 NCE (vjcvt, eb90bc0, 2, (RVS, RVD), vjcvt),
20265 NUF (vcmla, 0, 4, (RNDQ, RNDQ, RNDQ_RNSC, EXPi), vcmla),
20266 NUF (vcadd, 0, 4, (RNDQ, RNDQ, RNDQ, EXPi), vcadd),
20267
20268 #undef ARM_VARIANT
20269 #define ARM_VARIANT & fpu_neon_ext_dotprod
20270 #undef THUMB_VARIANT
20271 #define THUMB_VARIANT & fpu_neon_ext_dotprod
20272 NUF (vsdot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_s),
20273 NUF (vudot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_u),
20274
20275 #undef ARM_VARIANT
20276 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
20277 #undef THUMB_VARIANT
20278 #define THUMB_VARIANT NULL
20279
20280 cCE("wfs", e200110, 1, (RR), rd),
20281 cCE("rfs", e300110, 1, (RR), rd),
20282 cCE("wfc", e400110, 1, (RR), rd),
20283 cCE("rfc", e500110, 1, (RR), rd),
20284
20285 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
20286 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
20287 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
20288 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
20289
20290 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
20291 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
20292 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
20293 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
20294
20295 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
20296 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
20297 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
20298 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
20299 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
20300 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
20301 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
20302 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
20303 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
20304 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
20305 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
20306 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
20307
20308 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
20309 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
20310 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
20311 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
20312 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
20313 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
20314 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
20315 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
20316 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
20317 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
20318 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
20319 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
20320
20321 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
20322 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
20323 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
20324 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
20325 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
20326 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
20327 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
20328 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
20329 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
20330 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
20331 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
20332 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
20333
20334 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
20335 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
20336 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
20337 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
20338 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
20339 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
20340 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
20341 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
20342 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
20343 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
20344 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
20345 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
20346
20347 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
20348 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
20349 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
20350 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
20351 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
20352 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
20353 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
20354 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
20355 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
20356 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
20357 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
20358 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
20359
20360 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
20361 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
20362 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
20363 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
20364 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
20365 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
20366 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
20367 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
20368 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
20369 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
20370 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
20371 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
20372
20373 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
20374 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
20375 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
20376 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
20377 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
20378 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
20379 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
20380 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
20381 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
20382 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
20383 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
20384 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
20385
20386 cCL("exps", e708100, 2, (RF, RF_IF), rd_rm),
20387 cCL("expsp", e708120, 2, (RF, RF_IF), rd_rm),
20388 cCL("expsm", e708140, 2, (RF, RF_IF), rd_rm),
20389 cCL("expsz", e708160, 2, (RF, RF_IF), rd_rm),
20390 cCL("expd", e708180, 2, (RF, RF_IF), rd_rm),
20391 cCL("expdp", e7081a0, 2, (RF, RF_IF), rd_rm),
20392 cCL("expdm", e7081c0, 2, (RF, RF_IF), rd_rm),
20393 cCL("expdz", e7081e0, 2, (RF, RF_IF), rd_rm),
20394 cCL("expe", e788100, 2, (RF, RF_IF), rd_rm),
20395 cCL("expep", e788120, 2, (RF, RF_IF), rd_rm),
20396 cCL("expem", e788140, 2, (RF, RF_IF), rd_rm),
20397 cCL("expdz", e788160, 2, (RF, RF_IF), rd_rm),
20398
20399 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
20400 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
20401 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
20402 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
20403 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
20404 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
20405 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
20406 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
20407 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
20408 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
20409 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
20410 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
20411
20412 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
20413 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
20414 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
20415 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
20416 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
20417 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
20418 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
20419 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
20420 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
20421 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
20422 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
20423 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
20424
20425 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
20426 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
20427 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
20428 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
20429 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
20430 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
20431 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
20432 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
20433 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
20434 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
20435 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
20436 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
20437
20438 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
20439 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
20440 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
20441 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
20442 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
20443 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
20444 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
20445 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
20446 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
20447 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
20448 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
20449 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
20450
20451 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
20452 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
20453 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
20454 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
20455 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
20456 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
20457 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
20458 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
20459 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
20460 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
20461 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
20462 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
20463
20464 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
20465 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
20466 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
20467 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
20468 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
20469 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
20470 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
20471 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
20472 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
20473 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
20474 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
20475 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
20476
20477 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
20478 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
20479 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
20480 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
20481 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
20482 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
20483 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
20484 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
20485 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
20486 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
20487 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
20488 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
20489
20490 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
20491 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
20492 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
20493 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
20494 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
20495 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
20496 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
20497 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
20498 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
20499 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
20500 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
20501 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
20502
20503 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
20504 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
20505 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
20506 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
20507 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
20508 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20509 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20510 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20511 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
20512 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
20513 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
20514 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
20515
20516 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
20517 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
20518 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
20519 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
20520 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
20521 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20522 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20523 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20524 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
20525 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
20526 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
20527 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
20528
20529 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
20530 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
20531 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
20532 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
20533 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
20534 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20535 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20536 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20537 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
20538 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
20539 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
20540 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
20541
20542 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
20543 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
20544 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
20545 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
20546 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
20547 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20548 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20549 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20550 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
20551 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
20552 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
20553 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
20554
20555 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
20556 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
20557 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
20558 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
20559 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
20560 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20561 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20562 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20563 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
20564 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
20565 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
20566 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
20567
20568 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
20569 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
20570 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
20571 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
20572 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
20573 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20574 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20575 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20576 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
20577 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
20578 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
20579 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
20580
20581 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
20582 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
20583 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
20584 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
20585 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
20586 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20587 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20588 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20589 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
20590 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
20591 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
20592 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
20593
20594 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
20595 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
20596 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
20597 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
20598 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
20599 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20600 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20601 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20602 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
20603 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
20604 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
20605 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
20606
20607 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
20608 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
20609 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
20610 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
20611 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
20612 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20613 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20614 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20615 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
20616 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
20617 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
20618 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
20619
20620 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
20621 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
20622 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
20623 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
20624 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
20625 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20626 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20627 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20628 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
20629 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
20630 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
20631 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
20632
20633 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20634 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20635 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20636 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20637 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20638 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20639 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20640 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20641 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20642 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20643 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20644 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20645
20646 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20647 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20648 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20649 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20650 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20651 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20652 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20653 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20654 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20655 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20656 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20657 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20658
20659 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20660 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20661 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20662 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20663 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20664 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20665 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20666 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20667 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20668 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20669 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20670 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20671
20672 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
20673 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
20674 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
20675 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
20676
20677 cCL("flts", e000110, 2, (RF, RR), rn_rd),
20678 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
20679 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
20680 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
20681 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
20682 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
20683 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
20684 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
20685 cCL("flte", e080110, 2, (RF, RR), rn_rd),
20686 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
20687 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
20688 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
20689
20690 /* The implementation of the FIX instruction is broken on some
20691 assemblers, in that it accepts a precision specifier as well as a
20692 rounding specifier, despite the fact that this is meaningless.
20693 To be more compatible, we accept it as well, though of course it
20694 does not set any bits. */
20695 cCE("fix", e100110, 2, (RR, RF), rd_rm),
20696 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
20697 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
20698 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
20699 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
20700 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
20701 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
20702 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
20703 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
20704 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
20705 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
20706 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
20707 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
20708
20709 /* Instructions that were new with the real FPA, call them V2. */
20710 #undef ARM_VARIANT
20711 #define ARM_VARIANT & fpu_fpa_ext_v2
20712
20713 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20714 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20715 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20716 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20717 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20718 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20719
20720 #undef ARM_VARIANT
20721 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
20722
20723 /* Moves and type conversions. */
20724 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
20725 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
20726 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
20727 cCE("fmstat", ef1fa10, 0, (), noargs),
20728 cCE("vmrs", ef00a10, 2, (APSR_RR, RVC), vmrs),
20729 cCE("vmsr", ee00a10, 2, (RVC, RR), vmsr),
20730 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
20731 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
20732 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
20733 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
20734 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
20735 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
20736 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
20737 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
20738
20739 /* Memory operations. */
20740 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
20741 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
20742 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20743 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20744 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20745 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20746 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20747 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20748 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20749 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20750 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20751 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20752 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20753 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20754 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20755 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20756 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20757 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20758
20759 /* Monadic operations. */
20760 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
20761 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
20762 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
20763
20764 /* Dyadic operations. */
20765 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20766 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20767 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20768 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20769 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20770 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20771 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20772 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20773 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20774
20775 /* Comparisons. */
20776 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
20777 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
20778 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
20779 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
20780
20781 /* Double precision load/store are still present on single precision
20782 implementations. */
20783 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
20784 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
20785 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20786 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20787 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20788 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20789 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20790 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20791 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20792 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20793
20794 #undef ARM_VARIANT
20795 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
20796
20797 /* Moves and type conversions. */
20798 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20799 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
20800 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20801 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
20802 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
20803 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
20804 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
20805 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
20806 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
20807 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
20808 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20809 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
20810 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20811
20812 /* Monadic operations. */
20813 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20814 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20815 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20816
20817 /* Dyadic operations. */
20818 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20819 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20820 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20821 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20822 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20823 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20824 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20825 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20826 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20827
20828 /* Comparisons. */
20829 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20830 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
20831 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20832 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
20833
20834 #undef ARM_VARIANT
20835 #define ARM_VARIANT & fpu_vfp_ext_v2
20836
20837 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
20838 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
20839 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
20840 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
20841
20842 /* Instructions which may belong to either the Neon or VFP instruction sets.
20843 Individual encoder functions perform additional architecture checks. */
20844 #undef ARM_VARIANT
20845 #define ARM_VARIANT & fpu_vfp_ext_v1xd
20846 #undef THUMB_VARIANT
20847 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
20848
20849 /* These mnemonics are unique to VFP. */
20850 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
20851 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
20852 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20853 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20854 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20855 nCE(vcmp, _vcmp, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
20856 nCE(vcmpe, _vcmpe, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
20857 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
20858 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
20859 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
20860
20861 /* Mnemonics shared by Neon and VFP. */
20862 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
20863 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
20864 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
20865
20866 nCEF(vadd, _vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
20867 nCEF(vsub, _vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
20868
20869 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
20870 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
20871
20872 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20873 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20874 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20875 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20876 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20877 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20878 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
20879 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
20880
20881 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
20882 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
20883 NCEF(vcvtb, eb20a40, 2, (RVSD, RVSD), neon_cvtb),
20884 NCEF(vcvtt, eb20a40, 2, (RVSD, RVSD), neon_cvtt),
20885
20886
20887 /* NOTE: All VMOV encoding is special-cased! */
20888 NCE(vmov, 0, 1, (VMOV), neon_mov),
20889 NCE(vmovq, 0, 1, (VMOV), neon_mov),
20890
20891 #undef ARM_VARIANT
20892 #define ARM_VARIANT & arm_ext_fp16
20893 #undef THUMB_VARIANT
20894 #define THUMB_VARIANT & arm_ext_fp16
20895 /* New instructions added from v8.2, allowing the extraction and insertion of
20896 the upper 16 bits of a 32-bit vector register. */
20897 NCE (vmovx, eb00a40, 2, (RVS, RVS), neon_movhf),
20898 NCE (vins, eb00ac0, 2, (RVS, RVS), neon_movhf),
20899
20900 /* New backported fma/fms instructions optional in v8.2. */
20901 NCE (vfmal, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmal),
20902 NCE (vfmsl, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmsl),
20903
20904 #undef THUMB_VARIANT
20905 #define THUMB_VARIANT & fpu_neon_ext_v1
20906 #undef ARM_VARIANT
20907 #define ARM_VARIANT & fpu_neon_ext_v1
20908
20909 /* Data processing with three registers of the same length. */
20910 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
20911 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
20912 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
20913 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20914 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20915 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20916 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20917 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20918 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20919 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
20920 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
20921 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
20922 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
20923 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
20924 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
20925 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
20926 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
20927 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
20928 /* If not immediate, fall back to neon_dyadic_i64_su.
20929 shl_imm should accept I8 I16 I32 I64,
20930 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
20931 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
20932 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
20933 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
20934 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
20935 /* Logic ops, types optional & ignored. */
20936 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20937 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20938 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20939 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20940 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20941 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20942 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20943 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20944 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
20945 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
20946 /* Bitfield ops, untyped. */
20947 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20948 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20949 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20950 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20951 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20952 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20953 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
20954 nUF(vabd, _vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20955 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20956 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20957 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20958 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20959 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20960 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
20961 back to neon_dyadic_if_su. */
20962 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20963 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
20964 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20965 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
20966 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20967 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
20968 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20969 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
20970 /* Comparison. Type I8 I16 I32 F32. */
20971 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
20972 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
20973 /* As above, D registers only. */
20974 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
20975 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
20976 /* Int and float variants, signedness unimportant. */
20977 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
20978 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
20979 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
20980 /* Add/sub take types I8 I16 I32 I64 F32. */
20981 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
20982 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
20983 /* vtst takes sizes 8, 16, 32. */
20984 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
20985 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
20986 /* VMUL takes I8 I16 I32 F32 P8. */
20987 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
20988 /* VQD{R}MULH takes S16 S32. */
20989 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20990 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20991 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20992 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20993 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20994 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
20995 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20996 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
20997 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20998 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
20999 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
21000 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
21001 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
21002 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
21003 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
21004 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
21005 /* ARM v8.1 extension. */
21006 nUF (vqrdmlah, _vqrdmlah, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
21007 nUF (vqrdmlahq, _vqrdmlah, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
21008 nUF (vqrdmlsh, _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
21009 nUF (vqrdmlshq, _vqrdmlsh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
21010
21011 /* Two address, int/float. Types S8 S16 S32 F32. */
21012 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
21013 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
21014
21015 /* Data processing with two registers and a shift amount. */
21016 /* Right shifts, and variants with rounding.
21017 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
21018 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
21019 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
21020 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
21021 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
21022 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
21023 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
21024 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
21025 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
21026 /* Shift and insert. Sizes accepted 8 16 32 64. */
21027 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
21028 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
21029 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
21030 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
21031 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
21032 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
21033 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
21034 /* Right shift immediate, saturating & narrowing, with rounding variants.
21035 Types accepted S16 S32 S64 U16 U32 U64. */
21036 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
21037 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
21038 /* As above, unsigned. Types accepted S16 S32 S64. */
21039 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
21040 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
21041 /* Right shift narrowing. Types accepted I16 I32 I64. */
21042 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
21043 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
21044 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
21045 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
21046 /* CVT with optional immediate for fixed-point variant. */
21047 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
21048
21049 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
21050 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
21051
21052 /* Data processing, three registers of different lengths. */
21053 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
21054 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
21055 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
21056 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
21057 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
21058 /* If not scalar, fall back to neon_dyadic_long.
21059 Vector types as above, scalar types S16 S32 U16 U32. */
21060 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
21061 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
21062 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
21063 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
21064 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
21065 /* Dyadic, narrowing insns. Types I16 I32 I64. */
21066 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
21067 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
21068 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
21069 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
21070 /* Saturating doubling multiplies. Types S16 S32. */
21071 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
21072 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
21073 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
21074 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
21075 S16 S32 U16 U32. */
21076 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
21077
21078 /* Extract. Size 8. */
21079 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
21080 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
21081
21082 /* Two registers, miscellaneous. */
21083 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
21084 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
21085 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
21086 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
21087 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
21088 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
21089 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
21090 /* Vector replicate. Sizes 8 16 32. */
21091 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
21092 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
21093 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
21094 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
21095 /* VMOVN. Types I16 I32 I64. */
21096 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
21097 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
21098 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
21099 /* VQMOVUN. Types S16 S32 S64. */
21100 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
21101 /* VZIP / VUZP. Sizes 8 16 32. */
21102 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
21103 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
21104 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
21105 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
21106 /* VQABS / VQNEG. Types S8 S16 S32. */
21107 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
21108 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
21109 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
21110 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
21111 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
21112 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
21113 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
21114 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
21115 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
21116 /* Reciprocal estimates. Types U32 F16 F32. */
21117 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
21118 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
21119 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
21120 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
21121 /* VCLS. Types S8 S16 S32. */
21122 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
21123 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
21124 /* VCLZ. Types I8 I16 I32. */
21125 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
21126 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
21127 /* VCNT. Size 8. */
21128 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
21129 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
21130 /* Two address, untyped. */
21131 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
21132 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
21133 /* VTRN. Sizes 8 16 32. */
21134 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
21135 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
21136
21137 /* Table lookup. Size 8. */
21138 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
21139 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
21140
21141 #undef THUMB_VARIANT
21142 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
21143 #undef ARM_VARIANT
21144 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
21145
21146 /* Neon element/structure load/store. */
21147 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
21148 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
21149 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
21150 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
21151 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
21152 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
21153 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
21154 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
21155
21156 #undef THUMB_VARIANT
21157 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
21158 #undef ARM_VARIANT
21159 #define ARM_VARIANT & fpu_vfp_ext_v3xd
21160 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
21161 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
21162 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
21163 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
21164 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
21165 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
21166 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
21167 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
21168 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
21169
21170 #undef THUMB_VARIANT
21171 #define THUMB_VARIANT & fpu_vfp_ext_v3
21172 #undef ARM_VARIANT
21173 #define ARM_VARIANT & fpu_vfp_ext_v3
21174
21175 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
21176 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
21177 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
21178 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
21179 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
21180 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
21181 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
21182 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
21183 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
21184
21185 #undef ARM_VARIANT
21186 #define ARM_VARIANT & fpu_vfp_ext_fma
21187 #undef THUMB_VARIANT
21188 #define THUMB_VARIANT & fpu_vfp_ext_fma
21189 /* Mnemonics shared by Neon and VFP. These are included in the
21190 VFP FMA variant; NEON and VFP FMA always includes the NEON
21191 FMA instructions. */
21192 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
21193 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
21194 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
21195 the v form should always be used. */
21196 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21197 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21198 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21199 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21200 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
21201 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
21202
21203 #undef THUMB_VARIANT
21204 #undef ARM_VARIANT
21205 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
21206
21207 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21208 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21209 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21210 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21211 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21212 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21213 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
21214 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
21215
21216 #undef ARM_VARIANT
21217 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
21218
21219 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
21220 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
21221 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
21222 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
21223 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
21224 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
21225 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
21226 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
21227 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
21228 cCE("textrmub",e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
21229 cCE("textrmuh",e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
21230 cCE("textrmuw",e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
21231 cCE("textrmsb",e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
21232 cCE("textrmsh",e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
21233 cCE("textrmsw",e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
21234 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
21235 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
21236 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
21237 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
21238 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
21239 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21240 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21241 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21242 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21243 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21244 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21245 cCE("tmovmskb",e100030, 2, (RR, RIWR), rd_rn),
21246 cCE("tmovmskh",e500030, 2, (RR, RIWR), rd_rn),
21247 cCE("tmovmskw",e900030, 2, (RR, RIWR), rd_rn),
21248 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
21249 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
21250 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
21251 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
21252 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
21253 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
21254 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
21255 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
21256 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21257 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21258 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21259 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21260 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21261 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21262 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21263 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21264 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21265 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
21266 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21267 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21268 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21269 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21270 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21271 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21272 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21273 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21274 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21275 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21276 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21277 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21278 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21279 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21280 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21281 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21282 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21283 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21284 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21285 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21286 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21287 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
21288 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
21289 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21290 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21291 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21292 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21293 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21294 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21295 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21296 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21297 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21298 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21299 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21300 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21301 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21302 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21303 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21304 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21305 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21306 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21307 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
21308 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21309 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21310 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21311 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21312 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21313 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21314 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21315 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21316 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21317 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21318 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21319 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21320 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21321 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21322 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21323 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21324 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21325 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21326 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21327 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21328 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21329 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
21330 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21331 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21332 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21333 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21334 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21335 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21336 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21337 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21338 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21339 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21340 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21341 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21342 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21343 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21344 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21345 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21346 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21347 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21348 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21349 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21350 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
21351 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
21352 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21353 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21354 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21355 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21356 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21357 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21358 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21359 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21360 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21361 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
21362 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
21363 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
21364 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
21365 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
21366 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
21367 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21368 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21369 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21370 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
21371 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
21372 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
21373 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
21374 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
21375 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
21376 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21377 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21378 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21379 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21380 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
21381
21382 #undef ARM_VARIANT
21383 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
21384
21385 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
21386 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
21387 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
21388 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
21389 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
21390 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
21391 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21392 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21393 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21394 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21395 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21396 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21397 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21398 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21399 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21400 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21401 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21402 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21403 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21404 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21405 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
21406 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21407 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21408 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21409 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21410 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21411 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21412 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21413 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21414 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21415 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21416 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21417 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21418 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21419 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21420 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21421 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21422 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21423 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21424 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21425 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21426 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21427 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21428 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21429 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21430 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21431 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21432 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21433 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21434 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21435 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21436 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21437 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21438 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21439 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21440 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21441 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21442
21443 #undef ARM_VARIANT
21444 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
21445
21446 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
21447 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
21448 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
21449 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
21450 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
21451 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
21452 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
21453 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
21454 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
21455 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
21456 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
21457 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
21458 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
21459 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
21460 cCE("cfmv64lr",e000510, 2, (RMDX, RR), rn_rd),
21461 cCE("cfmvr64l",e100510, 2, (RR, RMDX), rd_rn),
21462 cCE("cfmv64hr",e000530, 2, (RMDX, RR), rn_rd),
21463 cCE("cfmvr64h",e100530, 2, (RR, RMDX), rd_rn),
21464 cCE("cfmval32",e200440, 2, (RMAX, RMFX), rd_rn),
21465 cCE("cfmv32al",e100440, 2, (RMFX, RMAX), rd_rn),
21466 cCE("cfmvam32",e200460, 2, (RMAX, RMFX), rd_rn),
21467 cCE("cfmv32am",e100460, 2, (RMFX, RMAX), rd_rn),
21468 cCE("cfmvah32",e200480, 2, (RMAX, RMFX), rd_rn),
21469 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX), rd_rn),
21470 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
21471 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
21472 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
21473 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
21474 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX), mav_dspsc),
21475 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS), rd),
21476 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
21477 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
21478 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
21479 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
21480 cCE("cfcvt32s",e000480, 2, (RMF, RMFX), rd_rn),
21481 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX), rd_rn),
21482 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX), rd_rn),
21483 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX), rd_rn),
21484 cCE("cfcvts32",e100580, 2, (RMFX, RMF), rd_rn),
21485 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD), rd_rn),
21486 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
21487 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
21488 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR), mav_triple),
21489 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR), mav_triple),
21490 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
21491 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
21492 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
21493 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
21494 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
21495 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
21496 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
21497 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
21498 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
21499 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
21500 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
21501 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
21502 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
21503 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
21504 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
21505 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
21506 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
21507 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
21508 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
21509 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
21510 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21511 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
21512 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21513 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
21514 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21515 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
21516 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21517 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21518 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
21519 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
21520 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
21521 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
21522
21523 /* ARMv8.5-A instructions. */
21524 #undef ARM_VARIANT
21525 #define ARM_VARIANT & arm_ext_sb
21526 #undef THUMB_VARIANT
21527 #define THUMB_VARIANT & arm_ext_sb
21528 TUF("sb", 57ff070, f3bf8f70, 0, (), noargs, noargs),
21529
21530 #undef ARM_VARIANT
21531 #define ARM_VARIANT & arm_ext_predres
21532 #undef THUMB_VARIANT
21533 #define THUMB_VARIANT & arm_ext_predres
21534 CE("cfprctx", e070f93, 1, (RRnpc), rd),
21535 CE("dvprctx", e070fb3, 1, (RRnpc), rd),
21536 CE("cpprctx", e070ff3, 1, (RRnpc), rd),
21537
21538 /* ARMv8-M instructions. */
21539 #undef ARM_VARIANT
21540 #define ARM_VARIANT NULL
21541 #undef THUMB_VARIANT
21542 #define THUMB_VARIANT & arm_ext_v8m
21543 ToU("sg", e97fe97f, 0, (), noargs),
21544 ToC("blxns", 4784, 1, (RRnpc), t_blx),
21545 ToC("bxns", 4704, 1, (RRnpc), t_bx),
21546 ToC("tt", e840f000, 2, (RRnpc, RRnpc), tt),
21547 ToC("ttt", e840f040, 2, (RRnpc, RRnpc), tt),
21548 ToC("tta", e840f080, 2, (RRnpc, RRnpc), tt),
21549 ToC("ttat", e840f0c0, 2, (RRnpc, RRnpc), tt),
21550
21551 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
21552 instructions behave as nop if no VFP is present. */
21553 #undef THUMB_VARIANT
21554 #define THUMB_VARIANT & arm_ext_v8m_main
21555 ToC("vlldm", ec300a00, 1, (RRnpc), rn),
21556 ToC("vlstm", ec200a00, 1, (RRnpc), rn),
21557 };
21558 #undef ARM_VARIANT
21559 #undef THUMB_VARIANT
21560 #undef TCE
21561 #undef TUE
21562 #undef TUF
21563 #undef TCC
21564 #undef cCE
21565 #undef cCL
21566 #undef C3E
21567 #undef CE
21568 #undef CM
21569 #undef UE
21570 #undef UF
21571 #undef UT
21572 #undef NUF
21573 #undef nUF
21574 #undef NCE
21575 #undef nCE
21576 #undef OPS0
21577 #undef OPS1
21578 #undef OPS2
21579 #undef OPS3
21580 #undef OPS4
21581 #undef OPS5
21582 #undef OPS6
21583 #undef do_0
21584 \f
21585 /* MD interface: bits in the object file. */
21586
21587 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
21588 for use in the a.out file, and stores them in the array pointed to by buf.
21589 This knows about the endian-ness of the target machine and does
21590 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
21591 2 (short) and 4 (long) Floating numbers are put out as a series of
21592 LITTLENUMS (shorts, here at least). */
21593
21594 void
21595 md_number_to_chars (char * buf, valueT val, int n)
21596 {
21597 if (target_big_endian)
21598 number_to_chars_bigendian (buf, val, n);
21599 else
21600 number_to_chars_littleendian (buf, val, n);
21601 }
21602
21603 static valueT
21604 md_chars_to_number (char * buf, int n)
21605 {
21606 valueT result = 0;
21607 unsigned char * where = (unsigned char *) buf;
21608
21609 if (target_big_endian)
21610 {
21611 while (n--)
21612 {
21613 result <<= 8;
21614 result |= (*where++ & 255);
21615 }
21616 }
21617 else
21618 {
21619 while (n--)
21620 {
21621 result <<= 8;
21622 result |= (where[n] & 255);
21623 }
21624 }
21625
21626 return result;
21627 }
21628
21629 /* MD interface: Sections. */
21630
/* Calculate the maximum variable size (i.e., excluding fr_fix)
   that an rs_machine_dependent frag may reach.  */

unsigned int
arm_frag_max_var (fragS *fragp)
{
  /* We only use rs_machine_dependent for variable-size Thumb instructions,
     which are either THUMB_SIZE (2) or INSN_SIZE (4).

     Note that we generate relaxable instructions even for cases that don't
     really need it, like an immediate that's a trivial constant.  So we're
     overestimating the instruction size for some of those cases.  Rather
     than putting more intelligence here, it would probably be better to
     avoid generating a relaxation frag in the first place when it can be
     determined up front that a short instruction will suffice.  */

  /* Return the worst case (the wide 32-bit encoding).  */
  gas_assert (fragp->fr_type == rs_machine_dependent);
  return INSN_SIZE;
}
21650
/* Estimate the size of a frag before relaxing.  Assume everything fits in
   2 bytes.  */

int
md_estimate_size_before_relax (fragS * fragp,
			       segT segtype ATTRIBUTE_UNUSED)
{
  /* Start optimistically with the narrow (16-bit) encoding;
     arm_relax_frag will grow the frag to 4 bytes when necessary.  */
  fragp->fr_var = 2;
  return 2;
}
21661
/* Convert a machine dependent frag.  Called once relaxation has
   settled: rewrite the (possibly widened) Thumb instruction in place,
   transplanting the register fields of the original narrow encoding
   into the wide one, and attach the fixup for the final operand.  */

void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* The instruction being converted sits at the end of the fixed part
     of the frag.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  /* Read the original 16-bit (narrow) encoding; its register fields
     are copied into the wide encoding below.  */
  old_op = bfd_get_16(abfd, buf);
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* The narrow SP/PC-relative forms (top nibble 4 or 9) keep
	     Rd in bits 8-10; the register-offset forms keep Rd in
	     bits 0-2 and Rn in bits 3-5.  */
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the "ldr rd, <label>" form is PC-relative.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* Compensate for the PC offset of the narrow encoding.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* mov/movs carry Rd in bits 8-10 of the wide encoding;
	     cmp/cmn carry Rn in bits 16-18.  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Copy the condition field into the wide encoding.  */
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 distinguishes the flag-setting variants, which use
	     a different relocation.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort ();
    }
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;

  /* Set whether we use thumb-2 ISA based on final relaxation results.  */
  if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
      && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
    ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
}
21835
21836 /* Return the size of a relaxable immediate operand instruction.
21837 SHIFT and SIZE specify the form of the allowable immediate. */
21838 static int
21839 relax_immediate (fragS *fragp, int size, int shift)
21840 {
21841 offsetT offset;
21842 offsetT mask;
21843 offsetT low;
21844
21845 /* ??? Should be able to do better than this. */
21846 if (fragp->fr_symbol)
21847 return 4;
21848
21849 low = (1 << shift) - 1;
21850 mask = (1 << (shift + size)) - (1 << shift);
21851 offset = fragp->fr_offset;
21852 /* Force misaligned offsets to 32-bit variant. */
21853 if (offset & low)
21854 return 4;
21855 if (offset & ~mask)
21856 return 4;
21857 return 2;
21858 }
21859
/* Get the address of a symbol during relaxation.  STRETCH is the
   accumulated displacement of the current pass; frags not yet visited
   on this pass are assumed to move by the same amount.  */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* Round the stretch towards zero to the alignment
		 boundary imposed by this frag.  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      /* An alignment frag that absorbs the whole stretch means
		 frags beyond it have not moved at all.  */
	      if (stretch == 0)
		break;
	    }
	}
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
21909
/* Return the size of a relaxable adr pseudo-instruction or PC-relative
   load.  */
static int
relax_adr (fragS *fragp, asection *sec, long stretch)
{
  addressT addr;
  offsetT val;

  /* Assume worst case for symbols not known to be in the same section.  */
  if (fragp->fr_symbol == NULL
      || !S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol)
      || S_IS_WEAK (fragp->fr_symbol))
    return 4;

  val = relaxed_symbol_addr (fragp, stretch);
  addr = fragp->fr_address + fragp->fr_fix;
  /* The base used by the narrow encoding is the instruction address
     plus 4, rounded down to a word boundary.  */
  addr = (addr + 4) & ~3;
  /* Force misaligned targets to 32-bit variant.  */
  if (val & 3)
    return 4;
  val -= addr;
  /* The narrow form allows an unsigned, word-scaled 8-bit offset
     (0..1020); anything else needs the wide encoding.  */
  if (val < 0 || val > 1020)
    return 4;
  return 2;
}
21936
21937 /* Return the size of a relaxable add/sub immediate instruction. */
21938 static int
21939 relax_addsub (fragS *fragp, asection *sec)
21940 {
21941 char *buf;
21942 int op;
21943
21944 buf = fragp->fr_literal + fragp->fr_fix;
21945 op = bfd_get_16(sec->owner, buf);
21946 if ((op & 0xf) == ((op >> 4) & 0xf))
21947 return relax_immediate (fragp, 8, 0);
21948 else
21949 return relax_immediate (fragp, 3, 0);
21950 }
21951
21952 /* Return TRUE iff the definition of symbol S could be pre-empted
21953 (overridden) at link or load time. */
21954 static bfd_boolean
21955 symbol_preemptible (symbolS *s)
21956 {
21957 /* Weak symbols can always be pre-empted. */
21958 if (S_IS_WEAK (s))
21959 return TRUE;
21960
21961 /* Non-global symbols cannot be pre-empted. */
21962 if (! S_IS_EXTERNAL (s))
21963 return FALSE;
21964
21965 #ifdef OBJ_ELF
21966 /* In ELF, a global symbol can be marked protected, or private. In that
21967 case it can't be pre-empted (other definitions in the same link unit
21968 would violate the ODR). */
21969 if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
21970 return FALSE;
21971 #endif
21972
21973 /* Other global symbols might be pre-empted. */
21974 return TRUE;
21975 }
21976
/* Return the size of a relaxable branch instruction.  BITS is the
   size of the offset field in the narrow instruction.  */

static int
relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
{
  addressT addr;
  offsetT val;
  offsetT limit;

  /* Assume worst case for symbols not known to be in the same section.  */
  if (!S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol)
      || S_IS_WEAK (fragp->fr_symbol))
    return 4;

#ifdef OBJ_ELF
  /* A branch to a function in ARM state will require interworking.  */
  if (S_IS_DEFINED (fragp->fr_symbol)
      && ARM_IS_FUNC (fragp->fr_symbol))
    return 4;
#endif

  /* A symbol that may be bound to a different definition at link time
     cannot be assumed to stay within narrow-branch range.  */
  if (symbol_preemptible (fragp->fr_symbol))
    return 4;

  val = relaxed_symbol_addr (fragp, stretch);
  /* Branch offsets are relative to the instruction address plus 4.  */
  addr = fragp->fr_address + fragp->fr_fix + 4;
  val -= addr;

  /* Offset is a signed value *2 */
  limit = 1 << bits;
  if (val >= limit || val < -limit)
    return 4;
  return 2;
}
22013
22014
/* Relax a machine dependent frag.  This returns the amount by which
   the current size of the frag should change.  */

int
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;
  /* For the relax_immediate calls below, the second argument is the
     width in bits of the immediate field and the third is the log2
     scale factor applied to the offset.  */
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      newsize = relax_immediate (fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      newsize = relax_immediate (fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      newsize = relax_immediate (fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      newsize = relax_immediate (fragp, 8, 0);
      break;
    case T_MNEM_b:
      newsize = relax_branch (fragp, sec, 11, stretch);
      break;
    case T_MNEM_bcond:
      newsize = relax_branch (fragp, sec, 8, stretch);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      newsize = relax_immediate (fragp, 7, 2);
      break;
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      newsize = relax_addsub (fragp, sec);
      break;
    default:
      abort ();
    }

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
    {
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane (fragp);
    }

  return newsize - oldsize;
}
22093
/* Round up a section size to the appropriate boundary.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED,
		  valueT size)
{
  /* No extra padding is required here; alignment within sections is
     handled by the normal alignment frags, so return SIZE unchanged.  */
  return size;
}
22102
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment: pad with zero bytes up to a noop
   boundary, then with ARM or Thumb noops as appropriate for the
   frag's recorded mode.  */

void
arm_handle_align (fragS * fragP)
{
  /* Encodings are indexed [architecture variant][endianness].  */
  static unsigned char const arm_noop[2][2][4] =
    {
      {  /* ARMv1 */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      {  /* ARMv6k */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  static unsigned char const thumb_noop[2][2][2] =
    {
      {  /* Thumb-1 */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      {  /* Thumb-2 */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  static unsigned char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const unsigned char * noop;
  const unsigned char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  /* Number of bytes of padding this frag must supply.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  /* arm_init_frag must have recorded the ARM/Thumb mode of this frag.  */
  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
	{
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
					   ? selected_cpu : arm_arch_none,
					   arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  if (bytes & (noop_size - 1))
    {
      /* Pad up to a noop boundary with zero (data) bytes first.  */
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
22222
22223 /* Called from md_do_align. Used to create an alignment
22224 frag in a code section. */
22225
22226 void
22227 arm_frag_align_code (int n, int max)
22228 {
22229 char * p;
22230
22231 /* We assume that there will never be a requirement
22232 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
22233 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
22234 {
22235 char err_msg[128];
22236
22237 sprintf (err_msg,
22238 _("alignments greater than %d bytes not supported in .text sections."),
22239 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
22240 as_fatal ("%s", err_msg);
22241 }
22242
22243 p = frag_var (rs_align_code,
22244 MAX_MEM_FOR_RS_ALIGN_CODE,
22245 1,
22246 (relax_substateT) max,
22247 (symbolS *) NULL,
22248 (offsetT) n,
22249 (char *) NULL);
22250 *p = 0;
22251 }
22252
22253 /* Perform target specific initialisation of a frag.
22254 Note - despite the name this initialisation is not done when the frag
22255 is created, but only when its type is assigned. A frag can be created
22256 and used a long time before its type is set, so beware of assuming that
22257 this initialisation is performed first. */
22258
22259 #ifndef OBJ_ELF
void
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
{
  /* Record whether this frag is in an ARM or a THUMB area.  MODE_RECORDED
     marks the field as valid; the alignment code asserts that the mode
     really was captured before relying on it.  */
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
}
22266
22267 #else /* OBJ_ELF is defined. */
void
arm_init_frag (fragS * fragP, int max_chars)
{
  bfd_boolean frag_thumb_mode;

  /* If the current ARM vs THUMB mode has not already
     been recorded into this frag then do so now.  */
  if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
    fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_get_section_flags (NULL, now_seg) & SEC_DEBUGGING)
    return;

  /* The MODE_RECORDED marker is known to be set here (see above), so the
     XOR strips it, leaving just the recorded ARM/Thumb mode bits.  */
  frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED;

  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  switch (fragP->fr_type)
    {
    case rs_align:
    case rs_align_test:
    case rs_fill:
      /* Alignment padding in these frags is emitted as data bytes.  */
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align_code:
      /* Code alignment is padded with no-ops in the recorded mode.  */
      mapping_state_2 (frag_thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
      break;
    default:
      break;
    }
}
22301
22302 /* When we change sections we need to issue a new mapping symbol. */
22303
22304 void
22305 arm_elf_change_section (void)
22306 {
22307 /* Link an unlinked unwind index table section to the .text section. */
22308 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
22309 && elf_linked_to_section (now_seg) == NULL)
22310 elf_linked_to_section (now_seg) = text_section;
22311 }
22312
22313 int
22314 arm_elf_section_type (const char * str, size_t len)
22315 {
22316 if (len == 5 && strncmp (str, "exidx", 5) == 0)
22317 return SHT_ARM_EXIDX;
22318
22319 return -1;
22320 }
22321 \f
22322 /* Code to deal with unwinding tables. */
22323
22324 static void add_unwind_adjustsp (offsetT);
22325
22326 /* Generate any deferred unwind frame offset. */
22327
22328 static void
22329 flush_pending_unwind (void)
22330 {
22331 offsetT offset;
22332
22333 offset = unwind.pending_offset;
22334 unwind.pending_offset = 0;
22335 if (offset != 0)
22336 add_unwind_adjustsp (offset);
22337 }
22338
22339 /* Add an opcode to this list for this function. Two-byte opcodes should
22340 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
22341 order. */
22342
static void
add_unwind_opcode (valueT op, int length)
{
  /* Add any deferred stack adjustment.  */
  if (unwind.pending_offset)
    flush_pending_unwind ();

  /* Emitting a new opcode invalidates any previously recorded
     "sp already restored" state.  */
  unwind.sp_restored = 0;

  /* Grow the opcode buffer in ARM_OPCODE_CHUNK_SIZE increments.  */
  if (unwind.opcode_count + length > unwind.opcode_alloc)
    {
      unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
      if (unwind.opcodes)
	unwind.opcodes = XRESIZEVEC (unsigned char, unwind.opcodes,
				     unwind.opcode_alloc);
      else
	unwind.opcodes = XNEWVEC (unsigned char, unwind.opcode_alloc);
    }
  /* Append the opcode bytes, least significant byte first; since the
     opcode list itself is built in reverse, a multi-byte opcode passed
     as op[0] << 8 | op[1] ends up stored op[1] then op[0].  */
  while (length > 0)
    {
      length--;
      unwind.opcodes[unwind.opcode_count] = op & 0xff;
      op >>= 8;
      unwind.opcode_count++;
    }
}
22369
22370 /* Add unwind opcodes to adjust the stack pointer. */
22371
static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      /* Per the ARM EHABI, opcode 0xb2 means vsp += 0x204 + (uleb128 << 2),
	 hence the bias of 0x204 and the >> 2 here.  */
      o = (valueT) ((offset - 0x204) >> 2);
      if (o == 0)
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  if (o)
	    bytes[n] |= 0x80;
	  n++;
	}
      /* Add the insn.  Bytes are emitted most-significant first because
	 the opcode list is later reversed when packed into words.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes.  Each short opcode 0x00..0x3f encodes
	 vsp += (op << 2) + 4, covering 4..256 bytes apiece.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      offset = -offset;
      /* Opcodes 0x40..0x7f encode vsp -= ((op & 0x3f) << 2) + 4, so emit
	 maximal 0x100-byte decrements until the remainder fits.  */
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
22431
22432 /* Finish the list of unwind opcodes for this function. */
22433
static void
finish_unwind_opcodes (void)
{
  valueT op;

  if (unwind.fp_used)
    {
      /* Adjust sp as necessary.  */
      unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
      flush_pending_unwind ();

      /* After restoring sp from the frame pointer.  */
      /* 0x90 | reg: EHABI "set vsp = r[reg]" opcode — presumably; emitted
	 last here so it executes first when the list is reversed.  */
      op = 0x90 | unwind.fp_reg;
      add_unwind_opcode (op, 1);
    }
  else
    flush_pending_unwind ();
}
22452
22453
22454 /* Start an exception table entry. If idx is nonzero this is an index table
22455 entry. */
22456
static void
start_unwind_section (const segT text_seg, int idx)
{
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  const char * group_name;
  char * sec_name;
  int type;
  int flags;
  int linkonce;

  /* Index tables live in .ARM.exidx* (SHT_ARM_EXIDX); unwind data
     lives in .ARM.extab* (ordinary progbits).  */
  if (idx)
    {
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
    }
  else
    {
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
    }

  /* Derive the unwind section name from the text section name:
     ".text" maps to the bare prefix, anything else gets the text
     name appended.  */
  text_name = segment_name (text_seg);
  if (streq (text_name, ".text"))
    text_name = "";

  if (strncmp (text_name, ".gnu.linkonce.t.",
	       strlen (".gnu.linkonce.t.")) == 0)
    {
      /* Link-once text sections get the matching link-once unwind
	 section prefix.  */
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
    }

  sec_name = concat (prefix, text_name, (char *) NULL);

  flags = SHF_ALLOC;
  linkonce = 0;
  group_name = 0;

  /* Handle COMDAT group.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
    {
      group_name = elf_group_name (text_seg);
      if (group_name == NULL)
	{
	  as_bad (_("Group section `%s' has no group signature"),
		  segment_name (text_seg));
	  ignore_rest_of_line ();
	  return;
	}
      flags |= SHF_GROUP;
      linkonce = 1;
    }

  /* Switch to (creating if necessary) the unwind section.  */
  obj_elf_change_section (sec_name, type, 0, flags, 0, group_name,
			  linkonce, 0);

  /* Set the section link for index tables.  */
  if (idx)
    elf_linked_to_section (now_seg) = text_seg;
}
22521
22522
22523 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
22524 personality routine data. Returns zero, or the index table value for
22525 an inline entry. */
22526
static valueT
create_unwind_entry (int have_data)
{
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.  */
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      /* personality_index -2 marks a .cantunwind directive.  */
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
	{
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  /* Personality 0 can hold at most 3 opcode bytes inline.  */
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table.  */
	      data = 0x80;
	      n = 3;
	      /* Pack the opcodes MSB first, reversing the (reverse-built)
		 list as we go.  */
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.  */
	size = unwind.opcode_count - 2;
    }
  else
    {
      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
      if (unwind.personality_index != -1)
	{
	  as_bad (_("attempt to recreate an unwind entry"));
	  return 1;
	}

      /* An extra byte is required for the opcode count.  */
      size = unwind.opcode_count + 1;
    }

  /* Round the byte count up to a whole number of 32-bit words.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  /* Table entries are 4-byte aligned.  */
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  /* PR 13449: Zero the table entries in case some of them are not used.  */
  memset (ptr, 0, (size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.  */
      data = size > 0 ? size - 1 : 0;
      n = 3;
      break;

      /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.  */
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  /* Zero tells the caller the entry is in the table, not inline.  */
  return 0;
}
22691
22692
22693 /* Initialize the DWARF-2 unwind information for this procedure. */
22694
void
tc_arm_frame_initial_instructions (void)
{
  /* At function entry the CFA is the stack pointer with zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
22700 #endif /* OBJ_ELF */
22701
22702 /* Convert REGNAME to a DWARF-2 register number. */
22703
22704 int
22705 tc_arm_regname_to_dw2regnum (char *regname)
22706 {
22707 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
22708 if (reg != FAIL)
22709 return reg;
22710
22711 /* PR 16694: Allow VFP registers as well. */
22712 reg = arm_reg_parse (&regname, REG_TYPE_VFS);
22713 if (reg != FAIL)
22714 return 64 + reg;
22715
22716 reg = arm_reg_parse (&regname, REG_TYPE_VFD);
22717 if (reg != FAIL)
22718 return reg + 256;
22719
22720 return FAIL;
22721 }
22722
22723 #ifdef TE_PE
void
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
{
  /* Emit SYMBOL as a SIZE-byte section-relative (secrel) expression,
     as required by DWARF-2 output on PE targets.  */
  expressionS exp;

  exp.X_op = O_secrel;
  exp.X_add_symbol = symbol;
  exp.X_add_number = 0;
  emit_expr (&exp, size);
}
22734 #endif
22735
22736 /* MD interface: Symbol and relocation handling. */
22737
22738 /* Return the address within the segment that a PC-relative fixup is
22739 relative to. For ARM, PC-relative fixups applied to instructions
22740 are generally relative to the location of the fixup plus 8 bytes.
22741 Thumb branches are offset by 4, and Thumb loads relative to PC
22742 require special handling. */
22743
long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  /* Default base: the address of the fixup itself.  */
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* For a branch to a same-section ARM function on v5t+ (which will
	 become a BLX), keep the real fixup address as the base even when
	 a relocation is emitted.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
22865
22866 static bfd_boolean flag_warn_syms = TRUE;
22867
22868 bfd_boolean
22869 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
22870 {
22871 /* PR 18347 - Warn if the user attempts to create a symbol with the same
22872 name as an ARM instruction. Whilst strictly speaking it is allowed, it
22873 does mean that the resulting code might be very confusing to the reader.
22874 Also this warning can be triggered if the user omits an operand before
22875 an immediate address, eg:
22876
22877 LDR =foo
22878
22879 GAS treats this as an assignment of the value of the symbol foo to a
22880 symbol LDR, and so (without this code) it will not issue any kind of
22881 warning or error message.
22882
22883 Note - ARM instructions are case-insensitive but the strings in the hash
22884 table are all stored in lower case, so we must first ensure that name is
22885 lower case too. */
22886 if (flag_warn_syms && arm_ops_hsh)
22887 {
22888 char * nbuf = strdup (name);
22889 char * p;
22890
22891 for (p = nbuf; *p; p++)
22892 *p = TOLOWER (*p);
22893 if (hash_find (arm_ops_hsh, nbuf) != NULL)
22894 {
22895 static struct hash_control * already_warned = NULL;
22896
22897 if (already_warned == NULL)
22898 already_warned = hash_new ();
22899 /* Only warn about the symbol once. To keep the code
22900 simple we let hash_insert do the lookup for us. */
22901 if (hash_insert (already_warned, name, NULL) == NULL)
22902 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
22903 }
22904 else
22905 free (nbuf);
22906 }
22907
22908 return FALSE;
22909 }
22910
22911 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
22912 Otherwise we have no need to default values of symbols. */
22913
symbolS *
md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap first-two-character check avoids the full string compare for
     the vast majority of symbols.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
	{
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  /* Create the GOT symbol lazily, on first reference.  */
	  GOT_symbol = symbol_new (name, undefined_section,
				   (valueT) 0, & zero_address_frag);
	}

      return GOT_symbol;
    }
#endif

  /* No other symbols are given default values.  */
  return NULL;
}
22936
22937 /* Subroutine of md_apply_fix. Check to see if an immediate can be
22938 computed as two separate immediate values, added together. We
22939 already know that this value cannot be computed by just one ARM
22940 instruction. */
22941
static unsigned int
validate_immediate_twopart (unsigned int val,
			    unsigned int * highpart)
{
  unsigned int a;
  unsigned int i;

  /* An ARM data-processing immediate is an 8-bit value rotated right by
     an even amount; the encodings produced below combine the 8-bit byte
     with the rotation field (i << 7 == (i / 2) << 8).  Try each even
     rotation until the rotated value has a non-zero low byte.  */
  for (i = 0; i < 32; i += 2)
    if (((a = rotate_left (val, i)) & 0xff) != 0)
      {
	/* The low byte becomes one immediate; the remainder must itself
	   fit in a single byte (at some other rotation) to form the
	   second immediate, returned via *HIGHPART.  */
	if (a & 0xff00)
	  {
	    if (a & ~ 0xffff)
	      continue;
	    * highpart = (a >> 8) | ((i + 24) << 7);
	  }
	else if (a & 0xff0000)
	  {
	    if (a & 0xff000000)
	      continue;
	    * highpart = (a >> 16) | ((i + 16) << 7);
	  }
	else
	  {
	    gas_assert (a & 0xff000000);
	    * highpart = (a >> 24) | ((i + 8) << 7);
	  }

	return (a & 0xff) | (i << 7);
      }

  /* VAL cannot be expressed as the sum of two ARM immediates.  */
  return FAIL;
}
22975
22976 static int
22977 validate_offset_imm (unsigned int val, int hwse)
22978 {
22979 if ((hwse && val > 255) || val > 4095)
22980 return FAIL;
22981 return val;
22982 }
22983
22984 /* Subroutine of md_apply_fix. Do those data_ops which can take a
22985 negative immediate constant by altering the instruction. A bit of
22986 a hack really.
22987 MOV <-> MVN
22988 AND <-> BIC
22989 ADC <-> SBC
22990 by inverting the second operand, and
22991 ADD <-> SUB
22992 CMP <-> CMN
22993 by negating the second operand. */
22994
static int
negate_data_op (unsigned long * instruction,
		unsigned long value)
{
  int op, new_inst;
  unsigned long negated, inverted;

  /* Pre-compute both candidate encodings; either may be FAIL if the
     transformed constant is not a valid ARM immediate.  */
  negated = encode_arm_immediate (-value);
  inverted = encode_arm_immediate (~value);

  op = (*instruction >> DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* First negates.  */
    case OPCODE_SUB:		 /* ADD <-> SUB */
      new_inst = OPCODE_ADD;
      value = negated;
      break;

    case OPCODE_ADD:
      new_inst = OPCODE_SUB;
      value = negated;
      break;

    case OPCODE_CMP:		 /* CMP <-> CMN */
      new_inst = OPCODE_CMN;
      value = negated;
      break;

    case OPCODE_CMN:
      new_inst = OPCODE_CMP;
      value = negated;
      break;

      /* Now Inverted ops.  */
    case OPCODE_MOV:		 /* MOV <-> MVN */
      new_inst = OPCODE_MVN;
      value = inverted;
      break;

    case OPCODE_MVN:
      new_inst = OPCODE_MOV;
      value = inverted;
      break;

    case OPCODE_AND:		 /* AND <-> BIC */
      new_inst = OPCODE_BIC;
      value = inverted;
      break;

    case OPCODE_BIC:
      new_inst = OPCODE_AND;
      value = inverted;
      break;

    case OPCODE_ADC:		  /* ADC <-> SBC */
      new_inst = OPCODE_SBC;
      value = inverted;
      break;

    case OPCODE_SBC:
      new_inst = OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  /* The transformed constant may still not be encodable.  */
  if (value == (unsigned) FAIL)
    return FAIL;

  /* Patch the opcode field in place and return the new immediate.  */
  *instruction &= OPCODE_MASK;
  *instruction |= new_inst << DATA_OP_SHIFT;
  return value;
}
23072
23073 /* Like negate_data_op, but for Thumb-2. */
23074
static unsigned int
thumb32_negate_data_op (offsetT *instruction, unsigned int value)
{
  int op, new_inst;
  int rd;
  unsigned int negated, inverted;

  /* Pre-compute both candidate encodings; either may be FAIL if the
     transformed constant is not a valid Thumb-2 immediate.  */
  negated = encode_thumb32_immediate (-value);
  inverted = encode_thumb32_immediate (~value);

  rd = (*instruction >> 8) & 0xf;
  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* ADD <-> SUB.  Includes CMP <-> CMN.  */
    case T2_OPCODE_SUB:
      new_inst = T2_OPCODE_ADD;
      value = negated;
      break;

    case T2_OPCODE_ADD:
      new_inst = T2_OPCODE_SUB;
      value = negated;
      break;

      /* ORR <-> ORN.  Includes MOV <-> MVN.  */
    case T2_OPCODE_ORR:
      new_inst = T2_OPCODE_ORN;
      value = inverted;
      break;

    case T2_OPCODE_ORN:
      new_inst = T2_OPCODE_ORR;
      value = inverted;
      break;

      /* AND <-> BIC.  TST has no inverted equivalent.  */
    case T2_OPCODE_AND:
      new_inst = T2_OPCODE_BIC;
      /* rd == 15 here encodes TST (AND with discarded result), which
	 cannot be converted.  */
      if (rd == 15)
	value = FAIL;
      else
	value = inverted;
      break;

    case T2_OPCODE_BIC:
      new_inst = T2_OPCODE_AND;
      value = inverted;
      break;

      /* ADC <-> SBC  */
    case T2_OPCODE_ADC:
      new_inst = T2_OPCODE_SBC;
      value = inverted;
      break;

    case T2_OPCODE_SBC:
      new_inst = T2_OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  /* The transformed constant may still not be encodable.  */
  if (value == (unsigned int)FAIL)
    return FAIL;

  /* Patch the opcode field in place and return the new immediate.  */
  *instruction &= T2_OPCODE_MASK;
  *instruction |= new_inst << T2_DATA_OP_SHIFT;
  return value;
}
23148
23149 /* Read a 32-bit thumb instruction from buf. */
23150
23151 static unsigned long
23152 get_thumb32_insn (char * buf)
23153 {
23154 unsigned long insn;
23155 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
23156 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23157
23158 return insn;
23159 }
23160
23161 /* We usually want to set the low bit on the address of thumb function
23162 symbols. In particular .word foo - . should have the low bit set.
23163 Generic code tries to fold the difference of two symbols to
23164 a constant. Prevent this and force a relocation when the first symbols
23165 is a thumb function. */
23166
23167 bfd_boolean
23168 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
23169 {
23170 if (op == O_subtract
23171 && l->X_op == O_symbol
23172 && r->X_op == O_symbol
23173 && THUMB_IS_FUNC (l->X_add_symbol))
23174 {
23175 l->X_op = O_subtract;
23176 l->X_op_symbol = r->X_add_symbol;
23177 l->X_add_number -= r->X_add_number;
23178 return TRUE;
23179 }
23180
23181 /* Process as normal. */
23182 return FALSE;
23183 }
23184
23185 /* Encode Thumb2 unconditional branches and calls. The encoding
23186 for the 2 are identical for the immediate values. */
23187
static void
encode_thumb2_b_bl_offset (char * buf, offsetT value)
{
  /* Mask covering the J1 (bit 13) and J2 (bit 11) bits of the second
     halfword.  */
#define T2I1I2MASK  ((1 << 13) | (1 << 11))
  offsetT newval;
  offsetT newval2;
  addressT S, I1, I2, lo, hi;

  /* Split the branch offset into the fields of the T32 B/BL encoding:
     sign bit S, the two high offset bits I1/I2, a 10-bit high part and
     an 11-bit low part (bit 0 is dropped — offsets are halfword
     aligned).  */
  S = (value >> 24) & 0x01;
  I1 = (value >> 23) & 0x01;
  I2 = (value >> 22) & 0x01;
  hi = (value >> 12) & 0x3ff;
  lo = (value >> 1) & 0x7ff;
  newval = md_chars_to_number (buf, THUMB_SIZE);
  newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
  newval |= (S << 10) | hi;
  newval2 &= ~T2I1I2MASK;
  /* J1/J2 are stored as NOT(I1 XOR S) / NOT(I2 XOR S); the final XOR
     with T2I1I2MASK performs the NOT on just those two bits.  */
  newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
  md_number_to_chars (buf, newval, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
}
23209
23210 void
23211 md_apply_fix (fixS * fixP,
23212 valueT * valP,
23213 segT seg)
23214 {
23215 offsetT value = * valP;
23216 offsetT newval;
23217 unsigned int newimm;
23218 unsigned long temp;
23219 int sign;
23220 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
23221
23222 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
23223
23224 /* Note whether this will delete the relocation. */
23225
23226 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
23227 fixP->fx_done = 1;
23228
23229 /* On a 64-bit host, silently truncate 'value' to 32 bits for
23230 consistency with the behaviour on 32-bit hosts. Remember value
23231 for emit_reloc. */
23232 value &= 0xffffffff;
23233 value ^= 0x80000000;
23234 value -= 0x80000000;
23235
23236 *valP = value;
23237 fixP->fx_addnumber = value;
23238
23239 /* Same treatment for fixP->fx_offset. */
23240 fixP->fx_offset &= 0xffffffff;
23241 fixP->fx_offset ^= 0x80000000;
23242 fixP->fx_offset -= 0x80000000;
23243
23244 switch (fixP->fx_r_type)
23245 {
23246 case BFD_RELOC_NONE:
23247 /* This will need to go in the object file. */
23248 fixP->fx_done = 0;
23249 break;
23250
23251 case BFD_RELOC_ARM_IMMEDIATE:
23252 /* We claim that this fixup has been processed here,
23253 even if in fact we generate an error because we do
23254 not have a reloc for it, so tc_gen_reloc will reject it. */
23255 fixP->fx_done = 1;
23256
23257 if (fixP->fx_addsy)
23258 {
23259 const char *msg = 0;
23260
23261 if (! S_IS_DEFINED (fixP->fx_addsy))
23262 msg = _("undefined symbol %s used as an immediate value");
23263 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
23264 msg = _("symbol %s is in a different section");
23265 else if (S_IS_WEAK (fixP->fx_addsy))
23266 msg = _("symbol %s is weak and may be overridden later");
23267
23268 if (msg)
23269 {
23270 as_bad_where (fixP->fx_file, fixP->fx_line,
23271 msg, S_GET_NAME (fixP->fx_addsy));
23272 break;
23273 }
23274 }
23275
23276 temp = md_chars_to_number (buf, INSN_SIZE);
23277
23278 /* If the offset is negative, we should use encoding A2 for ADR. */
23279 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
23280 newimm = negate_data_op (&temp, value);
23281 else
23282 {
23283 newimm = encode_arm_immediate (value);
23284
23285 /* If the instruction will fail, see if we can fix things up by
23286 changing the opcode. */
23287 if (newimm == (unsigned int) FAIL)
23288 newimm = negate_data_op (&temp, value);
23289 /* MOV accepts both ARM modified immediate (A1 encoding) and
23290 UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
23291 When disassembling, MOV is preferred when there is no encoding
23292 overlap. */
23293 if (newimm == (unsigned int) FAIL
23294 && ((temp >> DATA_OP_SHIFT) & 0xf) == OPCODE_MOV
23295 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
23296 && !((temp >> SBIT_SHIFT) & 0x1)
23297 && value >= 0 && value <= 0xffff)
23298 {
23299 /* Clear bits[23:20] to change encoding from A1 to A2. */
23300 temp &= 0xff0fffff;
23301 /* Encoding high 4bits imm. Code below will encode the remaining
23302 low 12bits. */
23303 temp |= (value & 0x0000f000) << 4;
23304 newimm = value & 0x00000fff;
23305 }
23306 }
23307
23308 if (newimm == (unsigned int) FAIL)
23309 {
23310 as_bad_where (fixP->fx_file, fixP->fx_line,
23311 _("invalid constant (%lx) after fixup"),
23312 (unsigned long) value);
23313 break;
23314 }
23315
23316 newimm |= (temp & 0xfffff000);
23317 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
23318 break;
23319
23320 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
23321 {
23322 unsigned int highpart = 0;
23323 unsigned int newinsn = 0xe1a00000; /* nop. */
23324
23325 if (fixP->fx_addsy)
23326 {
23327 const char *msg = 0;
23328
23329 if (! S_IS_DEFINED (fixP->fx_addsy))
23330 msg = _("undefined symbol %s used as an immediate value");
23331 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
23332 msg = _("symbol %s is in a different section");
23333 else if (S_IS_WEAK (fixP->fx_addsy))
23334 msg = _("symbol %s is weak and may be overridden later");
23335
23336 if (msg)
23337 {
23338 as_bad_where (fixP->fx_file, fixP->fx_line,
23339 msg, S_GET_NAME (fixP->fx_addsy));
23340 break;
23341 }
23342 }
23343
23344 newimm = encode_arm_immediate (value);
23345 temp = md_chars_to_number (buf, INSN_SIZE);
23346
23347 /* If the instruction will fail, see if we can fix things up by
23348 changing the opcode. */
23349 if (newimm == (unsigned int) FAIL
23350 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
23351 {
23352 /* No ? OK - try using two ADD instructions to generate
23353 the value. */
23354 newimm = validate_immediate_twopart (value, & highpart);
23355
23356 /* Yes - then make sure that the second instruction is
23357 also an add. */
23358 if (newimm != (unsigned int) FAIL)
23359 newinsn = temp;
23360 /* Still No ? Try using a negated value. */
23361 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
23362 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
23363 /* Otherwise - give up. */
23364 else
23365 {
23366 as_bad_where (fixP->fx_file, fixP->fx_line,
23367 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
23368 (long) value);
23369 break;
23370 }
23371
23372 /* Replace the first operand in the 2nd instruction (which
23373 is the PC) with the destination register. We have
23374 already added in the PC in the first instruction and we
23375 do not want to do it again. */
23376 newinsn &= ~ 0xf0000;
23377 newinsn |= ((newinsn & 0x0f000) << 4);
23378 }
23379
23380 newimm |= (temp & 0xfffff000);
23381 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
23382
23383 highpart |= (newinsn & 0xfffff000);
23384 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
23385 }
23386 break;
23387
23388 case BFD_RELOC_ARM_OFFSET_IMM:
23389 if (!fixP->fx_done && seg->use_rela_p)
23390 value = 0;
23391 /* Fall through. */
23392
23393 case BFD_RELOC_ARM_LITERAL:
23394 sign = value > 0;
23395
23396 if (value < 0)
23397 value = - value;
23398
23399 if (validate_offset_imm (value, 0) == FAIL)
23400 {
23401 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
23402 as_bad_where (fixP->fx_file, fixP->fx_line,
23403 _("invalid literal constant: pool needs to be closer"));
23404 else
23405 as_bad_where (fixP->fx_file, fixP->fx_line,
23406 _("bad immediate value for offset (%ld)"),
23407 (long) value);
23408 break;
23409 }
23410
23411 newval = md_chars_to_number (buf, INSN_SIZE);
23412 if (value == 0)
23413 newval &= 0xfffff000;
23414 else
23415 {
23416 newval &= 0xff7ff000;
23417 newval |= value | (sign ? INDEX_UP : 0);
23418 }
23419 md_number_to_chars (buf, newval, INSN_SIZE);
23420 break;
23421
23422 case BFD_RELOC_ARM_OFFSET_IMM8:
23423 case BFD_RELOC_ARM_HWLITERAL:
23424 sign = value > 0;
23425
23426 if (value < 0)
23427 value = - value;
23428
23429 if (validate_offset_imm (value, 1) == FAIL)
23430 {
23431 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
23432 as_bad_where (fixP->fx_file, fixP->fx_line,
23433 _("invalid literal constant: pool needs to be closer"));
23434 else
23435 as_bad_where (fixP->fx_file, fixP->fx_line,
23436 _("bad immediate value for 8-bit offset (%ld)"),
23437 (long) value);
23438 break;
23439 }
23440
23441 newval = md_chars_to_number (buf, INSN_SIZE);
23442 if (value == 0)
23443 newval &= 0xfffff0f0;
23444 else
23445 {
23446 newval &= 0xff7ff0f0;
23447 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
23448 }
23449 md_number_to_chars (buf, newval, INSN_SIZE);
23450 break;
23451
23452 case BFD_RELOC_ARM_T32_OFFSET_U8:
23453 if (value < 0 || value > 1020 || value % 4 != 0)
23454 as_bad_where (fixP->fx_file, fixP->fx_line,
23455 _("bad immediate value for offset (%ld)"), (long) value);
23456 value /= 4;
23457
23458 newval = md_chars_to_number (buf+2, THUMB_SIZE);
23459 newval |= value;
23460 md_number_to_chars (buf+2, newval, THUMB_SIZE);
23461 break;
23462
23463 case BFD_RELOC_ARM_T32_OFFSET_IMM:
23464 /* This is a complicated relocation used for all varieties of Thumb32
23465 load/store instruction with immediate offset:
23466
23467 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
23468 *4, optional writeback(W)
23469 (doubleword load/store)
23470
23471 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
23472 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
23473 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
23474 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
23475 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
23476
23477 Uppercase letters indicate bits that are already encoded at
23478 this point. Lowercase letters are our problem. For the
23479 second block of instructions, the secondary opcode nybble
23480 (bits 8..11) is present, and bit 23 is zero, even if this is
23481 a PC-relative operation. */
23482 newval = md_chars_to_number (buf, THUMB_SIZE);
23483 newval <<= 16;
23484 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
23485
23486 if ((newval & 0xf0000000) == 0xe0000000)
23487 {
23488 /* Doubleword load/store: 8-bit offset, scaled by 4. */
23489 if (value >= 0)
23490 newval |= (1 << 23);
23491 else
23492 value = -value;
23493 if (value % 4 != 0)
23494 {
23495 as_bad_where (fixP->fx_file, fixP->fx_line,
23496 _("offset not a multiple of 4"));
23497 break;
23498 }
23499 value /= 4;
23500 if (value > 0xff)
23501 {
23502 as_bad_where (fixP->fx_file, fixP->fx_line,
23503 _("offset out of range"));
23504 break;
23505 }
23506 newval &= ~0xff;
23507 }
23508 else if ((newval & 0x000f0000) == 0x000f0000)
23509 {
23510 /* PC-relative, 12-bit offset. */
23511 if (value >= 0)
23512 newval |= (1 << 23);
23513 else
23514 value = -value;
23515 if (value > 0xfff)
23516 {
23517 as_bad_where (fixP->fx_file, fixP->fx_line,
23518 _("offset out of range"));
23519 break;
23520 }
23521 newval &= ~0xfff;
23522 }
23523 else if ((newval & 0x00000100) == 0x00000100)
23524 {
23525 /* Writeback: 8-bit, +/- offset. */
23526 if (value >= 0)
23527 newval |= (1 << 9);
23528 else
23529 value = -value;
23530 if (value > 0xff)
23531 {
23532 as_bad_where (fixP->fx_file, fixP->fx_line,
23533 _("offset out of range"));
23534 break;
23535 }
23536 newval &= ~0xff;
23537 }
23538 else if ((newval & 0x00000f00) == 0x00000e00)
23539 {
23540 /* T-instruction: positive 8-bit offset. */
23541 if (value < 0 || value > 0xff)
23542 {
23543 as_bad_where (fixP->fx_file, fixP->fx_line,
23544 _("offset out of range"));
23545 break;
23546 }
23547 newval &= ~0xff;
23548 newval |= value;
23549 }
23550 else
23551 {
23552 /* Positive 12-bit or negative 8-bit offset. */
23553 int limit;
23554 if (value >= 0)
23555 {
23556 newval |= (1 << 23);
23557 limit = 0xfff;
23558 }
23559 else
23560 {
23561 value = -value;
23562 limit = 0xff;
23563 }
23564 if (value > limit)
23565 {
23566 as_bad_where (fixP->fx_file, fixP->fx_line,
23567 _("offset out of range"));
23568 break;
23569 }
23570 newval &= ~limit;
23571 }
23572
23573 newval |= value;
23574 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
23575 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
23576 break;
23577
23578 case BFD_RELOC_ARM_SHIFT_IMM:
23579 newval = md_chars_to_number (buf, INSN_SIZE);
23580 if (((unsigned long) value) > 32
23581 || (value == 32
23582 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
23583 {
23584 as_bad_where (fixP->fx_file, fixP->fx_line,
23585 _("shift expression is too large"));
23586 break;
23587 }
23588
23589 if (value == 0)
23590 /* Shifts of zero must be done as lsl. */
23591 newval &= ~0x60;
23592 else if (value == 32)
23593 value = 0;
23594 newval &= 0xfffff07f;
23595 newval |= (value & 0x1f) << 7;
23596 md_number_to_chars (buf, newval, INSN_SIZE);
23597 break;
23598
23599 case BFD_RELOC_ARM_T32_IMMEDIATE:
23600 case BFD_RELOC_ARM_T32_ADD_IMM:
23601 case BFD_RELOC_ARM_T32_IMM12:
23602 case BFD_RELOC_ARM_T32_ADD_PC12:
23603 /* We claim that this fixup has been processed here,
23604 even if in fact we generate an error because we do
23605 not have a reloc for it, so tc_gen_reloc will reject it. */
23606 fixP->fx_done = 1;
23607
23608 if (fixP->fx_addsy
23609 && ! S_IS_DEFINED (fixP->fx_addsy))
23610 {
23611 as_bad_where (fixP->fx_file, fixP->fx_line,
23612 _("undefined symbol %s used as an immediate value"),
23613 S_GET_NAME (fixP->fx_addsy));
23614 break;
23615 }
23616
23617 newval = md_chars_to_number (buf, THUMB_SIZE);
23618 newval <<= 16;
23619 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
23620
23621 newimm = FAIL;
23622 if ((fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
23623 /* ARMv8-M Baseline MOV will reach here, but it doesn't support
23624 Thumb2 modified immediate encoding (T2). */
23625 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
23626 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
23627 {
23628 newimm = encode_thumb32_immediate (value);
23629 if (newimm == (unsigned int) FAIL)
23630 newimm = thumb32_negate_data_op (&newval, value);
23631 }
23632 if (newimm == (unsigned int) FAIL)
23633 {
23634 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE)
23635 {
23636 /* Turn add/sum into addw/subw. */
23637 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
23638 newval = (newval & 0xfeffffff) | 0x02000000;
23639 /* No flat 12-bit imm encoding for addsw/subsw. */
23640 if ((newval & 0x00100000) == 0)
23641 {
23642 /* 12 bit immediate for addw/subw. */
23643 if (value < 0)
23644 {
23645 value = -value;
23646 newval ^= 0x00a00000;
23647 }
23648 if (value > 0xfff)
23649 newimm = (unsigned int) FAIL;
23650 else
23651 newimm = value;
23652 }
23653 }
23654 else
23655 {
23656 /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
23657 UINT16 (T3 encoding), MOVW only accepts UINT16. When
23658 disassembling, MOV is preferred when there is no encoding
23659 overlap. */
23660 if (((newval >> T2_DATA_OP_SHIFT) & 0xf) == T2_OPCODE_ORR
23661 /* NOTE: MOV uses the ORR opcode in Thumb 2 mode
23662 but with the Rn field [19:16] set to 1111. */
23663 && (((newval >> 16) & 0xf) == 0xf)
23664 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m)
23665 && !((newval >> T2_SBIT_SHIFT) & 0x1)
23666 && value >= 0 && value <= 0xffff)
23667 {
23668 /* Toggle bit[25] to change encoding from T2 to T3. */
23669 newval ^= 1 << 25;
23670 /* Clear bits[19:16]. */
23671 newval &= 0xfff0ffff;
23672 /* Encoding high 4bits imm. Code below will encode the
23673 remaining low 12bits. */
23674 newval |= (value & 0x0000f000) << 4;
23675 newimm = value & 0x00000fff;
23676 }
23677 }
23678 }
23679
23680 if (newimm == (unsigned int)FAIL)
23681 {
23682 as_bad_where (fixP->fx_file, fixP->fx_line,
23683 _("invalid constant (%lx) after fixup"),
23684 (unsigned long) value);
23685 break;
23686 }
23687
23688 newval |= (newimm & 0x800) << 15;
23689 newval |= (newimm & 0x700) << 4;
23690 newval |= (newimm & 0x0ff);
23691
23692 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
23693 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
23694 break;
23695
23696 case BFD_RELOC_ARM_SMC:
23697 if (((unsigned long) value) > 0xffff)
23698 as_bad_where (fixP->fx_file, fixP->fx_line,
23699 _("invalid smc expression"));
23700 newval = md_chars_to_number (buf, INSN_SIZE);
23701 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
23702 md_number_to_chars (buf, newval, INSN_SIZE);
23703 break;
23704
23705 case BFD_RELOC_ARM_HVC:
23706 if (((unsigned long) value) > 0xffff)
23707 as_bad_where (fixP->fx_file, fixP->fx_line,
23708 _("invalid hvc expression"));
23709 newval = md_chars_to_number (buf, INSN_SIZE);
23710 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
23711 md_number_to_chars (buf, newval, INSN_SIZE);
23712 break;
23713
23714 case BFD_RELOC_ARM_SWI:
23715 if (fixP->tc_fix_data != 0)
23716 {
23717 if (((unsigned long) value) > 0xff)
23718 as_bad_where (fixP->fx_file, fixP->fx_line,
23719 _("invalid swi expression"));
23720 newval = md_chars_to_number (buf, THUMB_SIZE);
23721 newval |= value;
23722 md_number_to_chars (buf, newval, THUMB_SIZE);
23723 }
23724 else
23725 {
23726 if (((unsigned long) value) > 0x00ffffff)
23727 as_bad_where (fixP->fx_file, fixP->fx_line,
23728 _("invalid swi expression"));
23729 newval = md_chars_to_number (buf, INSN_SIZE);
23730 newval |= value;
23731 md_number_to_chars (buf, newval, INSN_SIZE);
23732 }
23733 break;
23734
23735 case BFD_RELOC_ARM_MULTI:
23736 if (((unsigned long) value) > 0xffff)
23737 as_bad_where (fixP->fx_file, fixP->fx_line,
23738 _("invalid expression in load/store multiple"));
23739 newval = value | md_chars_to_number (buf, INSN_SIZE);
23740 md_number_to_chars (buf, newval, INSN_SIZE);
23741 break;
23742
23743 #ifdef OBJ_ELF
23744 case BFD_RELOC_ARM_PCREL_CALL:
23745
23746 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23747 && fixP->fx_addsy
23748 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23749 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23750 && THUMB_IS_FUNC (fixP->fx_addsy))
23751 /* Flip the bl to blx. This is a simple flip
23752 bit here because we generate PCREL_CALL for
23753 unconditional bls. */
23754 {
23755 newval = md_chars_to_number (buf, INSN_SIZE);
23756 newval = newval | 0x10000000;
23757 md_number_to_chars (buf, newval, INSN_SIZE);
23758 temp = 1;
23759 fixP->fx_done = 1;
23760 }
23761 else
23762 temp = 3;
23763 goto arm_branch_common;
23764
23765 case BFD_RELOC_ARM_PCREL_JUMP:
23766 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23767 && fixP->fx_addsy
23768 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23769 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23770 && THUMB_IS_FUNC (fixP->fx_addsy))
23771 {
23772 /* This would map to a bl<cond>, b<cond>,
23773 b<always> to a Thumb function. We
23774 need to force a relocation for this particular
23775 case. */
23776 newval = md_chars_to_number (buf, INSN_SIZE);
23777 fixP->fx_done = 0;
23778 }
23779 /* Fall through. */
23780
23781 case BFD_RELOC_ARM_PLT32:
23782 #endif
23783 case BFD_RELOC_ARM_PCREL_BRANCH:
23784 temp = 3;
23785 goto arm_branch_common;
23786
23787 case BFD_RELOC_ARM_PCREL_BLX:
23788
23789 temp = 1;
23790 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23791 && fixP->fx_addsy
23792 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23793 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23794 && ARM_IS_FUNC (fixP->fx_addsy))
23795 {
23796 /* Flip the blx to a bl and warn. */
23797 const char *name = S_GET_NAME (fixP->fx_addsy);
23798 newval = 0xeb000000;
23799 as_warn_where (fixP->fx_file, fixP->fx_line,
23800 _("blx to '%s' an ARM ISA state function changed to bl"),
23801 name);
23802 md_number_to_chars (buf, newval, INSN_SIZE);
23803 temp = 3;
23804 fixP->fx_done = 1;
23805 }
23806
23807 #ifdef OBJ_ELF
23808 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
23809 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
23810 #endif
23811
23812 arm_branch_common:
23813 /* We are going to store value (shifted right by two) in the
23814 instruction, in a 24 bit, signed field. Bits 26 through 32 either
23815 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
23816 also be clear. */
23817 if (value & temp)
23818 as_bad_where (fixP->fx_file, fixP->fx_line,
23819 _("misaligned branch destination"));
23820 if ((value & (offsetT)0xfe000000) != (offsetT)0
23821 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
23822 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23823
23824 if (fixP->fx_done || !seg->use_rela_p)
23825 {
23826 newval = md_chars_to_number (buf, INSN_SIZE);
23827 newval |= (value >> 2) & 0x00ffffff;
23828 /* Set the H bit on BLX instructions. */
23829 if (temp == 1)
23830 {
23831 if (value & 2)
23832 newval |= 0x01000000;
23833 else
23834 newval &= ~0x01000000;
23835 }
23836 md_number_to_chars (buf, newval, INSN_SIZE);
23837 }
23838 break;
23839
23840 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
23841 /* CBZ can only branch forward. */
23842
23843 /* Attempts to use CBZ to branch to the next instruction
23844 (which, strictly speaking, are prohibited) will be turned into
23845 no-ops.
23846
23847 FIXME: It may be better to remove the instruction completely and
23848 perform relaxation. */
23849 if (value == -2)
23850 {
23851 newval = md_chars_to_number (buf, THUMB_SIZE);
23852 newval = 0xbf00; /* NOP encoding T1 */
23853 md_number_to_chars (buf, newval, THUMB_SIZE);
23854 }
23855 else
23856 {
23857 if (value & ~0x7e)
23858 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23859
23860 if (fixP->fx_done || !seg->use_rela_p)
23861 {
23862 newval = md_chars_to_number (buf, THUMB_SIZE);
23863 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
23864 md_number_to_chars (buf, newval, THUMB_SIZE);
23865 }
23866 }
23867 break;
23868
23869 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
23870 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
23871 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23872
23873 if (fixP->fx_done || !seg->use_rela_p)
23874 {
23875 newval = md_chars_to_number (buf, THUMB_SIZE);
23876 newval |= (value & 0x1ff) >> 1;
23877 md_number_to_chars (buf, newval, THUMB_SIZE);
23878 }
23879 break;
23880
23881 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
23882 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
23883 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23884
23885 if (fixP->fx_done || !seg->use_rela_p)
23886 {
23887 newval = md_chars_to_number (buf, THUMB_SIZE);
23888 newval |= (value & 0xfff) >> 1;
23889 md_number_to_chars (buf, newval, THUMB_SIZE);
23890 }
23891 break;
23892
23893 case BFD_RELOC_THUMB_PCREL_BRANCH20:
23894 if (fixP->fx_addsy
23895 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23896 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23897 && ARM_IS_FUNC (fixP->fx_addsy)
23898 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
23899 {
23900 /* Force a relocation for a branch 20 bits wide. */
23901 fixP->fx_done = 0;
23902 }
23903 if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
23904 as_bad_where (fixP->fx_file, fixP->fx_line,
23905 _("conditional branch out of range"));
23906
23907 if (fixP->fx_done || !seg->use_rela_p)
23908 {
23909 offsetT newval2;
23910 addressT S, J1, J2, lo, hi;
23911
23912 S = (value & 0x00100000) >> 20;
23913 J2 = (value & 0x00080000) >> 19;
23914 J1 = (value & 0x00040000) >> 18;
23915 hi = (value & 0x0003f000) >> 12;
23916 lo = (value & 0x00000ffe) >> 1;
23917
23918 newval = md_chars_to_number (buf, THUMB_SIZE);
23919 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23920 newval |= (S << 10) | hi;
23921 newval2 |= (J1 << 13) | (J2 << 11) | lo;
23922 md_number_to_chars (buf, newval, THUMB_SIZE);
23923 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
23924 }
23925 break;
23926
23927 case BFD_RELOC_THUMB_PCREL_BLX:
23928 /* If there is a blx from a thumb state function to
23929 another thumb function flip this to a bl and warn
23930 about it. */
23931
23932 if (fixP->fx_addsy
23933 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23934 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23935 && THUMB_IS_FUNC (fixP->fx_addsy))
23936 {
23937 const char *name = S_GET_NAME (fixP->fx_addsy);
23938 as_warn_where (fixP->fx_file, fixP->fx_line,
23939 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
23940 name);
23941 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23942 newval = newval | 0x1000;
23943 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
23944 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
23945 fixP->fx_done = 1;
23946 }
23947
23948
23949 goto thumb_bl_common;
23950
23951 case BFD_RELOC_THUMB_PCREL_BRANCH23:
23952 /* A bl from Thumb state ISA to an internal ARM state function
23953 is converted to a blx. */
23954 if (fixP->fx_addsy
23955 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23956 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23957 && ARM_IS_FUNC (fixP->fx_addsy)
23958 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
23959 {
23960 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23961 newval = newval & ~0x1000;
23962 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
23963 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
23964 fixP->fx_done = 1;
23965 }
23966
23967 thumb_bl_common:
23968
23969 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
23970 /* For a BLX instruction, make sure that the relocation is rounded up
23971 to a word boundary. This follows the semantics of the instruction
23972 which specifies that bit 1 of the target address will come from bit
23973 1 of the base address. */
23974 value = (value + 3) & ~ 3;
23975
23976 #ifdef OBJ_ELF
23977 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
23978 && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
23979 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
23980 #endif
23981
23982 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
23983 {
23984 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)))
23985 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23986 else if ((value & ~0x1ffffff)
23987 && ((value & ~0x1ffffff) != ~0x1ffffff))
23988 as_bad_where (fixP->fx_file, fixP->fx_line,
23989 _("Thumb2 branch out of range"));
23990 }
23991
23992 if (fixP->fx_done || !seg->use_rela_p)
23993 encode_thumb2_b_bl_offset (buf, value);
23994
23995 break;
23996
23997 case BFD_RELOC_THUMB_PCREL_BRANCH25:
23998 if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
23999 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
24000
24001 if (fixP->fx_done || !seg->use_rela_p)
24002 encode_thumb2_b_bl_offset (buf, value);
24003
24004 break;
24005
24006 case BFD_RELOC_8:
24007 if (fixP->fx_done || !seg->use_rela_p)
24008 *buf = value;
24009 break;
24010
24011 case BFD_RELOC_16:
24012 if (fixP->fx_done || !seg->use_rela_p)
24013 md_number_to_chars (buf, value, 2);
24014 break;
24015
24016 #ifdef OBJ_ELF
24017 case BFD_RELOC_ARM_TLS_CALL:
24018 case BFD_RELOC_ARM_THM_TLS_CALL:
24019 case BFD_RELOC_ARM_TLS_DESCSEQ:
24020 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
24021 case BFD_RELOC_ARM_TLS_GOTDESC:
24022 case BFD_RELOC_ARM_TLS_GD32:
24023 case BFD_RELOC_ARM_TLS_LE32:
24024 case BFD_RELOC_ARM_TLS_IE32:
24025 case BFD_RELOC_ARM_TLS_LDM32:
24026 case BFD_RELOC_ARM_TLS_LDO32:
24027 S_SET_THREAD_LOCAL (fixP->fx_addsy);
24028 break;
24029
24030 /* Same handling as above, but with the arm_fdpic guard. */
24031 case BFD_RELOC_ARM_TLS_GD32_FDPIC:
24032 case BFD_RELOC_ARM_TLS_IE32_FDPIC:
24033 case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
24034 if (arm_fdpic)
24035 {
24036 S_SET_THREAD_LOCAL (fixP->fx_addsy);
24037 }
24038 else
24039 {
24040 as_bad_where (fixP->fx_file, fixP->fx_line,
24041 _("Relocation supported only in FDPIC mode"));
24042 }
24043 break;
24044
24045 case BFD_RELOC_ARM_GOT32:
24046 case BFD_RELOC_ARM_GOTOFF:
24047 break;
24048
24049 case BFD_RELOC_ARM_GOT_PREL:
24050 if (fixP->fx_done || !seg->use_rela_p)
24051 md_number_to_chars (buf, value, 4);
24052 break;
24053
24054 case BFD_RELOC_ARM_TARGET2:
24055 /* TARGET2 is not partial-inplace, so we need to write the
24056 addend here for REL targets, because it won't be written out
24057 during reloc processing later. */
24058 if (fixP->fx_done || !seg->use_rela_p)
24059 md_number_to_chars (buf, fixP->fx_offset, 4);
24060 break;
24061
24062 /* Relocations for FDPIC. */
24063 case BFD_RELOC_ARM_GOTFUNCDESC:
24064 case BFD_RELOC_ARM_GOTOFFFUNCDESC:
24065 case BFD_RELOC_ARM_FUNCDESC:
24066 if (arm_fdpic)
24067 {
24068 if (fixP->fx_done || !seg->use_rela_p)
24069 md_number_to_chars (buf, 0, 4);
24070 }
24071 else
24072 {
24073 as_bad_where (fixP->fx_file, fixP->fx_line,
24074 _("Relocation supported only in FDPIC mode"));
24075 }
24076 break;
24077 #endif
24078
24079 case BFD_RELOC_RVA:
24080 case BFD_RELOC_32:
24081 case BFD_RELOC_ARM_TARGET1:
24082 case BFD_RELOC_ARM_ROSEGREL32:
24083 case BFD_RELOC_ARM_SBREL32:
24084 case BFD_RELOC_32_PCREL:
24085 #ifdef TE_PE
24086 case BFD_RELOC_32_SECREL:
24087 #endif
24088 if (fixP->fx_done || !seg->use_rela_p)
24089 #ifdef TE_WINCE
24090 /* For WinCE we only do this for pcrel fixups. */
24091 if (fixP->fx_done || fixP->fx_pcrel)
24092 #endif
24093 md_number_to_chars (buf, value, 4);
24094 break;
24095
24096 #ifdef OBJ_ELF
24097 case BFD_RELOC_ARM_PREL31:
24098 if (fixP->fx_done || !seg->use_rela_p)
24099 {
24100 newval = md_chars_to_number (buf, 4) & 0x80000000;
24101 if ((value ^ (value >> 1)) & 0x40000000)
24102 {
24103 as_bad_where (fixP->fx_file, fixP->fx_line,
24104 _("rel31 relocation overflow"));
24105 }
24106 newval |= value & 0x7fffffff;
24107 md_number_to_chars (buf, newval, 4);
24108 }
24109 break;
24110 #endif
24111
24112 case BFD_RELOC_ARM_CP_OFF_IMM:
24113 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
24114 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM)
24115 newval = md_chars_to_number (buf, INSN_SIZE);
24116 else
24117 newval = get_thumb32_insn (buf);
24118 if ((newval & 0x0f200f00) == 0x0d000900)
24119 {
24120 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
24121 has permitted values that are multiples of 2, in the range 0
24122 to 510. */
24123 if (value < -510 || value > 510 || (value & 1))
24124 as_bad_where (fixP->fx_file, fixP->fx_line,
24125 _("co-processor offset out of range"));
24126 }
24127 else if (value < -1023 || value > 1023 || (value & 3))
24128 as_bad_where (fixP->fx_file, fixP->fx_line,
24129 _("co-processor offset out of range"));
24130 cp_off_common:
24131 sign = value > 0;
24132 if (value < 0)
24133 value = -value;
24134 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
24135 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
24136 newval = md_chars_to_number (buf, INSN_SIZE);
24137 else
24138 newval = get_thumb32_insn (buf);
24139 if (value == 0)
24140 newval &= 0xffffff00;
24141 else
24142 {
24143 newval &= 0xff7fff00;
24144 if ((newval & 0x0f200f00) == 0x0d000900)
24145 {
24146 /* This is a fp16 vstr/vldr.
24147
24148 It requires the immediate offset in the instruction is shifted
24149 left by 1 to be a half-word offset.
24150
24151 Here, left shift by 1 first, and later right shift by 2
24152 should get the right offset. */
24153 value <<= 1;
24154 }
24155 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
24156 }
24157 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
24158 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
24159 md_number_to_chars (buf, newval, INSN_SIZE);
24160 else
24161 put_thumb32_insn (buf, newval);
24162 break;
24163
24164 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
24165 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
24166 if (value < -255 || value > 255)
24167 as_bad_where (fixP->fx_file, fixP->fx_line,
24168 _("co-processor offset out of range"));
24169 value *= 4;
24170 goto cp_off_common;
24171
24172 case BFD_RELOC_ARM_THUMB_OFFSET:
24173 newval = md_chars_to_number (buf, THUMB_SIZE);
24174 /* Exactly what ranges, and where the offset is inserted depends
24175 on the type of instruction, we can establish this from the
24176 top 4 bits. */
24177 switch (newval >> 12)
24178 {
24179 case 4: /* PC load. */
24180 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
24181 forced to zero for these loads; md_pcrel_from has already
24182 compensated for this. */
24183 if (value & 3)
24184 as_bad_where (fixP->fx_file, fixP->fx_line,
24185 _("invalid offset, target not word aligned (0x%08lX)"),
24186 (((unsigned long) fixP->fx_frag->fr_address
24187 + (unsigned long) fixP->fx_where) & ~3)
24188 + (unsigned long) value);
24189
24190 if (value & ~0x3fc)
24191 as_bad_where (fixP->fx_file, fixP->fx_line,
24192 _("invalid offset, value too big (0x%08lX)"),
24193 (long) value);
24194
24195 newval |= value >> 2;
24196 break;
24197
24198 case 9: /* SP load/store. */
24199 if (value & ~0x3fc)
24200 as_bad_where (fixP->fx_file, fixP->fx_line,
24201 _("invalid offset, value too big (0x%08lX)"),
24202 (long) value);
24203 newval |= value >> 2;
24204 break;
24205
24206 case 6: /* Word load/store. */
24207 if (value & ~0x7c)
24208 as_bad_where (fixP->fx_file, fixP->fx_line,
24209 _("invalid offset, value too big (0x%08lX)"),
24210 (long) value);
24211 newval |= value << 4; /* 6 - 2. */
24212 break;
24213
24214 case 7: /* Byte load/store. */
24215 if (value & ~0x1f)
24216 as_bad_where (fixP->fx_file, fixP->fx_line,
24217 _("invalid offset, value too big (0x%08lX)"),
24218 (long) value);
24219 newval |= value << 6;
24220 break;
24221
24222 case 8: /* Halfword load/store. */
24223 if (value & ~0x3e)
24224 as_bad_where (fixP->fx_file, fixP->fx_line,
24225 _("invalid offset, value too big (0x%08lX)"),
24226 (long) value);
24227 newval |= value << 5; /* 6 - 1. */
24228 break;
24229
24230 default:
24231 as_bad_where (fixP->fx_file, fixP->fx_line,
24232 "Unable to process relocation for thumb opcode: %lx",
24233 (unsigned long) newval);
24234 break;
24235 }
24236 md_number_to_chars (buf, newval, THUMB_SIZE);
24237 break;
24238
24239 case BFD_RELOC_ARM_THUMB_ADD:
24240 /* This is a complicated relocation, since we use it for all of
24241 the following immediate relocations:
24242
24243 3bit ADD/SUB
24244 8bit ADD/SUB
24245 9bit ADD/SUB SP word-aligned
24246 10bit ADD PC/SP word-aligned
24247
24248 The type of instruction being processed is encoded in the
24249 instruction field:
24250
24251 0x8000 SUB
24252 0x00F0 Rd
24253 0x000F Rs
24254 */
24255 newval = md_chars_to_number (buf, THUMB_SIZE);
24256 {
24257 int rd = (newval >> 4) & 0xf;
24258 int rs = newval & 0xf;
24259 int subtract = !!(newval & 0x8000);
24260
24261 /* Check for HI regs, only very restricted cases allowed:
24262 Adjusting SP, and using PC or SP to get an address. */
24263 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
24264 || (rs > 7 && rs != REG_SP && rs != REG_PC))
24265 as_bad_where (fixP->fx_file, fixP->fx_line,
24266 _("invalid Hi register with immediate"));
24267
24268 /* If value is negative, choose the opposite instruction. */
24269 if (value < 0)
24270 {
24271 value = -value;
24272 subtract = !subtract;
24273 if (value < 0)
24274 as_bad_where (fixP->fx_file, fixP->fx_line,
24275 _("immediate value out of range"));
24276 }
24277
24278 if (rd == REG_SP)
24279 {
24280 if (value & ~0x1fc)
24281 as_bad_where (fixP->fx_file, fixP->fx_line,
24282 _("invalid immediate for stack address calculation"));
24283 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
24284 newval |= value >> 2;
24285 }
24286 else if (rs == REG_PC || rs == REG_SP)
24287 {
24288 /* PR gas/18541. If the addition is for a defined symbol
24289 within range of an ADR instruction then accept it. */
24290 if (subtract
24291 && value == 4
24292 && fixP->fx_addsy != NULL)
24293 {
24294 subtract = 0;
24295
24296 if (! S_IS_DEFINED (fixP->fx_addsy)
24297 || S_GET_SEGMENT (fixP->fx_addsy) != seg
24298 || S_IS_WEAK (fixP->fx_addsy))
24299 {
24300 as_bad_where (fixP->fx_file, fixP->fx_line,
24301 _("address calculation needs a strongly defined nearby symbol"));
24302 }
24303 else
24304 {
24305 offsetT v = fixP->fx_where + fixP->fx_frag->fr_address;
24306
24307 /* Round up to the next 4-byte boundary. */
24308 if (v & 3)
24309 v = (v + 3) & ~ 3;
24310 else
24311 v += 4;
24312 v = S_GET_VALUE (fixP->fx_addsy) - v;
24313
24314 if (v & ~0x3fc)
24315 {
24316 as_bad_where (fixP->fx_file, fixP->fx_line,
24317 _("symbol too far away"));
24318 }
24319 else
24320 {
24321 fixP->fx_done = 1;
24322 value = v;
24323 }
24324 }
24325 }
24326
24327 if (subtract || value & ~0x3fc)
24328 as_bad_where (fixP->fx_file, fixP->fx_line,
24329 _("invalid immediate for address calculation (value = 0x%08lX)"),
24330 (unsigned long) (subtract ? - value : value));
24331 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
24332 newval |= rd << 8;
24333 newval |= value >> 2;
24334 }
24335 else if (rs == rd)
24336 {
24337 if (value & ~0xff)
24338 as_bad_where (fixP->fx_file, fixP->fx_line,
24339 _("immediate value out of range"));
24340 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
24341 newval |= (rd << 8) | value;
24342 }
24343 else
24344 {
24345 if (value & ~0x7)
24346 as_bad_where (fixP->fx_file, fixP->fx_line,
24347 _("immediate value out of range"));
24348 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
24349 newval |= rd | (rs << 3) | (value << 6);
24350 }
24351 }
24352 md_number_to_chars (buf, newval, THUMB_SIZE);
24353 break;
24354
24355 case BFD_RELOC_ARM_THUMB_IMM:
24356 newval = md_chars_to_number (buf, THUMB_SIZE);
24357 if (value < 0 || value > 255)
24358 as_bad_where (fixP->fx_file, fixP->fx_line,
24359 _("invalid immediate: %ld is out of range"),
24360 (long) value);
24361 newval |= value;
24362 md_number_to_chars (buf, newval, THUMB_SIZE);
24363 break;
24364
24365 case BFD_RELOC_ARM_THUMB_SHIFT:
24366 /* 5bit shift value (0..32). LSL cannot take 32. */
24367 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
24368 temp = newval & 0xf800;
24369 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
24370 as_bad_where (fixP->fx_file, fixP->fx_line,
24371 _("invalid shift value: %ld"), (long) value);
24372 /* Shifts of zero must be encoded as LSL. */
24373 if (value == 0)
24374 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
24375 /* Shifts of 32 are encoded as zero. */
24376 else if (value == 32)
24377 value = 0;
24378 newval |= value << 6;
24379 md_number_to_chars (buf, newval, THUMB_SIZE);
24380 break;
24381
24382 case BFD_RELOC_VTABLE_INHERIT:
24383 case BFD_RELOC_VTABLE_ENTRY:
24384 fixP->fx_done = 0;
24385 return;
24386
24387 case BFD_RELOC_ARM_MOVW:
24388 case BFD_RELOC_ARM_MOVT:
24389 case BFD_RELOC_ARM_THUMB_MOVW:
24390 case BFD_RELOC_ARM_THUMB_MOVT:
24391 if (fixP->fx_done || !seg->use_rela_p)
24392 {
24393 /* REL format relocations are limited to a 16-bit addend. */
24394 if (!fixP->fx_done)
24395 {
24396 if (value < -0x8000 || value > 0x7fff)
24397 as_bad_where (fixP->fx_file, fixP->fx_line,
24398 _("offset out of range"));
24399 }
24400 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
24401 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
24402 {
24403 value >>= 16;
24404 }
24405
24406 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
24407 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
24408 {
24409 newval = get_thumb32_insn (buf);
24410 newval &= 0xfbf08f00;
24411 newval |= (value & 0xf000) << 4;
24412 newval |= (value & 0x0800) << 15;
24413 newval |= (value & 0x0700) << 4;
24414 newval |= (value & 0x00ff);
24415 put_thumb32_insn (buf, newval);
24416 }
24417 else
24418 {
24419 newval = md_chars_to_number (buf, 4);
24420 newval &= 0xfff0f000;
24421 newval |= value & 0x0fff;
24422 newval |= (value & 0xf000) << 4;
24423 md_number_to_chars (buf, newval, 4);
24424 }
24425 }
24426 return;
24427
24428 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
24429 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
24430 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
24431 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
24432 gas_assert (!fixP->fx_done);
24433 {
24434 bfd_vma insn;
24435 bfd_boolean is_mov;
24436 bfd_vma encoded_addend = value;
24437
24438 /* Check that addend can be encoded in instruction. */
24439 if (!seg->use_rela_p && (value < 0 || value > 255))
24440 as_bad_where (fixP->fx_file, fixP->fx_line,
24441 _("the offset 0x%08lX is not representable"),
24442 (unsigned long) encoded_addend);
24443
24444 /* Extract the instruction. */
24445 insn = md_chars_to_number (buf, THUMB_SIZE);
24446 is_mov = (insn & 0xf800) == 0x2000;
24447
24448 /* Encode insn. */
24449 if (is_mov)
24450 {
24451 if (!seg->use_rela_p)
24452 insn |= encoded_addend;
24453 }
24454 else
24455 {
24456 int rd, rs;
24457
24458 /* Extract the instruction. */
24459 /* Encoding is the following
24460 0x8000 SUB
24461 0x00F0 Rd
24462 0x000F Rs
24463 */
24464 /* The following conditions must be true :
24465 - ADD
24466 - Rd == Rs
24467 - Rd <= 7
24468 */
24469 rd = (insn >> 4) & 0xf;
24470 rs = insn & 0xf;
24471 if ((insn & 0x8000) || (rd != rs) || rd > 7)
24472 as_bad_where (fixP->fx_file, fixP->fx_line,
24473 _("Unable to process relocation for thumb opcode: %lx"),
24474 (unsigned long) insn);
24475
24476 /* Encode as ADD immediate8 thumb 1 code. */
24477 insn = 0x3000 | (rd << 8);
24478
24479 /* Place the encoded addend into the first 8 bits of the
24480 instruction. */
24481 if (!seg->use_rela_p)
24482 insn |= encoded_addend;
24483 }
24484
24485 /* Update the instruction. */
24486 md_number_to_chars (buf, insn, THUMB_SIZE);
24487 }
24488 break;
24489
24490 case BFD_RELOC_ARM_ALU_PC_G0_NC:
24491 case BFD_RELOC_ARM_ALU_PC_G0:
24492 case BFD_RELOC_ARM_ALU_PC_G1_NC:
24493 case BFD_RELOC_ARM_ALU_PC_G1:
24494 case BFD_RELOC_ARM_ALU_PC_G2:
24495 case BFD_RELOC_ARM_ALU_SB_G0_NC:
24496 case BFD_RELOC_ARM_ALU_SB_G0:
24497 case BFD_RELOC_ARM_ALU_SB_G1_NC:
24498 case BFD_RELOC_ARM_ALU_SB_G1:
24499 case BFD_RELOC_ARM_ALU_SB_G2:
24500 gas_assert (!fixP->fx_done);
24501 if (!seg->use_rela_p)
24502 {
24503 bfd_vma insn;
24504 bfd_vma encoded_addend;
24505 bfd_vma addend_abs = abs (value);
24506
24507 /* Check that the absolute value of the addend can be
24508 expressed as an 8-bit constant plus a rotation. */
24509 encoded_addend = encode_arm_immediate (addend_abs);
24510 if (encoded_addend == (unsigned int) FAIL)
24511 as_bad_where (fixP->fx_file, fixP->fx_line,
24512 _("the offset 0x%08lX is not representable"),
24513 (unsigned long) addend_abs);
24514
24515 /* Extract the instruction. */
24516 insn = md_chars_to_number (buf, INSN_SIZE);
24517
24518 /* If the addend is positive, use an ADD instruction.
24519 Otherwise use a SUB. Take care not to destroy the S bit. */
24520 insn &= 0xff1fffff;
24521 if (value < 0)
24522 insn |= 1 << 22;
24523 else
24524 insn |= 1 << 23;
24525
24526 /* Place the encoded addend into the first 12 bits of the
24527 instruction. */
24528 insn &= 0xfffff000;
24529 insn |= encoded_addend;
24530
24531 /* Update the instruction. */
24532 md_number_to_chars (buf, insn, INSN_SIZE);
24533 }
24534 break;
24535
24536 case BFD_RELOC_ARM_LDR_PC_G0:
24537 case BFD_RELOC_ARM_LDR_PC_G1:
24538 case BFD_RELOC_ARM_LDR_PC_G2:
24539 case BFD_RELOC_ARM_LDR_SB_G0:
24540 case BFD_RELOC_ARM_LDR_SB_G1:
24541 case BFD_RELOC_ARM_LDR_SB_G2:
24542 gas_assert (!fixP->fx_done);
24543 if (!seg->use_rela_p)
24544 {
24545 bfd_vma insn;
24546 bfd_vma addend_abs = abs (value);
24547
24548 /* Check that the absolute value of the addend can be
24549 encoded in 12 bits. */
24550 if (addend_abs >= 0x1000)
24551 as_bad_where (fixP->fx_file, fixP->fx_line,
24552 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
24553 (unsigned long) addend_abs);
24554
24555 /* Extract the instruction. */
24556 insn = md_chars_to_number (buf, INSN_SIZE);
24557
24558 /* If the addend is negative, clear bit 23 of the instruction.
24559 Otherwise set it. */
24560 if (value < 0)
24561 insn &= ~(1 << 23);
24562 else
24563 insn |= 1 << 23;
24564
24565 /* Place the absolute value of the addend into the first 12 bits
24566 of the instruction. */
24567 insn &= 0xfffff000;
24568 insn |= addend_abs;
24569
24570 /* Update the instruction. */
24571 md_number_to_chars (buf, insn, INSN_SIZE);
24572 }
24573 break;
24574
24575 case BFD_RELOC_ARM_LDRS_PC_G0:
24576 case BFD_RELOC_ARM_LDRS_PC_G1:
24577 case BFD_RELOC_ARM_LDRS_PC_G2:
24578 case BFD_RELOC_ARM_LDRS_SB_G0:
24579 case BFD_RELOC_ARM_LDRS_SB_G1:
24580 case BFD_RELOC_ARM_LDRS_SB_G2:
24581 gas_assert (!fixP->fx_done);
24582 if (!seg->use_rela_p)
24583 {
24584 bfd_vma insn;
24585 bfd_vma addend_abs = abs (value);
24586
24587 /* Check that the absolute value of the addend can be
24588 encoded in 8 bits. */
24589 if (addend_abs >= 0x100)
24590 as_bad_where (fixP->fx_file, fixP->fx_line,
24591 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
24592 (unsigned long) addend_abs);
24593
24594 /* Extract the instruction. */
24595 insn = md_chars_to_number (buf, INSN_SIZE);
24596
24597 /* If the addend is negative, clear bit 23 of the instruction.
24598 Otherwise set it. */
24599 if (value < 0)
24600 insn &= ~(1 << 23);
24601 else
24602 insn |= 1 << 23;
24603
24604 /* Place the first four bits of the absolute value of the addend
24605 into the first 4 bits of the instruction, and the remaining
24606 four into bits 8 .. 11. */
24607 insn &= 0xfffff0f0;
24608 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
24609
24610 /* Update the instruction. */
24611 md_number_to_chars (buf, insn, INSN_SIZE);
24612 }
24613 break;
24614
24615 case BFD_RELOC_ARM_LDC_PC_G0:
24616 case BFD_RELOC_ARM_LDC_PC_G1:
24617 case BFD_RELOC_ARM_LDC_PC_G2:
24618 case BFD_RELOC_ARM_LDC_SB_G0:
24619 case BFD_RELOC_ARM_LDC_SB_G1:
24620 case BFD_RELOC_ARM_LDC_SB_G2:
24621 gas_assert (!fixP->fx_done);
24622 if (!seg->use_rela_p)
24623 {
24624 bfd_vma insn;
24625 bfd_vma addend_abs = abs (value);
24626
24627 /* Check that the absolute value of the addend is a multiple of
24628 four and, when divided by four, fits in 8 bits. */
24629 if (addend_abs & 0x3)
24630 as_bad_where (fixP->fx_file, fixP->fx_line,
24631 _("bad offset 0x%08lX (must be word-aligned)"),
24632 (unsigned long) addend_abs);
24633
24634 if ((addend_abs >> 2) > 0xff)
24635 as_bad_where (fixP->fx_file, fixP->fx_line,
24636 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
24637 (unsigned long) addend_abs);
24638
24639 /* Extract the instruction. */
24640 insn = md_chars_to_number (buf, INSN_SIZE);
24641
24642 /* If the addend is negative, clear bit 23 of the instruction.
24643 Otherwise set it. */
24644 if (value < 0)
24645 insn &= ~(1 << 23);
24646 else
24647 insn |= 1 << 23;
24648
24649 /* Place the addend (divided by four) into the first eight
24650 bits of the instruction. */
24651 insn &= 0xfffffff0;
24652 insn |= addend_abs >> 2;
24653
24654 /* Update the instruction. */
24655 md_number_to_chars (buf, insn, INSN_SIZE);
24656 }
24657 break;
24658
24659 case BFD_RELOC_ARM_V4BX:
24660 /* This will need to go in the object file. */
24661 fixP->fx_done = 0;
24662 break;
24663
24664 case BFD_RELOC_UNUSED:
24665 default:
24666 as_bad_where (fixP->fx_file, fixP->fx_line,
24667 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
24668 }
24669 }
24670
24671 /* Translate internal representation of relocation info to BFD target
24672 format. */
24673
arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  reloc = XNEW (arelent);

  reloc->sym_ptr_ptr = XNEW (asymbol *);
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  /* For a PC-relative fix the addend handling differs between RELA
     sections (explicit addend, so strip the PC bias out of fx_offset)
     and REL sections (addend lives in the section contents).  */
  if (fixp->fx_pcrel)
    {
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  /* Map the internal fixup type onto the BFD reloc code that will be
     emitted into the object file.  */
  switch (fixp->fx_r_type)
    {
      /* Plain data and MOVW/MOVT relocs: substitute the PC-relative
	 variant when the fix is PC-relative; otherwise fall through to
	 the block below that passes the type straight on.  */
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

      /* These reloc types are emitted unchanged.  */
    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_THUMB_PCREL_BLX:
#ifdef OBJ_ELF
      /* EABI v4+ lets the linker fix up BL/BLX, so a plain BRANCH23 is
	 emitted instead of the dedicated BLX reloc.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	code = BFD_RELOC_THUMB_PCREL_BLX;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_GOT_PREL:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
    case BFD_RELOC_ARM_V4BX:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
    case BFD_RELOC_ARM_GOTFUNCDESC:
    case BFD_RELOC_ARM_GOTOFFFUNCDESC:
    case BFD_RELOC_ARM_FUNCDESC:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_GD32_FDPIC:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_IE32_FDPIC:
    case BFD_RELOC_ARM_TLS_LDM32:
    case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

      /* Internal-only fixup types reaching this point mean the fix was
	 never resolved; diagnose rather than emit a bogus reloc.  */
    case BFD_RELOC_ARM_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	const char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
	  default:			   type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* References to _GLOBAL_OFFSET_TABLE_ via a 32-bit reloc become
     GOTPC, with the place of the reloc as the addend.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
24946
24947 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
24948
24949 void
24950 cons_fix_new_arm (fragS * frag,
24951 int where,
24952 int size,
24953 expressionS * exp,
24954 bfd_reloc_code_real_type reloc)
24955 {
24956 int pcrel = 0;
24957
24958 /* Pick a reloc.
24959 FIXME: @@ Should look at CPU word size. */
24960 switch (size)
24961 {
24962 case 1:
24963 reloc = BFD_RELOC_8;
24964 break;
24965 case 2:
24966 reloc = BFD_RELOC_16;
24967 break;
24968 case 4:
24969 default:
24970 reloc = BFD_RELOC_32;
24971 break;
24972 case 8:
24973 reloc = BFD_RELOC_64;
24974 break;
24975 }
24976
24977 #ifdef TE_PE
24978 if (exp->X_op == O_secrel)
24979 {
24980 exp->X_op = O_symbol;
24981 reloc = BFD_RELOC_32_SECREL;
24982 }
24983 #endif
24984
24985 fix_new_exp (frag, where, size, exp, pcrel, reloc);
24986 }
24987
24988 #if defined (OBJ_COFF)
24989 void
24990 arm_validate_fix (fixS * fixP)
24991 {
24992 /* If the destination of the branch is a defined symbol which does not have
24993 the THUMB_FUNC attribute, then we must be calling a function which has
24994 the (interfacearm) attribute. We look for the Thumb entry point to that
24995 function and change the branch to refer to that function instead. */
24996 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
24997 && fixP->fx_addsy != NULL
24998 && S_IS_DEFINED (fixP->fx_addsy)
24999 && ! THUMB_IS_FUNC (fixP->fx_addsy))
25000 {
25001 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
25002 }
25003 }
25004 #endif
25005
25006
25007 int
25008 arm_force_relocation (struct fix * fixp)
25009 {
25010 #if defined (OBJ_COFF) && defined (TE_PE)
25011 if (fixp->fx_r_type == BFD_RELOC_RVA)
25012 return 1;
25013 #endif
25014
25015 /* In case we have a call or a branch to a function in ARM ISA mode from
25016 a thumb function or vice-versa force the relocation. These relocations
25017 are cleared off for some cores that might have blx and simple transformations
25018 are possible. */
25019
25020 #ifdef OBJ_ELF
25021 switch (fixp->fx_r_type)
25022 {
25023 case BFD_RELOC_ARM_PCREL_JUMP:
25024 case BFD_RELOC_ARM_PCREL_CALL:
25025 case BFD_RELOC_THUMB_PCREL_BLX:
25026 if (THUMB_IS_FUNC (fixp->fx_addsy))
25027 return 1;
25028 break;
25029
25030 case BFD_RELOC_ARM_PCREL_BLX:
25031 case BFD_RELOC_THUMB_PCREL_BRANCH25:
25032 case BFD_RELOC_THUMB_PCREL_BRANCH20:
25033 case BFD_RELOC_THUMB_PCREL_BRANCH23:
25034 if (ARM_IS_FUNC (fixp->fx_addsy))
25035 return 1;
25036 break;
25037
25038 default:
25039 break;
25040 }
25041 #endif
25042
25043 /* Resolve these relocations even if the symbol is extern or weak.
25044 Technically this is probably wrong due to symbol preemption.
25045 In practice these relocations do not have enough range to be useful
25046 at dynamic link time, and some code (e.g. in the Linux kernel)
25047 expects these references to be resolved. */
25048 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
25049 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
25050 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
25051 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
25052 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
25053 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
25054 || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
25055 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
25056 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
25057 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
25058 || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
25059 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
25060 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
25061 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
25062 return 0;
25063
25064 /* Always leave these relocations for the linker. */
25065 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
25066 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
25067 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
25068 return 1;
25069
25070 /* Always generate relocations against function symbols. */
25071 if (fixp->fx_r_type == BFD_RELOC_32
25072 && fixp->fx_addsy
25073 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
25074 return 1;
25075
25076 return generic_force_reloc (fixp);
25077 }
25078
25079 #if defined (OBJ_ELF) || defined (OBJ_COFF)
25080 /* Relocations against function names must be left unadjusted,
25081 so that the linker can use this information to generate interworking
25082 stubs. The MIPS version of this function
25083 also prevents relocations that are mips-16 specific, but I do not
25084 know why it does this.
25085
25086 FIXME:
25087 There is one other problem that ought to be addressed here, but
25088 which currently is not: Taking the address of a label (rather
25089 than a function) and then later jumping to that address. Such
25090 addresses also ought to have their bottom bit set (assuming that
25091 they reside in Thumb code), but at the moment they will not. */
25092
25093 bfd_boolean
25094 arm_fix_adjustable (fixS * fixP)
25095 {
25096 if (fixP->fx_addsy == NULL)
25097 return 1;
25098
25099 /* Preserve relocations against symbols with function type. */
25100 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
25101 return FALSE;
25102
25103 if (THUMB_IS_FUNC (fixP->fx_addsy)
25104 && fixP->fx_subsy == NULL)
25105 return FALSE;
25106
25107 /* We need the symbol name for the VTABLE entries. */
25108 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
25109 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
25110 return FALSE;
25111
25112 /* Don't allow symbols to be discarded on GOT related relocs. */
25113 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
25114 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
25115 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
25116 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
25117 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32_FDPIC
25118 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
25119 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
25120 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32_FDPIC
25121 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
25122 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32_FDPIC
25123 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
25124 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
25125 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
25126 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
25127 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
25128 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
25129 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
25130 return FALSE;
25131
25132 /* Similarly for group relocations. */
25133 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
25134 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
25135 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
25136 return FALSE;
25137
25138 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
25139 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
25140 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
25141 || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
25142 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
25143 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
25144 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
25145 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
25146 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
25147 return FALSE;
25148
25149 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
25150 offsets, so keep these symbols. */
25151 if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
25152 && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
25153 return FALSE;
25154
25155 return TRUE;
25156 }
25157 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
25158
25159 #ifdef OBJ_ELF
25160 const char *
25161 elf32_arm_target_format (void)
25162 {
25163 #ifdef TE_SYMBIAN
25164 return (target_big_endian
25165 ? "elf32-bigarm-symbian"
25166 : "elf32-littlearm-symbian");
25167 #elif defined (TE_VXWORKS)
25168 return (target_big_endian
25169 ? "elf32-bigarm-vxworks"
25170 : "elf32-littlearm-vxworks");
25171 #elif defined (TE_NACL)
25172 return (target_big_endian
25173 ? "elf32-bigarm-nacl"
25174 : "elf32-littlearm-nacl");
25175 #else
25176 if (arm_fdpic)
25177 {
25178 if (target_big_endian)
25179 return "elf32-bigarm-fdpic";
25180 else
25181 return "elf32-littlearm-fdpic";
25182 }
25183 else
25184 {
25185 if (target_big_endian)
25186 return "elf32-bigarm";
25187 else
25188 return "elf32-littlearm";
25189 }
25190 #endif
25191 }
25192
/* Target hook: tweak each symbol just before it is written out.
   Simply defers to the generic ELF symbol frobbing.  *PUNTP is set by
   the callee when the symbol should be skipped.  */

void
armelf_frob_symbol (symbolS * symp,
		    int * puntp)
{
  elf_frob_symbol (symp, puntp);
}
25199 #endif
25200
25201 /* MD interface: Finalization. */
25202
25203 void
25204 arm_cleanup (void)
25205 {
25206 literal_pool * pool;
25207
25208 /* Ensure that all the IT blocks are properly closed. */
25209 check_it_blocks_finished ();
25210
25211 for (pool = list_of_pools; pool; pool = pool->next)
25212 {
25213 /* Put it at the end of the relevant section. */
25214 subseg_set (pool->section, pool->sub_section);
25215 #ifdef OBJ_ELF
25216 arm_elf_change_section ();
25217 #endif
25218 s_ltorg (0);
25219 }
25220 }
25221
25222 #ifdef OBJ_ELF
25223 /* Remove any excess mapping symbols generated for alignment frags in
25224 SEC. We may have created a mapping symbol before a zero byte
25225 alignment; remove it if there's a mapping symbol after the
25226 alignment. */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  /* Walk every frag in the section, looking at the last mapping symbol
     recorded for each.  */
  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly at the boundary with the next frag.  Scan
	 forward over empty frags to decide whether it is redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
25287 #endif
25288
25289 /* Adjust the symbol table. This marks Thumb symbols as distinct from
25290 ARM ones. */
25291
void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  /* For COFF: rewrite the storage class of every Thumb symbol to its
     Thumb-specific counterpart, and flag interworking symbols.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  /* For ELF: tag Thumb symbols (other than the special mapping symbols)
     either as branch targets in Thumb state or, on pre-v4 EABI, with the
     legacy STT_ARM_16BIT symbol type.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal,
					 ST_BRANCH_TO_THUMB);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
25370
25371 /* MD interface: Initialization. */
25372
25373 static void
25374 set_constant_flonums (void)
25375 {
25376 int i;
25377
25378 for (i = 0; i < NUM_FLOAT_VALS; i++)
25379 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
25380 abort ();
25381 }
25382
25383 /* Auto-select Thumb mode if it's the only available instruction set for the
25384 given architecture. */
25385
25386 static void
25387 autoselect_thumb_from_cpu_variant (void)
25388 {
25389 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
25390 opcode_select (16);
25391 }
25392
/* MD interface: one-time target initialization.  Builds the lookup hash
   tables used while parsing, resolves the command-line CPU/FPU selection
   into the working feature set CPU_VARIANT, derives the object-file
   private flags, and records the BFD machine type.  */

void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create the hash tables that map mnemonics, condition codes, shift
     names, PSR fields, register names, relocation names and barrier
     options to their table entries.  */
  if (	 (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  /* Populate each table from the corresponding static array.  */
  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
		 (void *) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
		 (void *) (barrier_opt_names + i));
#ifdef OBJ_ELF
  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
    {
      struct reloc_entry * entry = reloc_names + i;

      if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
	/* This makes encode_branch() use the EABI versions of this relocation.  */
	entry->reloc = BFD_RELOC_UNUSED;

      hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
    }
#endif

  set_constant_flonums ();

  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options with new options are faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      selected_arch = *legacy_cpu;
    }
  else if (mcpu_cpu_opt)
    {
      selected_arch = *mcpu_cpu_opt;
      selected_ext = *mcpu_ext_opt;
    }
  else if (march_cpu_opt)
    {
      selected_arch = *march_cpu_opt;
      selected_ext = *march_ext_opt;
    }
  ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);

  /* Resolve the FPU selection, with the same legacy/new-style exclusion
     as for the CPU above.  */
  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      selected_fpu = *legacy_fpu;
    }
  else if (mfpu_opt)
    selected_fpu = *mfpu_opt;
  else
    {
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	selected_fpu = *mcpu_fpu_opt;
      else if (march_fpu_opt)
	selected_fpu = *march_fpu_opt;
#else
      selected_fpu = fpu_default;
#endif
    }

  /* Still no FPU?  Fall back to the target default, or to FPA when no
     CPU at all was named on the command line.  */
  if (ARM_FEATURE_ZERO (selected_fpu))
    {
      if (!no_cpu_selected ())
	selected_fpu = fpu_default;
      else
	selected_fpu = fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (ARM_FEATURE_ZERO (selected_arch))
    {
      selected_arch = cpu_default;
      selected_cpu = selected_arch;
    }
  ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
#else
  /* Autodection of feature mode: allow all features in cpu_variant but leave
     selected_cpu unset.  It will be set in aeabi_set_public_attributes ()
     after all instruction have been processed and we can decide what CPU
     should be selected.  */
  if (ARM_FEATURE_ZERO (selected_arch))
    ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
  else
    ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
#endif

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;

#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    /* An FPU-less cpu_variant set F_SOFT_FLOAT above, which
	       contradicts an explicit hard-float ABI request.  */
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).  */
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	    flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.	*/
	break;

      default:
	abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.	*/
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags
	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
	    bfd_set_section_size (stdoutput, sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  Tested from most specific feature to
     least, so the first match is the best description of the core.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
25629
25630 /* Command line processing. */
25631
25632 /* md_parse_option
25633 Invocation line includes a switch not recognized by the base assembler.
25634 See if it's a processor-specific option.
25635
25636 This routine is somewhat complicated by the need for backwards
25637 compatibility (since older releases of gcc can't be changed).
25638 The new options try to make the interface as compatible as
25639 possible with GCC.
25640
25641 New options (supported) are:
25642
25643 -mcpu=<cpu name> Assemble for selected processor
25644 -march=<architecture name> Assemble for selected architecture
25645 -mfpu=<fpu architecture> Assemble for selected FPU.
25646 -EB/-mbig-endian Big-endian
25647 -EL/-mlittle-endian Little-endian
25648 -k Generate PIC code
25649 -mthumb Start in Thumb mode
25650 -mthumb-interwork Code supports ARM/Thumb interworking
25651
25652 -m[no-]warn-deprecated Warn about deprecated features
25653 -m[no-]warn-syms Warn when symbols match instructions
25654
25655 For now we will also provide support for:
25656
25657 -mapcs-32 32-bit Program counter
25658 -mapcs-26 26-bit Program counter
   -mapcs-float		Floats passed in FP registers
25660 -mapcs-reentrant Reentrant code
25661 -matpcs
25662 (sometime these will probably be replaced with -mapcs=<list of options>
25663 and -matpcs=<list of options>)
25664
   The remaining options are only supported for backwards compatibility.
25666 Cpu variants, the arm part is optional:
25667 -m[arm]1 Currently not supported.
25668 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
25669 -m[arm]3 Arm 3 processor
25670 -m[arm]6[xx], Arm 6 processors
25671 -m[arm]7[xx][t][[d]m] Arm 7 processors
25672 -m[arm]8[10] Arm 8 processors
25673 -m[arm]9[20][tdmi] Arm 9 processors
25674 -mstrongarm[110[0]] StrongARM processors
25675 -mxscale XScale processors
25676 -m[arm]v[2345[t[e]]] Arm architectures
25677 -mall All (except the ARM1)
25678 FP variants:
25679 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
25680 -mfpe-old (No float load/store multiples)
25681 -mvfpxd VFP Single precision
25682 -mvfp All VFP
25683 -mno-fpu Disable all floating point instructions
25684
25685 The following CPU names are recognized:
25686 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
25687 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
25688 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
25689 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
25690 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
25691 arm10t arm10e, arm1020t, arm1020e, arm10200e,
25692 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
25693
25694 */
25695
/* Short options recognized by md_parse_option: -m<arg> and -k.  */
const char * md_shortopts = "m:k";

/* Codes for the long options below.  Endianness options are only
   defined when the target can actually honour them: both for a
   bi-endian target, otherwise only the one matching the default.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
#define OPTION_FDPIC (OPTION_MD_BASE + 3)

/* Long options table handed to getopt_long by GAS.  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
#ifdef OBJ_ELF
  {"fdpic", no_argument, NULL, OPTION_FDPIC},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);

/* Table entry describing a simple set-an-integer command-line option.  */
struct arm_option_table
{
  const char * option;		/* Option name to match.  */
  const char * help;		/* Help information.  */
  int *        var;		/* Variable to change.	*/
  int	       value;		/* What to change it to.  */
  const char * deprecated;	/* If non-null, print this message.  */
};
25736
25737 struct arm_option_table arm_opts[] =
25738 {
25739 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
25740 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
25741 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
25742 &support_interwork, 1, NULL},
25743 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
25744 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
25745 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
25746 1, NULL},
25747 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
25748 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
25749 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
25750 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
25751 NULL},
25752
25753 /* These are recognized by the assembler, but have no affect on code. */
25754 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
25755 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
25756
25757 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
25758 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
25759 &warn_on_deprecated, 0, NULL},
25760 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL},
25761 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL},
25762 {NULL, NULL, NULL, 0, NULL}
25763 };
25764
/* Table entry describing a deprecated CPU/FPU option that maps onto a
   feature set, plus the replacement option to suggest to the user.  */
struct arm_legacy_option_table
{
  const char *		    option;	/* Option name to match.  */
  const arm_feature_set **  var;	/* Variable to change.	*/
  const arm_feature_set	    value;	/* What to change it to.  */
  const char *		    deprecated;	/* If non-null, print this message.  */
};

/* Deprecated -m<cpu>/-m<arch>/-m<fpu> spellings.  Each entry's
   DEPRECATED text names the modern -mcpu=/-march=/-mfpu= equivalent.  */
const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall",	 &legacy_cpu, ARM_ANY,	       N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.	 */
  {"mfpe-old",	 &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu",	 &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
25885
/* Table entry describing a CPU accepted by -mcpu=.  */
struct arm_cpu_option_table
{
  const char * name;		/* CPU name as given on the command line.  */
  size_t name_len;		/* Length of NAME, precomputed by ARM_CPU_OPT.	*/
  const arm_feature_set	value;	/* Core architecture features implied.	*/
  const arm_feature_set	ext;	/* CPU-specific extension features.  */
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...	*/
  const arm_feature_set	default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char * canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
#define ARM_CPU_OPT(N, CN, V, E, DF) { N, sizeof (N) - 1, V, E, DF, CN }
25903
25904 static const struct arm_cpu_option_table arm_cpus[] =
25905 {
25906 ARM_CPU_OPT ("all", NULL, ARM_ANY,
25907 ARM_ARCH_NONE,
25908 FPU_ARCH_FPA),
25909 ARM_CPU_OPT ("arm1", NULL, ARM_ARCH_V1,
25910 ARM_ARCH_NONE,
25911 FPU_ARCH_FPA),
25912 ARM_CPU_OPT ("arm2", NULL, ARM_ARCH_V2,
25913 ARM_ARCH_NONE,
25914 FPU_ARCH_FPA),
25915 ARM_CPU_OPT ("arm250", NULL, ARM_ARCH_V2S,
25916 ARM_ARCH_NONE,
25917 FPU_ARCH_FPA),
25918 ARM_CPU_OPT ("arm3", NULL, ARM_ARCH_V2S,
25919 ARM_ARCH_NONE,
25920 FPU_ARCH_FPA),
25921 ARM_CPU_OPT ("arm6", NULL, ARM_ARCH_V3,
25922 ARM_ARCH_NONE,
25923 FPU_ARCH_FPA),
25924 ARM_CPU_OPT ("arm60", NULL, ARM_ARCH_V3,
25925 ARM_ARCH_NONE,
25926 FPU_ARCH_FPA),
25927 ARM_CPU_OPT ("arm600", NULL, ARM_ARCH_V3,
25928 ARM_ARCH_NONE,
25929 FPU_ARCH_FPA),
25930 ARM_CPU_OPT ("arm610", NULL, ARM_ARCH_V3,
25931 ARM_ARCH_NONE,
25932 FPU_ARCH_FPA),
25933 ARM_CPU_OPT ("arm620", NULL, ARM_ARCH_V3,
25934 ARM_ARCH_NONE,
25935 FPU_ARCH_FPA),
25936 ARM_CPU_OPT ("arm7", NULL, ARM_ARCH_V3,
25937 ARM_ARCH_NONE,
25938 FPU_ARCH_FPA),
25939 ARM_CPU_OPT ("arm7m", NULL, ARM_ARCH_V3M,
25940 ARM_ARCH_NONE,
25941 FPU_ARCH_FPA),
25942 ARM_CPU_OPT ("arm7d", NULL, ARM_ARCH_V3,
25943 ARM_ARCH_NONE,
25944 FPU_ARCH_FPA),
25945 ARM_CPU_OPT ("arm7dm", NULL, ARM_ARCH_V3M,
25946 ARM_ARCH_NONE,
25947 FPU_ARCH_FPA),
25948 ARM_CPU_OPT ("arm7di", NULL, ARM_ARCH_V3,
25949 ARM_ARCH_NONE,
25950 FPU_ARCH_FPA),
25951 ARM_CPU_OPT ("arm7dmi", NULL, ARM_ARCH_V3M,
25952 ARM_ARCH_NONE,
25953 FPU_ARCH_FPA),
25954 ARM_CPU_OPT ("arm70", NULL, ARM_ARCH_V3,
25955 ARM_ARCH_NONE,
25956 FPU_ARCH_FPA),
25957 ARM_CPU_OPT ("arm700", NULL, ARM_ARCH_V3,
25958 ARM_ARCH_NONE,
25959 FPU_ARCH_FPA),
25960 ARM_CPU_OPT ("arm700i", NULL, ARM_ARCH_V3,
25961 ARM_ARCH_NONE,
25962 FPU_ARCH_FPA),
25963 ARM_CPU_OPT ("arm710", NULL, ARM_ARCH_V3,
25964 ARM_ARCH_NONE,
25965 FPU_ARCH_FPA),
25966 ARM_CPU_OPT ("arm710t", NULL, ARM_ARCH_V4T,
25967 ARM_ARCH_NONE,
25968 FPU_ARCH_FPA),
25969 ARM_CPU_OPT ("arm720", NULL, ARM_ARCH_V3,
25970 ARM_ARCH_NONE,
25971 FPU_ARCH_FPA),
25972 ARM_CPU_OPT ("arm720t", NULL, ARM_ARCH_V4T,
25973 ARM_ARCH_NONE,
25974 FPU_ARCH_FPA),
25975 ARM_CPU_OPT ("arm740t", NULL, ARM_ARCH_V4T,
25976 ARM_ARCH_NONE,
25977 FPU_ARCH_FPA),
25978 ARM_CPU_OPT ("arm710c", NULL, ARM_ARCH_V3,
25979 ARM_ARCH_NONE,
25980 FPU_ARCH_FPA),
25981 ARM_CPU_OPT ("arm7100", NULL, ARM_ARCH_V3,
25982 ARM_ARCH_NONE,
25983 FPU_ARCH_FPA),
25984 ARM_CPU_OPT ("arm7500", NULL, ARM_ARCH_V3,
25985 ARM_ARCH_NONE,
25986 FPU_ARCH_FPA),
25987 ARM_CPU_OPT ("arm7500fe", NULL, ARM_ARCH_V3,
25988 ARM_ARCH_NONE,
25989 FPU_ARCH_FPA),
25990 ARM_CPU_OPT ("arm7t", NULL, ARM_ARCH_V4T,
25991 ARM_ARCH_NONE,
25992 FPU_ARCH_FPA),
25993 ARM_CPU_OPT ("arm7tdmi", NULL, ARM_ARCH_V4T,
25994 ARM_ARCH_NONE,
25995 FPU_ARCH_FPA),
25996 ARM_CPU_OPT ("arm7tdmi-s", NULL, ARM_ARCH_V4T,
25997 ARM_ARCH_NONE,
25998 FPU_ARCH_FPA),
25999 ARM_CPU_OPT ("arm8", NULL, ARM_ARCH_V4,
26000 ARM_ARCH_NONE,
26001 FPU_ARCH_FPA),
26002 ARM_CPU_OPT ("arm810", NULL, ARM_ARCH_V4,
26003 ARM_ARCH_NONE,
26004 FPU_ARCH_FPA),
26005 ARM_CPU_OPT ("strongarm", NULL, ARM_ARCH_V4,
26006 ARM_ARCH_NONE,
26007 FPU_ARCH_FPA),
26008 ARM_CPU_OPT ("strongarm1", NULL, ARM_ARCH_V4,
26009 ARM_ARCH_NONE,
26010 FPU_ARCH_FPA),
26011 ARM_CPU_OPT ("strongarm110", NULL, ARM_ARCH_V4,
26012 ARM_ARCH_NONE,
26013 FPU_ARCH_FPA),
26014 ARM_CPU_OPT ("strongarm1100", NULL, ARM_ARCH_V4,
26015 ARM_ARCH_NONE,
26016 FPU_ARCH_FPA),
26017 ARM_CPU_OPT ("strongarm1110", NULL, ARM_ARCH_V4,
26018 ARM_ARCH_NONE,
26019 FPU_ARCH_FPA),
26020 ARM_CPU_OPT ("arm9", NULL, ARM_ARCH_V4T,
26021 ARM_ARCH_NONE,
26022 FPU_ARCH_FPA),
26023 ARM_CPU_OPT ("arm920", "ARM920T", ARM_ARCH_V4T,
26024 ARM_ARCH_NONE,
26025 FPU_ARCH_FPA),
26026 ARM_CPU_OPT ("arm920t", NULL, ARM_ARCH_V4T,
26027 ARM_ARCH_NONE,
26028 FPU_ARCH_FPA),
26029 ARM_CPU_OPT ("arm922t", NULL, ARM_ARCH_V4T,
26030 ARM_ARCH_NONE,
26031 FPU_ARCH_FPA),
26032 ARM_CPU_OPT ("arm940t", NULL, ARM_ARCH_V4T,
26033 ARM_ARCH_NONE,
26034 FPU_ARCH_FPA),
26035 ARM_CPU_OPT ("arm9tdmi", NULL, ARM_ARCH_V4T,
26036 ARM_ARCH_NONE,
26037 FPU_ARCH_FPA),
26038 ARM_CPU_OPT ("fa526", NULL, ARM_ARCH_V4,
26039 ARM_ARCH_NONE,
26040 FPU_ARCH_FPA),
26041 ARM_CPU_OPT ("fa626", NULL, ARM_ARCH_V4,
26042 ARM_ARCH_NONE,
26043 FPU_ARCH_FPA),
26044
26045 /* For V5 or later processors we default to using VFP; but the user
26046 should really set the FPU type explicitly. */
26047 ARM_CPU_OPT ("arm9e-r0", NULL, ARM_ARCH_V5TExP,
26048 ARM_ARCH_NONE,
26049 FPU_ARCH_VFP_V2),
26050 ARM_CPU_OPT ("arm9e", NULL, ARM_ARCH_V5TE,
26051 ARM_ARCH_NONE,
26052 FPU_ARCH_VFP_V2),
26053 ARM_CPU_OPT ("arm926ej", "ARM926EJ-S", ARM_ARCH_V5TEJ,
26054 ARM_ARCH_NONE,
26055 FPU_ARCH_VFP_V2),
26056 ARM_CPU_OPT ("arm926ejs", "ARM926EJ-S", ARM_ARCH_V5TEJ,
26057 ARM_ARCH_NONE,
26058 FPU_ARCH_VFP_V2),
26059 ARM_CPU_OPT ("arm926ej-s", NULL, ARM_ARCH_V5TEJ,
26060 ARM_ARCH_NONE,
26061 FPU_ARCH_VFP_V2),
26062 ARM_CPU_OPT ("arm946e-r0", NULL, ARM_ARCH_V5TExP,
26063 ARM_ARCH_NONE,
26064 FPU_ARCH_VFP_V2),
26065 ARM_CPU_OPT ("arm946e", "ARM946E-S", ARM_ARCH_V5TE,
26066 ARM_ARCH_NONE,
26067 FPU_ARCH_VFP_V2),
26068 ARM_CPU_OPT ("arm946e-s", NULL, ARM_ARCH_V5TE,
26069 ARM_ARCH_NONE,
26070 FPU_ARCH_VFP_V2),
26071 ARM_CPU_OPT ("arm966e-r0", NULL, ARM_ARCH_V5TExP,
26072 ARM_ARCH_NONE,
26073 FPU_ARCH_VFP_V2),
26074 ARM_CPU_OPT ("arm966e", "ARM966E-S", ARM_ARCH_V5TE,
26075 ARM_ARCH_NONE,
26076 FPU_ARCH_VFP_V2),
26077 ARM_CPU_OPT ("arm966e-s", NULL, ARM_ARCH_V5TE,
26078 ARM_ARCH_NONE,
26079 FPU_ARCH_VFP_V2),
26080 ARM_CPU_OPT ("arm968e-s", NULL, ARM_ARCH_V5TE,
26081 ARM_ARCH_NONE,
26082 FPU_ARCH_VFP_V2),
26083 ARM_CPU_OPT ("arm10t", NULL, ARM_ARCH_V5T,
26084 ARM_ARCH_NONE,
26085 FPU_ARCH_VFP_V1),
26086 ARM_CPU_OPT ("arm10tdmi", NULL, ARM_ARCH_V5T,
26087 ARM_ARCH_NONE,
26088 FPU_ARCH_VFP_V1),
26089 ARM_CPU_OPT ("arm10e", NULL, ARM_ARCH_V5TE,
26090 ARM_ARCH_NONE,
26091 FPU_ARCH_VFP_V2),
26092 ARM_CPU_OPT ("arm1020", "ARM1020E", ARM_ARCH_V5TE,
26093 ARM_ARCH_NONE,
26094 FPU_ARCH_VFP_V2),
26095 ARM_CPU_OPT ("arm1020t", NULL, ARM_ARCH_V5T,
26096 ARM_ARCH_NONE,
26097 FPU_ARCH_VFP_V1),
26098 ARM_CPU_OPT ("arm1020e", NULL, ARM_ARCH_V5TE,
26099 ARM_ARCH_NONE,
26100 FPU_ARCH_VFP_V2),
26101 ARM_CPU_OPT ("arm1022e", NULL, ARM_ARCH_V5TE,
26102 ARM_ARCH_NONE,
26103 FPU_ARCH_VFP_V2),
26104 ARM_CPU_OPT ("arm1026ejs", "ARM1026EJ-S", ARM_ARCH_V5TEJ,
26105 ARM_ARCH_NONE,
26106 FPU_ARCH_VFP_V2),
26107 ARM_CPU_OPT ("arm1026ej-s", NULL, ARM_ARCH_V5TEJ,
26108 ARM_ARCH_NONE,
26109 FPU_ARCH_VFP_V2),
26110 ARM_CPU_OPT ("fa606te", NULL, ARM_ARCH_V5TE,
26111 ARM_ARCH_NONE,
26112 FPU_ARCH_VFP_V2),
26113 ARM_CPU_OPT ("fa616te", NULL, ARM_ARCH_V5TE,
26114 ARM_ARCH_NONE,
26115 FPU_ARCH_VFP_V2),
26116 ARM_CPU_OPT ("fa626te", NULL, ARM_ARCH_V5TE,
26117 ARM_ARCH_NONE,
26118 FPU_ARCH_VFP_V2),
26119 ARM_CPU_OPT ("fmp626", NULL, ARM_ARCH_V5TE,
26120 ARM_ARCH_NONE,
26121 FPU_ARCH_VFP_V2),
26122 ARM_CPU_OPT ("fa726te", NULL, ARM_ARCH_V5TE,
26123 ARM_ARCH_NONE,
26124 FPU_ARCH_VFP_V2),
26125 ARM_CPU_OPT ("arm1136js", "ARM1136J-S", ARM_ARCH_V6,
26126 ARM_ARCH_NONE,
26127 FPU_NONE),
26128 ARM_CPU_OPT ("arm1136j-s", NULL, ARM_ARCH_V6,
26129 ARM_ARCH_NONE,
26130 FPU_NONE),
26131 ARM_CPU_OPT ("arm1136jfs", "ARM1136JF-S", ARM_ARCH_V6,
26132 ARM_ARCH_NONE,
26133 FPU_ARCH_VFP_V2),
26134 ARM_CPU_OPT ("arm1136jf-s", NULL, ARM_ARCH_V6,
26135 ARM_ARCH_NONE,
26136 FPU_ARCH_VFP_V2),
26137 ARM_CPU_OPT ("mpcore", "MPCore", ARM_ARCH_V6K,
26138 ARM_ARCH_NONE,
26139 FPU_ARCH_VFP_V2),
26140 ARM_CPU_OPT ("mpcorenovfp", "MPCore", ARM_ARCH_V6K,
26141 ARM_ARCH_NONE,
26142 FPU_NONE),
26143 ARM_CPU_OPT ("arm1156t2-s", NULL, ARM_ARCH_V6T2,
26144 ARM_ARCH_NONE,
26145 FPU_NONE),
26146 ARM_CPU_OPT ("arm1156t2f-s", NULL, ARM_ARCH_V6T2,
26147 ARM_ARCH_NONE,
26148 FPU_ARCH_VFP_V2),
26149 ARM_CPU_OPT ("arm1176jz-s", NULL, ARM_ARCH_V6KZ,
26150 ARM_ARCH_NONE,
26151 FPU_NONE),
26152 ARM_CPU_OPT ("arm1176jzf-s", NULL, ARM_ARCH_V6KZ,
26153 ARM_ARCH_NONE,
26154 FPU_ARCH_VFP_V2),
26155 ARM_CPU_OPT ("cortex-a5", "Cortex-A5", ARM_ARCH_V7A,
26156 ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
26157 FPU_NONE),
26158 ARM_CPU_OPT ("cortex-a7", "Cortex-A7", ARM_ARCH_V7VE,
26159 ARM_ARCH_NONE,
26160 FPU_ARCH_NEON_VFP_V4),
26161 ARM_CPU_OPT ("cortex-a8", "Cortex-A8", ARM_ARCH_V7A,
26162 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
26163 ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
26164 ARM_CPU_OPT ("cortex-a9", "Cortex-A9", ARM_ARCH_V7A,
26165 ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
26166 ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
26167 ARM_CPU_OPT ("cortex-a12", "Cortex-A12", ARM_ARCH_V7VE,
26168 ARM_ARCH_NONE,
26169 FPU_ARCH_NEON_VFP_V4),
26170 ARM_CPU_OPT ("cortex-a15", "Cortex-A15", ARM_ARCH_V7VE,
26171 ARM_ARCH_NONE,
26172 FPU_ARCH_NEON_VFP_V4),
26173 ARM_CPU_OPT ("cortex-a17", "Cortex-A17", ARM_ARCH_V7VE,
26174 ARM_ARCH_NONE,
26175 FPU_ARCH_NEON_VFP_V4),
26176 ARM_CPU_OPT ("cortex-a32", "Cortex-A32", ARM_ARCH_V8A,
26177 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
26178 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
26179 ARM_CPU_OPT ("cortex-a35", "Cortex-A35", ARM_ARCH_V8A,
26180 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
26181 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
26182 ARM_CPU_OPT ("cortex-a53", "Cortex-A53", ARM_ARCH_V8A,
26183 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
26184 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
26185 ARM_CPU_OPT ("cortex-a55", "Cortex-A55", ARM_ARCH_V8_2A,
26186 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
26187 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
26188 ARM_CPU_OPT ("cortex-a57", "Cortex-A57", ARM_ARCH_V8A,
26189 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
26190 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
26191 ARM_CPU_OPT ("cortex-a72", "Cortex-A72", ARM_ARCH_V8A,
26192 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
26193 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
26194 ARM_CPU_OPT ("cortex-a73", "Cortex-A73", ARM_ARCH_V8A,
26195 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
26196 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
26197 ARM_CPU_OPT ("cortex-a75", "Cortex-A75", ARM_ARCH_V8_2A,
26198 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
26199 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
26200 ARM_CPU_OPT ("cortex-a76", "Cortex-A76", ARM_ARCH_V8_2A,
26201 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
26202 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
26203 ARM_CPU_OPT ("cortex-r4", "Cortex-R4", ARM_ARCH_V7R,
26204 ARM_ARCH_NONE,
26205 FPU_NONE),
26206 ARM_CPU_OPT ("cortex-r4f", "Cortex-R4F", ARM_ARCH_V7R,
26207 ARM_ARCH_NONE,
26208 FPU_ARCH_VFP_V3D16),
26209 ARM_CPU_OPT ("cortex-r5", "Cortex-R5", ARM_ARCH_V7R,
26210 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
26211 FPU_NONE),
26212 ARM_CPU_OPT ("cortex-r7", "Cortex-R7", ARM_ARCH_V7R,
26213 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
26214 FPU_ARCH_VFP_V3D16),
26215 ARM_CPU_OPT ("cortex-r8", "Cortex-R8", ARM_ARCH_V7R,
26216 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
26217 FPU_ARCH_VFP_V3D16),
26218 ARM_CPU_OPT ("cortex-r52", "Cortex-R52", ARM_ARCH_V8R,
26219 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
26220 FPU_ARCH_NEON_VFP_ARMV8),
26221 ARM_CPU_OPT ("cortex-m33", "Cortex-M33", ARM_ARCH_V8M_MAIN,
26222 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
26223 FPU_NONE),
26224 ARM_CPU_OPT ("cortex-m23", "Cortex-M23", ARM_ARCH_V8M_BASE,
26225 ARM_ARCH_NONE,
26226 FPU_NONE),
26227 ARM_CPU_OPT ("cortex-m7", "Cortex-M7", ARM_ARCH_V7EM,
26228 ARM_ARCH_NONE,
26229 FPU_NONE),
26230 ARM_CPU_OPT ("cortex-m4", "Cortex-M4", ARM_ARCH_V7EM,
26231 ARM_ARCH_NONE,
26232 FPU_NONE),
26233 ARM_CPU_OPT ("cortex-m3", "Cortex-M3", ARM_ARCH_V7M,
26234 ARM_ARCH_NONE,
26235 FPU_NONE),
26236 ARM_CPU_OPT ("cortex-m1", "Cortex-M1", ARM_ARCH_V6SM,
26237 ARM_ARCH_NONE,
26238 FPU_NONE),
26239 ARM_CPU_OPT ("cortex-m0", "Cortex-M0", ARM_ARCH_V6SM,
26240 ARM_ARCH_NONE,
26241 FPU_NONE),
26242 ARM_CPU_OPT ("cortex-m0plus", "Cortex-M0+", ARM_ARCH_V6SM,
26243 ARM_ARCH_NONE,
26244 FPU_NONE),
26245 ARM_CPU_OPT ("exynos-m1", "Samsung Exynos M1", ARM_ARCH_V8A,
26246 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
26247 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
26248
26249 /* ??? XSCALE is really an architecture. */
26250 ARM_CPU_OPT ("xscale", NULL, ARM_ARCH_XSCALE,
26251 ARM_ARCH_NONE,
26252 FPU_ARCH_VFP_V2),
26253
26254 /* ??? iwmmxt is not a processor. */
26255 ARM_CPU_OPT ("iwmmxt", NULL, ARM_ARCH_IWMMXT,
26256 ARM_ARCH_NONE,
26257 FPU_ARCH_VFP_V2),
26258 ARM_CPU_OPT ("iwmmxt2", NULL, ARM_ARCH_IWMMXT2,
26259 ARM_ARCH_NONE,
26260 FPU_ARCH_VFP_V2),
26261 ARM_CPU_OPT ("i80200", NULL, ARM_ARCH_XSCALE,
26262 ARM_ARCH_NONE,
26263 FPU_ARCH_VFP_V2),
26264
26265 /* Maverick. */
26266 ARM_CPU_OPT ("ep9312", "ARM920T",
26267 ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
26268 ARM_ARCH_NONE, FPU_ARCH_MAVERICK),
26269
26270 /* Marvell processors. */
26271 ARM_CPU_OPT ("marvell-pj4", NULL, ARM_ARCH_V7A,
26272 ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
26273 FPU_ARCH_VFP_V3D16),
26274 ARM_CPU_OPT ("marvell-whitney", NULL, ARM_ARCH_V7A,
26275 ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
26276 FPU_ARCH_NEON_VFP_V4),
26277
26278 /* APM X-Gene family. */
26279 ARM_CPU_OPT ("xgene1", "APM X-Gene 1", ARM_ARCH_V8A,
26280 ARM_ARCH_NONE,
26281 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
26282 ARM_CPU_OPT ("xgene2", "APM X-Gene 2", ARM_ARCH_V8A,
26283 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
26284 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
26285
26286 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
26287 };
26288 #undef ARM_CPU_OPT
26289
/* Command-line entry for -march=: architecture NAME selects core feature
   set VALUE, with DEFAULT_FPU used when no -mfpu= option is given.
   NAME_LEN caches strlen (NAME) so lookups can compare a
   length-delimited prefix of the command-line string.  */
struct arm_arch_option_table
{
  const char * name;
  size_t name_len;
  const arm_feature_set value;
  const arm_feature_set default_fpu;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }

static const struct arm_arch_option_table arm_archs[] =
{
  ARM_ARCH_OPT ("all", ARM_ANY, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv1", ARM_ARCH_V1, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2", ARM_ARCH_V2, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3", ARM_ARCH_V3, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4", ARM_ARCH_V4, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv5", ARM_ARCH_V5, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6", ARM_ARCH_V6, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6j", ARM_ARCH_V6, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT ("armv6kz", ARM_ARCH_V6KZ, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zk", ARM_ARCH_V6KZ, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT ("armv6kzt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zkt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7", ARM_ARCH_V7, FPU_ARCH_VFP),
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.  */
  ARM_ARCH_OPT ("armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7ve", ARM_ARCH_V7VE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7e-m", ARM_ARCH_V7EM, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-m.main", ARM_ARCH_V8M_MAIN, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-a", ARM_ARCH_V8A, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.1-a", ARM_ARCH_V8_1A, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.2-a", ARM_ARCH_V8_2A, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.3-a", ARM_ARCH_V8_3A, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-r", ARM_ARCH_V8R, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.4-a", ARM_ARCH_V8_4A, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.5-a", ARM_ARCH_V8_5A, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
  /* Sentinel: NULL name terminates the lookup loops.  */
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
#undef ARM_ARCH_OPT
26364
/* ISA extensions in the co-processor and main instruction set space.  */

/* Command-line entry for a "+ext" architectural extension suffix:
   "+NAME" merges MERGE_VALUE into the active feature set, "+noNAME"
   clears CLEAR_VALUE from it.  NAME_LEN caches strlen (NAME).  */
struct arm_option_extension_value_table
{
  const char * name;
  size_t name_len;
  const arm_feature_set merge_value;
  const arm_feature_set clear_value;
  /* List of architectures for which an extension is available.  ARM_ARCH_NONE
     indicates that an extension is available for all architectures while
     ARM_ANY marks an empty entry.  */
  const arm_feature_set allowed_archs[2];
};

/* The following table must be in alphabetical order with a NULL last entry.
   arm_parse_extension relies on this ordering to enforce that extensions are
   written alphabetically on the command line, and on duplicate-named entries
   (e.g. "idiv") appearing after the entry used for command-line parsing.  */

#define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
#define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }

static const struct arm_option_extension_value_table arm_extensions[] =
{
  ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	       ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8,
	       ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD),
	       ARM_ARCH_V8_2A),
  ARM_EXT_OPT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	       ARM_FEATURE_CORE (ARM_EXT_V7M, ARM_EXT2_V8M)),
  ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("fp16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       ARM_ARCH_V8_2A),
  ARM_EXT_OPT ("fp16fml", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
						 | ARM_EXT2_FP16_FML),
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
				      | ARM_EXT2_FP16_FML),
	       ARM_ARCH_V8_2A),
  ARM_EXT_OPT2 ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
		ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
		ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
		ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
  /* Duplicate entry for the purpose of allowing ARMv7 to match in presence of
     Thumb divide instruction.  Due to this having the same name as the
     previous entry, this will be ignored when doing command-line parsing and
     only considered by build attribute selection code.  */
  ARM_EXT_OPT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V7)),
  ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
	       ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ARCH_NONE),
  ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
	       ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ARCH_NONE),
  ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
	       ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ARCH_NONE),
  ARM_EXT_OPT2 ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
		ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
		ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
		ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
  ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
  ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
	       ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES),
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES),
	       ARM_ARCH_V8A),
  ARM_EXT_OPT ("ras", ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS),
	       ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_RAS, 0),
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8_1,
	       ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA),
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB),
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB),
	       ARM_ARCH_V8A),
  ARM_EXT_OPT2 ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
		ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
		ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
		ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8,
	       ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
					     | ARM_EXT_DIV),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
	       ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ARCH_NONE),
  /* Sentinel: NULL name terminates the lookup loops.  */
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, { ARM_ARCH_NONE, ARM_ARCH_NONE } }
};
#undef ARM_EXT_OPT
26462
/* ISA floating-point and Advanced SIMD extensions.  */

/* Command-line entry for -mfpu=: FPU NAME selects feature set VALUE.  */
struct arm_option_fpu_value_table
{
  const char * name;
  const arm_feature_set value;
};

/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa", FPU_NONE},
  {"fpe", FPU_ARCH_FPE},
  {"fpe2", FPU_ARCH_FPE},
  {"fpe3", FPU_ARCH_FPA}, /* Third release supports LFM/SFM.  */
  {"fpa", FPU_ARCH_FPA},
  {"fpa10", FPU_ARCH_FPA},
  {"fpa11", FPU_ARCH_FPA},
  {"arm7500fe", FPU_ARCH_FPA},
  {"softvfp", FPU_ARCH_VFP},
  {"softvfp+vfp", FPU_ARCH_VFP_V2},
  {"vfp", FPU_ARCH_VFP_V2},
  {"vfp9", FPU_ARCH_VFP_V2},
  {"vfp3", FPU_ARCH_VFP_V3}, /* Undocumented, use vfpv3.  */
  {"vfp10", FPU_ARCH_VFP_V2},
  {"vfp10-r0", FPU_ARCH_VFP_V1},
  {"vfpxd", FPU_ARCH_VFP_V1xD},
  {"vfpv2", FPU_ARCH_VFP_V2},
  {"vfpv3", FPU_ARCH_VFP_V3},
  {"vfpv3-fp16", FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16", FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd", FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t", FPU_ARCH_VFP_V1},
  {"arm1020e", FPU_ARCH_VFP_V2},
  {"arm1136jfs", FPU_ARCH_VFP_V2}, /* Undocumented, use arm1136jf-s.  */
  {"arm1136jf-s", FPU_ARCH_VFP_V2},
  {"maverick", FPU_ARCH_MAVERICK},
  {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16", FPU_ARCH_NEON_FP16},
  {"vfpv4", FPU_ARCH_VFP_V4},
  {"vfpv4-d16", FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16},
  {"fpv5-d16", FPU_ARCH_VFP_V5D16},
  {"fpv5-sp-d16", FPU_ARCH_VFP_V5_SP_D16},
  {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4},
  {"fp-armv8", FPU_ARCH_VFP_ARMV8},
  {"neon-fp-armv8", FPU_ARCH_NEON_VFP_ARMV8},
  {"crypto-neon-fp-armv8",
   FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
  {"neon-fp-armv8.1", FPU_ARCH_NEON_VFP_ARMV8_1},
  {"crypto-neon-fp-armv8.1",
   FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
  /* Sentinel: NULL name terminates the lookup loops.  */
  {NULL, ARM_ARCH_NONE}
};
26520
/* Generic name -> integer value mapping used for simple enumerated
   command-line options (-mfloat-abi= and, for ELF, -meabi=).  */
struct arm_option_value_table
{
  const char *name;
  long value;
};

/* Argument values accepted by -mfloat-abi=.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard", ARM_FLOAT_ABI_HARD},
  {"softfp", ARM_FLOAT_ABI_SOFTFP},
  {"soft", ARM_FLOAT_ABI_SOFT},
  {NULL, 0}
};

#ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu", EF_ARM_EABI_UNKNOWN},
  {"4", EF_ARM_EABI_VER4},
  {"5", EF_ARM_EABI_VER5},
  {NULL, 0}
};
#endif
26545
/* A long-form, argument-taking command-line option such as -mcpu=.
   OPTION is matched as a prefix of the command-line text; FUNC parses
   the remainder of the argument.  */
struct arm_long_option_table
{
  const char * option;			/* Substring to match.  */
  const char * help;			/* Help information.  */
  int (* func) (const char * subopt);	/* Function to decode sub-option.  */
  const char * deprecated;		/* If non-null, print this message.  */
};
26553
/* Parse STR, a sequence of "+ext" / "+noext" architectural extension
   specifiers, against the base architecture feature set *OPT_SET.
   Merge the features of each added extension into *EXT_SET and clear
   those of each removed one.  Return TRUE on success; on any error a
   diagnostic is issued with as_bad and FALSE is returned.  */

static bfd_boolean
arm_parse_extension (const char *str, const arm_feature_set *opt_set,
		     arm_feature_set *ext_set)
{
  /* We insist on extensions being specified in alphabetical order, and with
     extensions being added before being removed.  We achieve this by having
     the global ARM_EXTENSIONS table in alphabetical order, and using the
     ADDING_VALUE variable to indicate whether we are adding an extension (1)
     or removing it (0) and only allowing it to change in the order
     -1 -> 1 -> 0.  */
  const struct arm_option_extension_value_table * opt = NULL;
  const arm_feature_set arm_any = ARM_ANY;
  int adding_value = -1;

  while (str != NULL && *str != 0)
    {
      const char *ext;
      size_t len;

      /* Every specifier must begin with '+'.  */
      if (*str != '+')
	{
	  as_bad (_("invalid architectural extension"));
	  return FALSE;
	}

      str++;
      ext = strchr (str, '+');

      /* LEN is the length of this specifier; EXT points at the next one
	 (or is NULL for the last).  */
      if (ext != NULL)
	len = ext - str;
      else
	len = strlen (str);

      /* A "no" prefix means the extension is being removed.  Restart the
	 table scan from the top the first time we switch to removal mode.  */
      if (len >= 2 && strncmp (str, "no", 2) == 0)
	{
	  if (adding_value != 0)
	    {
	      adding_value = 0;
	      opt = arm_extensions;
	    }

	  len -= 2;
	  str += 2;
	}
      else if (len > 0)
	{
	  if (adding_value == -1)
	    {
	      adding_value = 1;
	      opt = arm_extensions;
	    }
	  else if (adding_value != 1)
	    {
	      /* Once we have seen a removal, additions are no longer
		 permitted.  */
	      as_bad (_("must specify extensions to add before specifying "
			"those to remove"));
	      return FALSE;
	    }
	}

      if (len == 0)
	{
	  as_bad (_("missing architectural extension"));
	  return FALSE;
	}

      gas_assert (adding_value != -1);
      gas_assert (opt != NULL);

      /* Scan over the options table trying to find an exact match.  Note
	 that OPT continues from where the previous match left off, which is
	 what enforces alphabetical ordering.  */
      for (; opt->name != NULL; opt++)
	if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	  {
	    int i, nb_allowed_archs =
	      sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
	    /* Check we can apply the extension to this architecture.  */
	    for (i = 0; i < nb_allowed_archs; i++)
	      {
		/* Empty entry.  */
		if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
		  continue;
		if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *opt_set))
		  break;
	      }
	    if (i == nb_allowed_archs)
	      {
		as_bad (_("extension does not apply to the base architecture"));
		return FALSE;
	      }

	    /* Add or remove the extension.  */
	    if (adding_value)
	      ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
	    else
	      ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);

	    /* Allowing Thumb division instructions for ARMv7 in autodetection
	       rely on this break so that duplicate extensions (extensions
	       with the same name as a previous extension in the list) are not
	       considered for command-line parsing.  */
	    break;
	  }

      if (opt->name == NULL)
	{
	  /* Did we fail to find an extension because it wasn't specified in
	     alphabetical order, or because it does not exist?  */

	  for (opt = arm_extensions; opt->name != NULL; opt++)
	    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	      break;

	  if (opt->name == NULL)
	    as_bad (_("unknown architectural extension `%s'"), str);
	  else
	    as_bad (_("architectural extensions must be specified in "
		      "alphabetical order"));

	  return FALSE;
	}
      else
	{
	  /* We should skip the extension we've just matched the next time
	     round.  */
	  opt++;
	}

      str = ext;
    };

  return TRUE;
}
26685
26686 static bfd_boolean
26687 arm_parse_cpu (const char *str)
26688 {
26689 const struct arm_cpu_option_table *opt;
26690 const char *ext = strchr (str, '+');
26691 size_t len;
26692
26693 if (ext != NULL)
26694 len = ext - str;
26695 else
26696 len = strlen (str);
26697
26698 if (len == 0)
26699 {
26700 as_bad (_("missing cpu name `%s'"), str);
26701 return FALSE;
26702 }
26703
26704 for (opt = arm_cpus; opt->name != NULL; opt++)
26705 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
26706 {
26707 mcpu_cpu_opt = &opt->value;
26708 if (mcpu_ext_opt == NULL)
26709 mcpu_ext_opt = XNEW (arm_feature_set);
26710 *mcpu_ext_opt = opt->ext;
26711 mcpu_fpu_opt = &opt->default_fpu;
26712 if (opt->canonical_name)
26713 {
26714 gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name));
26715 strcpy (selected_cpu_name, opt->canonical_name);
26716 }
26717 else
26718 {
26719 size_t i;
26720
26721 if (len >= sizeof selected_cpu_name)
26722 len = (sizeof selected_cpu_name) - 1;
26723
26724 for (i = 0; i < len; i++)
26725 selected_cpu_name[i] = TOUPPER (opt->name[i]);
26726 selected_cpu_name[i] = 0;
26727 }
26728
26729 if (ext != NULL)
26730 return arm_parse_extension (ext, mcpu_cpu_opt, mcpu_ext_opt);
26731
26732 return TRUE;
26733 }
26734
26735 as_bad (_("unknown cpu `%s'"), str);
26736 return FALSE;
26737 }
26738
26739 static bfd_boolean
26740 arm_parse_arch (const char *str)
26741 {
26742 const struct arm_arch_option_table *opt;
26743 const char *ext = strchr (str, '+');
26744 size_t len;
26745
26746 if (ext != NULL)
26747 len = ext - str;
26748 else
26749 len = strlen (str);
26750
26751 if (len == 0)
26752 {
26753 as_bad (_("missing architecture name `%s'"), str);
26754 return FALSE;
26755 }
26756
26757 for (opt = arm_archs; opt->name != NULL; opt++)
26758 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
26759 {
26760 march_cpu_opt = &opt->value;
26761 if (march_ext_opt == NULL)
26762 march_ext_opt = XNEW (arm_feature_set);
26763 *march_ext_opt = arm_arch_none;
26764 march_fpu_opt = &opt->default_fpu;
26765 strcpy (selected_cpu_name, opt->name);
26766
26767 if (ext != NULL)
26768 return arm_parse_extension (ext, march_cpu_opt, march_ext_opt);
26769
26770 return TRUE;
26771 }
26772
26773 as_bad (_("unknown architecture `%s'\n"), str);
26774 return FALSE;
26775 }
26776
26777 static bfd_boolean
26778 arm_parse_fpu (const char * str)
26779 {
26780 const struct arm_option_fpu_value_table * opt;
26781
26782 for (opt = arm_fpus; opt->name != NULL; opt++)
26783 if (streq (opt->name, str))
26784 {
26785 mfpu_opt = &opt->value;
26786 return TRUE;
26787 }
26788
26789 as_bad (_("unknown floating point format `%s'\n"), str);
26790 return FALSE;
26791 }
26792
26793 static bfd_boolean
26794 arm_parse_float_abi (const char * str)
26795 {
26796 const struct arm_option_value_table * opt;
26797
26798 for (opt = arm_float_abis; opt->name != NULL; opt++)
26799 if (streq (opt->name, str))
26800 {
26801 mfloat_abi_opt = opt->value;
26802 return TRUE;
26803 }
26804
26805 as_bad (_("unknown floating point abi `%s'\n"), str);
26806 return FALSE;
26807 }
26808
26809 #ifdef OBJ_ELF
26810 static bfd_boolean
26811 arm_parse_eabi (const char * str)
26812 {
26813 const struct arm_option_value_table *opt;
26814
26815 for (opt = arm_eabis; opt->name != NULL; opt++)
26816 if (streq (opt->name, str))
26817 {
26818 meabi_flags = opt->value;
26819 return TRUE;
26820 }
26821 as_bad (_("unknown EABI `%s'\n"), str);
26822 return FALSE;
26823 }
26824 #endif
26825
26826 static bfd_boolean
26827 arm_parse_it_mode (const char * str)
26828 {
26829 bfd_boolean ret = TRUE;
26830
26831 if (streq ("arm", str))
26832 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
26833 else if (streq ("thumb", str))
26834 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
26835 else if (streq ("always", str))
26836 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
26837 else if (streq ("never", str))
26838 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
26839 else
26840 {
26841 as_bad (_("unknown implicit IT mode `%s', should be "\
26842 "arm, thumb, always, or never."), str);
26843 ret = FALSE;
26844 }
26845
26846 return ret;
26847 }
26848
26849 static bfd_boolean
26850 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED)
26851 {
26852 codecomposer_syntax = TRUE;
26853 arm_comment_chars[0] = ';';
26854 arm_line_separator_chars[0] = 0;
26855 return TRUE;
26856 }
26857
/* Long-form (argument-taking) command-line options.  Matched by prefix in
   md_parse_option, which then hands the argument to the FUNC parser.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {"mccs", N_("\t\t\t TI CodeComposer Studio syntax compatibility mode"),
   arm_ccs_mode, NULL},
  /* Sentinel: NULL option terminates the lookup loops.  */
  {NULL, NULL, 0, NULL}
};
26878
/* gas target hook: handle command-line option C with argument ARG (which
   may be NULL).  Return non-zero if the option was recognized and
   consumed, zero otherwise.  Options are looked up, in order, in the
   short-option table ARM_OPTS, the legacy table ARM_LEGACY_OPTS and the
   long-option table ARM_LONG_OPTS.  */
int
md_parse_option (int c, const char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

#ifdef OBJ_ELF
    case OPTION_FDPIC:
      arm_fdpic = TRUE;
      break;
#endif /* OBJ_ELF */

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* Simple flag options: the full option text (first character in C,
	 remainder in ARG) must match exactly.  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Legacy options: like the above, but the variable receives a
	 pointer to the table's feature-set value.  */
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  The "- 1" accounts for the
		 option's first character already being in C, not ARG.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
26975
26976 void
26977 md_show_usage (FILE * fp)
26978 {
26979 struct arm_option_table *opt;
26980 struct arm_long_option_table *lopt;
26981
26982 fprintf (fp, _(" ARM-specific assembler options:\n"));
26983
26984 for (opt = arm_opts; opt->option != NULL; opt++)
26985 if (opt->help != NULL)
26986 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
26987
26988 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
26989 if (lopt->help != NULL)
26990 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
26991
26992 #ifdef OPTION_EB
26993 fprintf (fp, _("\
26994 -EB assemble code for a big-endian cpu\n"));
26995 #endif
26996
26997 #ifdef OPTION_EL
26998 fprintf (fp, _("\
26999 -EL assemble code for a little-endian cpu\n"));
27000 #endif
27001
27002 fprintf (fp, _("\
27003 --fix-v4bx Allow BX in ARMv4 code\n"));
27004
27005 #ifdef OBJ_ELF
27006 fprintf (fp, _("\
27007 --fdpic generate an FDPIC object file\n"));
27008 #endif /* OBJ_ELF */
27009 }
27010
27011 #ifdef OBJ_ELF
27012
/* One entry mapping an architecture feature set to the EABI
   Tag_CPU_arch build attribute value VAL.  */
typedef struct
{
  int val;
  arm_feature_set flags;
} cpu_arch_ver_table;

/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   chronologically for architectures, with an exception for ARMv6-M and
   ARMv6S-M due to legacy reasons.  No new architecture should have a
   special case.  This allows for build attribute selection results to be
   stable when new architectures are added.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
  {TAG_CPU_ARCH_PRE_V4, ARM_ARCH_V1},
  {TAG_CPU_ARCH_PRE_V4, ARM_ARCH_V2},
  {TAG_CPU_ARCH_PRE_V4, ARM_ARCH_V2S},
  {TAG_CPU_ARCH_PRE_V4, ARM_ARCH_V3},
  {TAG_CPU_ARCH_PRE_V4, ARM_ARCH_V3M},
  {TAG_CPU_ARCH_V4, ARM_ARCH_V4xM},
  {TAG_CPU_ARCH_V4, ARM_ARCH_V4},
  {TAG_CPU_ARCH_V4T, ARM_ARCH_V4TxM},
  {TAG_CPU_ARCH_V4T, ARM_ARCH_V4T},
  {TAG_CPU_ARCH_V5T, ARM_ARCH_V5xM},
  {TAG_CPU_ARCH_V5T, ARM_ARCH_V5},
  {TAG_CPU_ARCH_V5T, ARM_ARCH_V5TxM},
  {TAG_CPU_ARCH_V5T, ARM_ARCH_V5T},
  {TAG_CPU_ARCH_V5TE, ARM_ARCH_V5TExP},
  {TAG_CPU_ARCH_V5TE, ARM_ARCH_V5TE},
  {TAG_CPU_ARCH_V5TEJ, ARM_ARCH_V5TEJ},
  {TAG_CPU_ARCH_V6, ARM_ARCH_V6},
  {TAG_CPU_ARCH_V6KZ, ARM_ARCH_V6Z},
  {TAG_CPU_ARCH_V6KZ, ARM_ARCH_V6KZ},
  {TAG_CPU_ARCH_V6K, ARM_ARCH_V6K},
  {TAG_CPU_ARCH_V6T2, ARM_ARCH_V6T2},
  {TAG_CPU_ARCH_V6T2, ARM_ARCH_V6KT2},
  {TAG_CPU_ARCH_V6T2, ARM_ARCH_V6ZT2},
  {TAG_CPU_ARCH_V6T2, ARM_ARCH_V6KZT2},

  /* When assembling a file with only ARMv6-M or ARMv6S-M instruction, GNU as
     always selected build attributes to match those of ARMv6-M
     (resp. ARMv6S-M).  However, due to these architectures being a strict
     subset of ARMv7-M in terms of instructions available, ARMv7-M attributes
     would be selected when fully respecting chronology of architectures.
     It is thus necessary to make a special case of ARMv6-M and ARMv6S-M and
     move them before ARMv7 architectures.  */
  {TAG_CPU_ARCH_V6_M, ARM_ARCH_V6M},
  {TAG_CPU_ARCH_V6S_M, ARM_ARCH_V6SM},

  {TAG_CPU_ARCH_V7, ARM_ARCH_V7},
  {TAG_CPU_ARCH_V7, ARM_ARCH_V7A},
  {TAG_CPU_ARCH_V7, ARM_ARCH_V7R},
  {TAG_CPU_ARCH_V7, ARM_ARCH_V7M},
  {TAG_CPU_ARCH_V7, ARM_ARCH_V7VE},
  {TAG_CPU_ARCH_V7E_M, ARM_ARCH_V7EM},
  {TAG_CPU_ARCH_V8, ARM_ARCH_V8A},
  {TAG_CPU_ARCH_V8, ARM_ARCH_V8_1A},
  {TAG_CPU_ARCH_V8, ARM_ARCH_V8_2A},
  {TAG_CPU_ARCH_V8, ARM_ARCH_V8_3A},
  {TAG_CPU_ARCH_V8M_BASE, ARM_ARCH_V8M_BASE},
  {TAG_CPU_ARCH_V8M_MAIN, ARM_ARCH_V8M_MAIN},
  {TAG_CPU_ARCH_V8R, ARM_ARCH_V8R},
  {TAG_CPU_ARCH_V8, ARM_ARCH_V8_4A},
  {TAG_CPU_ARCH_V8, ARM_ARCH_V8_5A},
  /* Sentinel: VAL of -1 terminates the scan loops.  */
  {-1, ARM_ARCH_NONE}
};
27078
27079 /* Set an attribute if it has not already been set by the user. */
27080
27081 static void
27082 aeabi_set_attribute_int (int tag, int value)
27083 {
27084 if (tag < 1
27085 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
27086 || !attributes_set_explicitly[tag])
27087 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
27088 }
27089
27090 static void
27091 aeabi_set_attribute_string (int tag, const char *value)
27092 {
27093 if (tag < 1
27094 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
27095 || !attributes_set_explicitly[tag])
27096 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
27097 }
27098
27099 /* Return whether features in the *NEEDED feature set are available via
27100 extensions for the architecture whose feature set is *ARCH_FSET. */
27101
27102 static bfd_boolean
27103 have_ext_for_needed_feat_p (const arm_feature_set *arch_fset,
27104 const arm_feature_set *needed)
27105 {
27106 int i, nb_allowed_archs;
27107 arm_feature_set ext_fset;
27108 const struct arm_option_extension_value_table *opt;
27109
27110 ext_fset = arm_arch_none;
27111 for (opt = arm_extensions; opt->name != NULL; opt++)
27112 {
27113 /* Extension does not provide any feature we need. */
27114 if (!ARM_CPU_HAS_FEATURE (*needed, opt->merge_value))
27115 continue;
27116
27117 nb_allowed_archs =
27118 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
27119 for (i = 0; i < nb_allowed_archs; i++)
27120 {
27121 /* Empty entry. */
27122 if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_arch_any))
27123 break;
27124
27125 /* Extension is available, add it. */
27126 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *arch_fset))
27127 ARM_MERGE_FEATURE_SETS (ext_fset, ext_fset, opt->merge_value);
27128 }
27129 }
27130
27131 /* Can we enable all features in *needed? */
27132 return ARM_FSET_CPU_SUBSET (*needed, ext_fset);
27133 }
27134
27135 /* Select value for Tag_CPU_arch and Tag_CPU_arch_profile build attributes for
27136 a given architecture feature set *ARCH_EXT_FSET including extension feature
27137 set *EXT_FSET. Selection logic used depend on EXACT_MATCH:
27138 - if true, check for an exact match of the architecture modulo extensions;
27139 - otherwise, select build attribute value of the first superset
27140 architecture released so that results remains stable when new architectures
27141 are added.
27142 For -march/-mcpu=all the build attribute value of the most featureful
27143 architecture is returned. Tag_CPU_arch_profile result is returned in
27144 PROFILE. */
27145
static int
get_aeabi_cpu_arch_from_fset (const arm_feature_set *arch_ext_fset,
			      const arm_feature_set *ext_fset,
			      char *profile, int exact_match)
{
  arm_feature_set arch_fset;
  const cpu_arch_ver_table *p_ver, *p_ver_ret = NULL;

  /* Select most featureful architecture with all its extensions if building
     for -march=all as the feature sets used to set build attributes.  */
  if (ARM_FEATURE_EQUAL (*arch_ext_fset, arm_arch_any))
    {
      /* Force revisiting of decision for each new architecture.  */
      gas_assert (MAX_TAG_CPU_ARCH <= TAG_CPU_ARCH_V8M_MAIN);
      *profile = 'A';
      return TAG_CPU_ARCH_V8;
    }

  /* arch_fset: the user-specified architecture with extension bits
     removed, ie. the base architecture alone.  */
  ARM_CLEAR_FEATURE (arch_fset, *arch_ext_fset, *ext_fset);

  /* Walk the known-architecture table in release order; the first
     acceptable entry found (modulo the rules below) wins so results stay
     stable as new architectures are appended.  */
  for (p_ver = cpu_arch_ver; p_ver->val != -1; p_ver++)
    {
      arm_feature_set known_arch_fset;

      /* Compare architecture bits only; FPU bits are tracked separately.  */
      ARM_CLEAR_FEATURE (known_arch_fset, p_ver->flags, fpu_any);
      if (exact_match)
	{
	  /* Base architecture match user-specified architecture and
	     extensions, eg. ARMv6S-M matching -march=armv6-m+os.  */
	  if (ARM_FEATURE_EQUAL (*arch_ext_fset, known_arch_fset))
	    {
	      p_ver_ret = p_ver;
	      goto found;
	    }
	  /* Base architecture match user-specified architecture only
	     (eg. ARMv6-M in the same case as above).  Record it in case we
	     find a match with above condition.  */
	  else if (p_ver_ret == NULL
		   && ARM_FEATURE_EQUAL (arch_fset, known_arch_fset))
	    p_ver_ret = p_ver;
	}
      else
	{

	  /* Architecture has all features wanted.  */
	  if (ARM_FSET_CPU_SUBSET (arch_fset, known_arch_fset))
	    {
	      arm_feature_set added_fset;

	      /* Compute features added by this architecture over the one
		 recorded in p_ver_ret.  */
	      if (p_ver_ret != NULL)
		ARM_CLEAR_FEATURE (added_fset, known_arch_fset,
				   p_ver_ret->flags);
	      /* First architecture that match incl. with extensions, or the
		 only difference in features over the recorded match is
		 features that were optional and are now mandatory.  */
	      if (p_ver_ret == NULL
		  || ARM_FSET_CPU_SUBSET (added_fset, arch_fset))
		{
		  p_ver_ret = p_ver;
		  goto found;
		}
	    }
	  else if (p_ver_ret == NULL)
	    {
	      arm_feature_set needed_ext_fset;

	      /* needed_ext_fset: features wanted but missing from this
		 base architecture.  */
	      ARM_CLEAR_FEATURE (needed_ext_fset, arch_fset, known_arch_fset);

	      /* Architecture has all features needed when using some
		 extensions.  Record it and continue searching in case there
		 exist an architecture providing all needed features without
		 the need for extensions (eg. ARMv6S-M Vs ARMv6-M with
		 OS extension).  */
	      if (have_ext_for_needed_feat_p (&known_arch_fset,
					      &needed_ext_fset))
		p_ver_ret = p_ver;
	    }
	}
    }

  if (p_ver_ret == NULL)
    return -1;

found:
  /* Tag_CPU_arch_profile.  'A' = application, 'R' = real-time,
     'M' = microcontroller, '\0' = no specific profile.  ARMv8-A is
     recognised either by arm_ext_v8 or by having the atomics extension
     without being v8-M only.  */
  if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7a)
      || ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8)
      || (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_atomics)
	  && !ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8m_m_only)))
    *profile = 'A';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7r))
    *profile = 'R';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_m))
    *profile = 'M';
  else
    *profile = '\0';
  return p_ver_ret->val;
}
27246
27247 /* Set the public EABI object attributes. */
27248
static void
aeabi_set_public_attributes (void)
{
  char profile = '\0';
  int arch = -1;
  int virt_sec = 0;
  int fp16_optional = 0;
  int skip_exact_match = 0;
  arm_feature_set flags, flags_arch, flags_ext;

  /* Autodetection mode, choose the architecture based the instructions
     actually used.  */
  if (no_cpu_selected ())
    {
      ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);

      /* Any ARM instruction implies at least ARMv1.  */
      if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);

      /* Any Thumb instruction implies at least ARMv4T.  */
      if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);

      /* Code run during relaxation relies on selected_cpu being set.  */
      ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
      flags_ext = arm_arch_none;
      ARM_CLEAR_FEATURE (selected_arch, flags_arch, flags_ext);
      selected_ext = flags_ext;
      selected_cpu = flags;
    }
  /* Otherwise, choose the architecture based on the capabilities of the
     requested cpu.  */
  else
    {
      ARM_MERGE_FEATURE_SETS (flags_arch, selected_arch, selected_ext);
      ARM_CLEAR_FEATURE (flags_arch, flags_arch, fpu_any);
      flags_ext = selected_ext;
      flags = selected_cpu;
    }
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_fpu);

  /* Allow the user to override the reported architecture.  */
  if (!ARM_FEATURE_ZERO (selected_object_arch))
    {
      ARM_CLEAR_FEATURE (flags_arch, selected_object_arch, fpu_any);
      flags_ext = arm_arch_none;
    }
  else
    skip_exact_match = ARM_FEATURE_EQUAL (selected_cpu, arm_arch_any);

  /* When this function is run again after relaxation has happened there is no
     way to determine whether an architecture or CPU was specified by the user:
     - selected_cpu is set above for relaxation to work;
     - march_cpu_opt is not set if only -mcpu or .cpu is used;
     - mcpu_cpu_opt is set to arm_arch_any for autodetection.
     Therefore, if not in -march=all case we first try an exact match and fall
     back to autodetection.  */
  if (!skip_exact_match)
    arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 1);
  if (arch == -1)
    arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 0);
  if (arch == -1)
    as_bad (_("no architecture contains all the instructions used\n"));

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      /* For generic "armvN" names, report the upper-cased version suffix
	 (eg. "7-A") rather than the full string.  */
      if (strncmp (q, "armv", 4) == 0)
	{
	  int i;

	  q += 4;
	  for (i = 0; q[i]; i++)
	    q[i] = TOUPPER (q[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  */
  if (profile != '\0')
    aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);

  /* Tag_DSP_extension.  */
  if (ARM_CPU_HAS_FEATURE (selected_ext, arm_ext_dsp))
    aeabi_set_attribute_int (Tag_DSP_extension, 1);

  /* flags_arch is reused below as the architecture bits of FLAGS without
     the FPU bits; zero means "FPU-only selection" (eg. .fpu with no CPU),
     in which case both ISA tags are permitted.  */
  ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || ARM_FEATURE_ZERO (flags_arch))
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || ARM_FEATURE_ZERO (flags_arch))
    {
      int thumb_isa_use;

      /* 3 = Thumb as in v8-M baseline; 2 = Thumb-2; 1 = 16-bit Thumb.  */
      if (!ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
	  && ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only))
	thumb_isa_use = 3;
      else if (ARM_CPU_HAS_FEATURE (flags, arm_arch_t2))
	thumb_isa_use = 2;
      else
	thumb_isa_use = 1;
      aeabi_set_attribute_int (Tag_THUMB_ISA_use, thumb_isa_use);
    }

  /* Tag_VFP_arch.  Checked from most to least capable FPU; the d32
     feature distinguishes the 32 double-register variants.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 7 : 8);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    {
      fp16_optional = 1;
      aeabi_set_attribute_int (Tag_VFP_arch, 3);
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    {
      aeabi_set_attribute_int (Tag_VFP_arch, 4);
      fp16_optional = 1;
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_ABI_HardFP_use.  Set only for single-precision-only VFP
     (v1xd without full v1).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v8_1))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 4);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    {
      if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
	}
      else
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
	  fp16_optional = 1;
	}
    }

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  Only relevant
     for FPUs where half-precision support is optional.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);

  /* Tag_DIV_use.

     We set Tag_DIV_use to two when integer divide instructions have been used
     in ARM state, or when Thumb integer divide instructions have been used,
     but we have no architecture profile set, nor have we any ARM instructions.

     For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
     by the base architecture.

     For new architectures we will have to check these tests.  */
  gas_assert (arch <= TAG_CPU_ARCH_V8M_MAIN);
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
    aeabi_set_attribute_int (Tag_DIV_use, 0);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
	   || (profile == '\0'
	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
    aeabi_set_attribute_int (Tag_DIV_use, 2);

  /* Tag_MP_extension_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
    aeabi_set_attribute_int (Tag_MPextension_use, 1);

  /* Tag Virtualization_use.  Bit 0: security (TrustZone), bit 1:
     virtualization extensions.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
    virt_sec |= 1;
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
    virt_sec |= 2;
  if (virt_sec != 0)
    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
}
27452
27453 /* Post relaxation hook. Recompute ARM attributes now that relaxation is
27454 finished and free extension feature bits which will not be used anymore. */
27455
27456 void
27457 arm_md_post_relax (void)
27458 {
27459 aeabi_set_public_attributes ();
27460 XDELETE (mcpu_ext_opt);
27461 mcpu_ext_opt = NULL;
27462 XDELETE (march_ext_opt);
27463 march_ext_opt = NULL;
27464 }
27465
27466 /* Add the default contents for the .ARM.attributes section. */
27467
27468 void
27469 arm_md_end (void)
27470 {
27471 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
27472 return;
27473
27474 aeabi_set_public_attributes ();
27475 }
27476 #endif /* OBJ_ELF */
27477
27478 /* Parse a .cpu directive. */
27479
27480 static void
27481 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
27482 {
27483 const struct arm_cpu_option_table *opt;
27484 char *name;
27485 char saved_char;
27486
27487 name = input_line_pointer;
27488 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
27489 input_line_pointer++;
27490 saved_char = *input_line_pointer;
27491 *input_line_pointer = 0;
27492
27493 /* Skip the first "all" entry. */
27494 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
27495 if (streq (opt->name, name))
27496 {
27497 selected_arch = opt->value;
27498 selected_ext = opt->ext;
27499 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
27500 if (opt->canonical_name)
27501 strcpy (selected_cpu_name, opt->canonical_name);
27502 else
27503 {
27504 int i;
27505 for (i = 0; opt->name[i]; i++)
27506 selected_cpu_name[i] = TOUPPER (opt->name[i]);
27507
27508 selected_cpu_name[i] = 0;
27509 }
27510 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
27511
27512 *input_line_pointer = saved_char;
27513 demand_empty_rest_of_line ();
27514 return;
27515 }
27516 as_bad (_("unknown cpu `%s'"), name);
27517 *input_line_pointer = saved_char;
27518 ignore_rest_of_line ();
27519 }
27520
27521 /* Parse a .arch directive. */
27522
27523 static void
27524 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
27525 {
27526 const struct arm_arch_option_table *opt;
27527 char saved_char;
27528 char *name;
27529
27530 name = input_line_pointer;
27531 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
27532 input_line_pointer++;
27533 saved_char = *input_line_pointer;
27534 *input_line_pointer = 0;
27535
27536 /* Skip the first "all" entry. */
27537 for (opt = arm_archs + 1; opt->name != NULL; opt++)
27538 if (streq (opt->name, name))
27539 {
27540 selected_arch = opt->value;
27541 selected_ext = arm_arch_none;
27542 selected_cpu = selected_arch;
27543 strcpy (selected_cpu_name, opt->name);
27544 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
27545 *input_line_pointer = saved_char;
27546 demand_empty_rest_of_line ();
27547 return;
27548 }
27549
27550 as_bad (_("unknown architecture `%s'\n"), name);
27551 *input_line_pointer = saved_char;
27552 ignore_rest_of_line ();
27553 }
27554
27555 /* Parse a .object_arch directive. */
27556
27557 static void
27558 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
27559 {
27560 const struct arm_arch_option_table *opt;
27561 char saved_char;
27562 char *name;
27563
27564 name = input_line_pointer;
27565 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
27566 input_line_pointer++;
27567 saved_char = *input_line_pointer;
27568 *input_line_pointer = 0;
27569
27570 /* Skip the first "all" entry. */
27571 for (opt = arm_archs + 1; opt->name != NULL; opt++)
27572 if (streq (opt->name, name))
27573 {
27574 selected_object_arch = opt->value;
27575 *input_line_pointer = saved_char;
27576 demand_empty_rest_of_line ();
27577 return;
27578 }
27579
27580 as_bad (_("unknown architecture `%s'\n"), name);
27581 *input_line_pointer = saved_char;
27582 ignore_rest_of_line ();
27583 }
27584
27585 /* Parse a .arch_extension directive. */
27586
27587 static void
27588 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
27589 {
27590 const struct arm_option_extension_value_table *opt;
27591 char saved_char;
27592 char *name;
27593 int adding_value = 1;
27594
27595 name = input_line_pointer;
27596 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
27597 input_line_pointer++;
27598 saved_char = *input_line_pointer;
27599 *input_line_pointer = 0;
27600
27601 if (strlen (name) >= 2
27602 && strncmp (name, "no", 2) == 0)
27603 {
27604 adding_value = 0;
27605 name += 2;
27606 }
27607
27608 for (opt = arm_extensions; opt->name != NULL; opt++)
27609 if (streq (opt->name, name))
27610 {
27611 int i, nb_allowed_archs =
27612 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[i]);
27613 for (i = 0; i < nb_allowed_archs; i++)
27614 {
27615 /* Empty entry. */
27616 if (ARM_CPU_IS_ANY (opt->allowed_archs[i]))
27617 continue;
27618 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], selected_arch))
27619 break;
27620 }
27621
27622 if (i == nb_allowed_archs)
27623 {
27624 as_bad (_("architectural extension `%s' is not allowed for the "
27625 "current base architecture"), name);
27626 break;
27627 }
27628
27629 if (adding_value)
27630 ARM_MERGE_FEATURE_SETS (selected_ext, selected_ext,
27631 opt->merge_value);
27632 else
27633 ARM_CLEAR_FEATURE (selected_ext, selected_ext, opt->clear_value);
27634
27635 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
27636 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
27637 *input_line_pointer = saved_char;
27638 demand_empty_rest_of_line ();
27639 /* Allowing Thumb division instructions for ARMv7 in autodetection rely
27640 on this return so that duplicate extensions (extensions with the
27641 same name as a previous extension in the list) are not considered
27642 for command-line parsing. */
27643 return;
27644 }
27645
27646 if (opt->name == NULL)
27647 as_bad (_("unknown architecture extension `%s'\n"), name);
27648
27649 *input_line_pointer = saved_char;
27650 ignore_rest_of_line ();
27651 }
27652
27653 /* Parse a .fpu directive. */
27654
27655 static void
27656 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
27657 {
27658 const struct arm_option_fpu_value_table *opt;
27659 char saved_char;
27660 char *name;
27661
27662 name = input_line_pointer;
27663 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
27664 input_line_pointer++;
27665 saved_char = *input_line_pointer;
27666 *input_line_pointer = 0;
27667
27668 for (opt = arm_fpus; opt->name != NULL; opt++)
27669 if (streq (opt->name, name))
27670 {
27671 selected_fpu = opt->value;
27672 #ifndef CPU_DEFAULT
27673 if (no_cpu_selected ())
27674 ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
27675 else
27676 #endif
27677 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
27678 *input_line_pointer = saved_char;
27679 demand_empty_rest_of_line ();
27680 return;
27681 }
27682
27683 as_bad (_("unknown floating point format `%s'\n"), name);
27684 *input_line_pointer = saved_char;
27685 ignore_rest_of_line ();
27686 }
27687
27688 /* Copy symbol information. */
27689
void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  /* Propagate the ARM-specific per-symbol flag word from SRC to DEST.  */
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
27695
27696 #ifdef OBJ_ELF
27697 /* Given a symbolic attribute NAME, return the proper integer value.
27698 Returns -1 if the attribute is not known. */
27699
27700 int
27701 arm_convert_symbolic_attribute (const char *name)
27702 {
27703 static const struct
27704 {
27705 const char * name;
27706 const int tag;
27707 }
27708 attribute_table[] =
27709 {
27710 /* When you modify this table you should
27711 also modify the list in doc/c-arm.texi. */
27712 #define T(tag) {#tag, tag}
27713 T (Tag_CPU_raw_name),
27714 T (Tag_CPU_name),
27715 T (Tag_CPU_arch),
27716 T (Tag_CPU_arch_profile),
27717 T (Tag_ARM_ISA_use),
27718 T (Tag_THUMB_ISA_use),
27719 T (Tag_FP_arch),
27720 T (Tag_VFP_arch),
27721 T (Tag_WMMX_arch),
27722 T (Tag_Advanced_SIMD_arch),
27723 T (Tag_PCS_config),
27724 T (Tag_ABI_PCS_R9_use),
27725 T (Tag_ABI_PCS_RW_data),
27726 T (Tag_ABI_PCS_RO_data),
27727 T (Tag_ABI_PCS_GOT_use),
27728 T (Tag_ABI_PCS_wchar_t),
27729 T (Tag_ABI_FP_rounding),
27730 T (Tag_ABI_FP_denormal),
27731 T (Tag_ABI_FP_exceptions),
27732 T (Tag_ABI_FP_user_exceptions),
27733 T (Tag_ABI_FP_number_model),
27734 T (Tag_ABI_align_needed),
27735 T (Tag_ABI_align8_needed),
27736 T (Tag_ABI_align_preserved),
27737 T (Tag_ABI_align8_preserved),
27738 T (Tag_ABI_enum_size),
27739 T (Tag_ABI_HardFP_use),
27740 T (Tag_ABI_VFP_args),
27741 T (Tag_ABI_WMMX_args),
27742 T (Tag_ABI_optimization_goals),
27743 T (Tag_ABI_FP_optimization_goals),
27744 T (Tag_compatibility),
27745 T (Tag_CPU_unaligned_access),
27746 T (Tag_FP_HP_extension),
27747 T (Tag_VFP_HP_extension),
27748 T (Tag_ABI_FP_16bit_format),
27749 T (Tag_MPextension_use),
27750 T (Tag_DIV_use),
27751 T (Tag_nodefaults),
27752 T (Tag_also_compatible_with),
27753 T (Tag_conformance),
27754 T (Tag_T2EE_use),
27755 T (Tag_Virtualization_use),
27756 T (Tag_DSP_extension),
27757 /* We deliberately do not include Tag_MPextension_use_legacy. */
27758 #undef T
27759 };
27760 unsigned int i;
27761
27762 if (name == NULL)
27763 return -1;
27764
27765 for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
27766 if (streq (name, attribute_table[i].name))
27767 return attribute_table[i].tag;
27768
27769 return -1;
27770 }
27771
27772 /* Apply sym value for relocations only in the case that they are for
27773 local symbols in the same segment as the fixup and you have the
27774 respective architectural feature for blx and simple switches. */
27775
27776 int
27777 arm_apply_sym_value (struct fix * fixP, segT this_seg)
27778 {
27779 if (fixP->fx_addsy
27780 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
27781 /* PR 17444: If the local symbol is in a different section then a reloc
27782 will always be generated for it, so applying the symbol value now
27783 will result in a double offset being stored in the relocation. */
27784 && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
27785 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
27786 {
27787 switch (fixP->fx_r_type)
27788 {
27789 case BFD_RELOC_ARM_PCREL_BLX:
27790 case BFD_RELOC_THUMB_PCREL_BRANCH23:
27791 if (ARM_IS_FUNC (fixP->fx_addsy))
27792 return 1;
27793 break;
27794
27795 case BFD_RELOC_ARM_PCREL_CALL:
27796 case BFD_RELOC_THUMB_PCREL_BLX:
27797 if (THUMB_IS_FUNC (fixP->fx_addsy))
27798 return 1;
27799 break;
27800
27801 default:
27802 break;
27803 }
27804
27805 }
27806 return 0;
27807 }
27808 #endif /* OBJ_ELF */