Remove arm-aout and arm-coff support
[binutils-gdb.git] / gas / config / tc-arm.c
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2018 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8
9 This file is part of GAS, the GNU Assembler.
10
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
14 any later version.
15
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24 02110-1301, USA. */
25
26 #include "as.h"
27 #include <limits.h>
28 #include <stdarg.h>
29 #define NO_RELOC 0
30 #include "safe-ctype.h"
31 #include "subsegs.h"
32 #include "obstack.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
35
36 #ifdef OBJ_ELF
37 #include "elf/arm.h"
38 #include "dw2gencfi.h"
39 #endif
40
41 #include "dwarf2dbg.h"
42
43 #ifdef OBJ_ELF
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
46
47 /* This structure holds the unwinding state. */
48
49 static struct
50 {
/* Symbol marking the start of the function being unwound (.fnstart).  */
51 symbolS * proc_start;
/* Symbol for this function's unwind table entry.  */
52 symbolS * table_entry;
/* Explicit personality routine symbol, when one was given.  */
53 symbolS * personality_routine;
/* Index of the standard personality routine to use.  NOTE(review): the
   sentinel used for "none" is established by code outside this view.  */
54 int personality_index;
55 /* The segment containing the function. */
56 segT saved_seg;
57 subsegT saved_subseg;
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes;
/* Bytes used and bytes allocated in OPCODES.  */
60 int opcode_count;
61 int opcode_alloc;
62 /* The number of bytes pushed to the stack. */
63 offsetT frame_size;
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
70 offsetT fp_offset;
71 int fp_reg;
72 /* Nonzero if an unwind_setfp directive has been seen. */
73 unsigned fp_used:1;
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored:1;
76 } unwind;
77
78 #endif /* OBJ_ELF */
79
80 /* Results from operand parsing worker functions. */
81
82 typedef enum
83 {
/* Operand parsed successfully.  */
84 PARSE_OPERAND_SUCCESS,
/* Parse failed; the caller may backtrack and try an alternative form.  */
85 PARSE_OPERAND_FAIL,
/* Parse failed in a way that makes trying alternative forms pointless.  */
86 PARSE_OPERAND_FAIL_NO_BACKTRACK
87 } parse_operand_result;
88
/* Floating-point ABI variants selectable for the output.  */
89 enum arm_float_abi
90 {
/* FP values passed in FP registers.  */
91 ARM_FLOAT_ABI_HARD,
/* Soft-float calling convention, but FP instructions permitted.  */
92 ARM_FLOAT_ABI_SOFTFP,
/* Pure software floating point.  */
93 ARM_FLOAT_ABI_SOFT
94 };
95
96 /* Types of processor to assemble for. */
97 #ifndef CPU_DEFAULT
98 /* The code that was here used to select a default CPU depending on compiler
99 pre-defines which were only present when doing native builds, thus
100 changing gas' default behaviour depending upon the build host.
101
102 If you have a target that requires a default CPU option then the you
103 should define CPU_DEFAULT here. */
104 #endif
105
106 #ifndef FPU_DEFAULT
107 # ifdef TE_LINUX
108 # define FPU_DEFAULT FPU_ARCH_FPA
109 # elif defined (TE_NetBSD)
110 # ifdef OBJ_ELF
111 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
112 # else
113 /* Legacy a.out format. */
114 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
115 # endif
116 # elif defined (TE_VXWORKS)
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
118 # else
119 /* For backwards compatibility, default to FPA. */
120 # define FPU_DEFAULT FPU_ARCH_FPA
121 # endif
122 #endif /* ifndef FPU_DEFAULT */
123
124 #define streq(a, b) (strcmp (a, b) == 0)
125
126 /* Current set of feature bits available (CPU+FPU). Different from
127 selected_cpu + selected_fpu in case of autodetection since the CPU
128 feature bits are then all set. */
129 static arm_feature_set cpu_variant;
130 /* Feature bits used in each execution state. Used to set build attribute
131 (in particular Tag_*_ISA_use) in CPU autodetection mode. */
132 static arm_feature_set arm_arch_used;
133 static arm_feature_set thumb_arch_used;
134
135 /* Flags stored in private area of BFD structure. */
136 static int uses_apcs_26 = FALSE;
137 static int atpcs = FALSE;
138 static int support_interwork = FALSE;
139 static int uses_apcs_float = FALSE;
140 static int pic_code = FALSE;
141 static int fix_v4bx = FALSE;
142 /* Warn on using deprecated features. */
143 static int warn_on_deprecated = TRUE;
144
145 /* Understand CodeComposer Studio assembly syntax. */
146 bfd_boolean codecomposer_syntax = FALSE;
147
148 /* Variables that we set while parsing command-line options. Once all
149 options have been read we re-process these values to set the real
150 assembly flags. */
151
152 /* CPU and FPU feature bits set for legacy CPU and FPU options (eg. -marm1
153 instead of -mcpu=arm1). */
154 static const arm_feature_set *legacy_cpu = NULL;
155 static const arm_feature_set *legacy_fpu = NULL;
156
157 /* CPU, extension and FPU feature bits selected by -mcpu. */
158 static const arm_feature_set *mcpu_cpu_opt = NULL;
159 static arm_feature_set *mcpu_ext_opt = NULL;
160 static const arm_feature_set *mcpu_fpu_opt = NULL;
161
162 /* CPU, extension and FPU feature bits selected by -march. */
163 static const arm_feature_set *march_cpu_opt = NULL;
164 static arm_feature_set *march_ext_opt = NULL;
165 static const arm_feature_set *march_fpu_opt = NULL;
166
167 /* Feature bits selected by -mfpu. */
168 static const arm_feature_set *mfpu_opt = NULL;
169
170 /* Constants for known architecture features. */
171 static const arm_feature_set fpu_default = FPU_DEFAULT;
172 static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V1;
173 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
174 static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V3;
175 static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED = FPU_ARCH_NEON_V1;
176 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
177 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
178 #ifdef OBJ_ELF
179 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
180 #endif
181 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
182
183 #ifdef CPU_DEFAULT
184 static const arm_feature_set cpu_default = CPU_DEFAULT;
185 #endif
186
187 static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
188 static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V2);
189 static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
190 static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
191 static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
192 static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
193 static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
194 static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
195 static const arm_feature_set arm_ext_v4t_5 =
196 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
197 static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
198 static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
199 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
200 static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
201 static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
202 static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
203 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
204 static const arm_feature_set arm_ext_v6_notm =
205 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
206 static const arm_feature_set arm_ext_v6_dsp =
207 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
208 static const arm_feature_set arm_ext_barrier =
209 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
210 static const arm_feature_set arm_ext_msr =
211 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
212 static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
213 static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
214 static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
215 static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
216 #ifdef OBJ_ELF
217 static const arm_feature_set ATTRIBUTE_UNUSED arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
218 #endif
219 static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
220 static const arm_feature_set arm_ext_m =
221 ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_V7M,
222 ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
223 static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
224 static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
225 static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
226 static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
227 static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
228 static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
229 static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
230 static const arm_feature_set arm_ext_v8m_main =
231 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN);
232 /* Instructions in ARMv8-M only found in M profile architectures. */
233 static const arm_feature_set arm_ext_v8m_m_only =
234 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
235 static const arm_feature_set arm_ext_v6t2_v8m =
236 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
237 /* Instructions shared between ARMv8-A and ARMv8-M. */
238 static const arm_feature_set arm_ext_atomics =
239 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
240 #ifdef OBJ_ELF
241 /* DSP instructions Tag_DSP_extension refers to. */
242 static const arm_feature_set arm_ext_dsp =
243 ARM_FEATURE_CORE_LOW (ARM_EXT_V5E | ARM_EXT_V5ExP | ARM_EXT_V6_DSP);
244 #endif
245 static const arm_feature_set arm_ext_ras =
246 ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS);
247 /* FP16 instructions. */
248 static const arm_feature_set arm_ext_fp16 =
249 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
250 static const arm_feature_set arm_ext_fp16_fml =
251 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_FML);
252 static const arm_feature_set arm_ext_v8_2 =
253 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A);
254 static const arm_feature_set arm_ext_v8_3 =
255 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A);
256
257 static const arm_feature_set arm_arch_any = ARM_ANY;
258 #ifdef OBJ_ELF
259 static const arm_feature_set fpu_any = FPU_ANY;
260 #endif
261 static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED = ARM_FEATURE (-1, -1, -1);
262 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
263 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
264
265 static const arm_feature_set arm_cext_iwmmxt2 =
266 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
267 static const arm_feature_set arm_cext_iwmmxt =
268 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
269 static const arm_feature_set arm_cext_xscale =
270 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
271 static const arm_feature_set arm_cext_maverick =
272 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
273 static const arm_feature_set fpu_fpa_ext_v1 =
274 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
275 static const arm_feature_set fpu_fpa_ext_v2 =
276 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
277 static const arm_feature_set fpu_vfp_ext_v1xd =
278 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
279 static const arm_feature_set fpu_vfp_ext_v1 =
280 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
281 static const arm_feature_set fpu_vfp_ext_v2 =
282 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
283 static const arm_feature_set fpu_vfp_ext_v3xd =
284 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
285 static const arm_feature_set fpu_vfp_ext_v3 =
286 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
287 static const arm_feature_set fpu_vfp_ext_d32 =
288 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
289 static const arm_feature_set fpu_neon_ext_v1 =
290 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
291 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
292 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
293 #ifdef OBJ_ELF
294 static const arm_feature_set fpu_vfp_fp16 =
295 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
296 static const arm_feature_set fpu_neon_ext_fma =
297 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
298 #endif
299 static const arm_feature_set fpu_vfp_ext_fma =
300 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
301 static const arm_feature_set fpu_vfp_ext_armv8 =
302 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
303 static const arm_feature_set fpu_vfp_ext_armv8xd =
304 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
305 static const arm_feature_set fpu_neon_ext_armv8 =
306 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
307 static const arm_feature_set fpu_crypto_ext_armv8 =
308 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
309 static const arm_feature_set crc_ext_armv8 =
310 ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
311 static const arm_feature_set fpu_neon_ext_v8_1 =
312 ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA);
313 static const arm_feature_set fpu_neon_ext_dotprod =
314 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD);
315
316 static int mfloat_abi_opt = -1;
317 /* Architecture feature bits selected by the last -mcpu/-march or .cpu/.arch
318 directive. */
319 static arm_feature_set selected_arch = ARM_ARCH_NONE;
320 /* Extension feature bits selected by the last -mcpu/-march or .arch_extension
321 directive. */
322 static arm_feature_set selected_ext = ARM_ARCH_NONE;
323 /* Feature bits selected by the last -mcpu/-march or by the combination of the
324 last .cpu/.arch directive .arch_extension directives since that
325 directive. */
326 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
327 /* FPU feature bits selected by the last -mfpu or .fpu directive. */
328 static arm_feature_set selected_fpu = FPU_NONE;
329 /* Feature bits selected by the last .object_arch directive. */
330 static arm_feature_set selected_object_arch = ARM_ARCH_NONE;
331 /* Must be long enough to hold any of the names in arm_cpus. */
332 static char selected_cpu_name[20];
333
334 extern FLONUM_TYPE generic_floating_point_number;
335
336 /* Return if no cpu was selected on command-line. */
337 static bfd_boolean
338 no_cpu_selected (void)
339 {
340 return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
341 }
342
343 #ifdef OBJ_ELF
344 # ifdef EABI_DEFAULT
345 static int meabi_flags = EABI_DEFAULT;
346 # else
347 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
348 # endif
349
350 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
351
352 bfd_boolean
353 arm_is_eabi (void)
354 {
355 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
356 }
357 #endif
358
359 #ifdef OBJ_ELF
360 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
361 symbolS * GOT_symbol;
362 #endif
363
364 /* 0: assemble for ARM,
365 1: assemble for Thumb,
366 2: assemble for Thumb even though target CPU does not support thumb
367 instructions. */
368 static int thumb_mode = 0;
369 /* A value distinct from the possible values for thumb_mode that we
370 can use to record whether thumb_mode has been copied into the
371 tc_frag_data field of a frag. */
372 #define MODE_RECORDED (1 << 4)
373
374 /* Specifies the intrinsic IT insn behavior mode. */
375 enum implicit_it_mode
376 {
377 IMPLICIT_IT_MODE_NEVER = 0x00,
378 IMPLICIT_IT_MODE_ARM = 0x01,
379 IMPLICIT_IT_MODE_THUMB = 0x02,
380 IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
381 };
382 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
383
384 /* If unified_syntax is true, we are processing the new unified
385 ARM/Thumb syntax. Important differences from the old ARM mode:
386
387 - Immediate operands do not require a # prefix.
388 - Conditional affixes always appear at the end of the
389 instruction. (For backward compatibility, those instructions
390 that formerly had them in the middle, continue to accept them
391 there.)
392 - The IT instruction may appear, and if it does is validated
393 against subsequent conditional affixes. It does not generate
394 machine code.
395
396 Important differences from the old Thumb mode:
397
398 - Immediate operands do not require a # prefix.
399 - Most of the V6T2 instructions are only available in unified mode.
400 - The .N and .W suffixes are recognized and honored (it is an error
401 if they cannot be honored).
402 - All instructions set the flags if and only if they have an 's' affix.
403 - Conditional affixes may be used. They are validated against
404 preceding IT instructions. Unlike ARM mode, you cannot use a
405 conditional affix except in the scope of an IT instruction. */
406
407 static bfd_boolean unified_syntax = FALSE;
408
409 /* An immediate operand can start with #, and ld*, st*, pld operands
410 can contain [ and ]. We need to tell APP not to elide whitespace
411 before a [, which can appear as the first operand for pld.
412 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
413 const char arm_symbol_chars[] = "#[]{}";
414
/* Categories for Neon element-type suffixes (e.g. .s8, .f32, .p8).  */
415 enum neon_el_type
416 {
/* No/invalid type.  */
417 NT_invtype,
/* No suffix was given.  */
418 NT_untyped,
419 NT_integer,
420 NT_float,
/* Polynomial type.  */
421 NT_poly,
422 NT_signed,
423 NT_unsigned
424 };
425
/* One parsed Neon type element: its category and size.  */
426 struct neon_type_el
427 {
428 enum neon_el_type type;
/* Element size.  NOTE(review): presumably in bits (8/16/32/64) -- confirm
   against the suffix-parsing code.  */
429 unsigned size;
430 };
431
432 #define NEON_MAX_TYPE_ELS 4
433
/* A full Neon type suffix: up to NEON_MAX_TYPE_ELS elements.  */
434 struct neon_type
435 {
436 struct neon_type_el el[NEON_MAX_TYPE_ELS];
/* Number of entries of EL actually in use.  */
437 unsigned elems;
438 };
439
440 enum it_instruction_type
441 {
442 OUTSIDE_IT_INSN,
443 INSIDE_IT_INSN,
444 INSIDE_IT_LAST_INSN,
445 IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
446 if inside, should be the last one. */
447 NEUTRAL_IT_INSN, /* This could be either inside or outside,
448 i.e. BKPT and NOP. */
449 IT_INSN /* The IT insn has been parsed. */
450 };
451
452 /* The maximum number of operands we need. */
453 #define ARM_IT_MAX_OPERANDS 6
454
/* State for the instruction currently being assembled: filled in by the
   parsers and consumed by the ARM/Thumb encoding functions.  */
455 struct arm_it
456 {
/* Error message to report for this instruction, or NULL on success.  */
457 const char * error;
/* The instruction encoding built up so far.  */
458 unsigned long instruction;
/* Instruction size in bytes, and the size explicitly requested by a
   suffix (0 when no explicit size was requested).  */
459 int size;
460 int size_req;
/* Condition code of this instruction.  */
461 int cond;
462 /* "uncond_value" is set to the value in place of the conditional field in
463 unconditional versions of the instruction, or -1 if nothing is
464 appropriate. */
465 int uncond_value;
466 struct neon_type vectype;
467 /* This does not indicate an actual NEON instruction, only that
468 the mnemonic accepts neon-style type suffixes. */
469 int is_neon;
470 /* Set to the opcode if the instruction needs relaxation.
471 Zero if the instruction is not relaxed. */
472 unsigned long relax;
/* Relocation to be emitted for this instruction, if any.  */
473 struct
474 {
475 bfd_reloc_code_real_type type;
476 expressionS exp;
477 int pc_rel;
478 } reloc;
479
480 enum it_instruction_type it_insn_type;
481
/* The parsed operands.  */
482 struct
483 {
484 unsigned reg;
485 signed int imm;
486 struct neon_type_el vectype;
487 unsigned present : 1; /* Operand present. */
488 unsigned isreg : 1; /* Operand was a register. */
489 unsigned immisreg : 1; /* .imm field is a second register. */
490 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
491 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
492 unsigned immisfloat : 1; /* Immediate was parsed as a float. */
493 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
494 instructions. This allows us to disambiguate ARM <-> vector insns. */
495 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
496 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
497 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
498 unsigned issingle : 1; /* Operand is VFP single-precision register. */
499 unsigned hasreloc : 1; /* Operand has relocation suffix. */
500 unsigned writeback : 1; /* Operand has trailing ! */
501 unsigned preind : 1; /* Preindexed address. */
502 unsigned postind : 1; /* Postindexed address. */
503 unsigned negative : 1; /* Index register was negated. */
504 unsigned shifted : 1; /* Shift applied to operation. */
505 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
506 } operands[ARM_IT_MAX_OPERANDS];
507 };
508
509 static struct arm_it inst;
510
511 #define NUM_FLOAT_VALS 8
512
513 const char * fp_const[] =
514 {
515 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
516 };
517
518 /* Number of littlenums required to hold an extended precision number. */
519 #define MAX_LITTLENUMS 6
520
521 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
522
523 #define FAIL (-1)
524 #define SUCCESS (0)
525
526 #define SUFF_S 1
527 #define SUFF_D 2
528 #define SUFF_E 3
529 #define SUFF_P 4
530
531 #define CP_T_X 0x00008000
532 #define CP_T_Y 0x00400000
533
534 #define CONDS_BIT 0x00100000
535 #define LOAD_BIT 0x00100000
536
537 #define DOUBLE_LOAD_FLAG 0x00000001
538
/* Condition-code hash table entry: mnemonic suffix -> encoding value.  */
539 struct asm_cond
540 {
541 const char * template_name;
542 unsigned long value;
543 };
544
545 #define COND_ALWAYS 0xE
546
/* PSR-name hash table entry: name -> PSR field mask.  */
547 struct asm_psr
548 {
549 const char * template_name;
550 unsigned long field;
551 };
552
/* Barrier-option hash table entry: option name, its encoding, and the
   architecture feature set that provides it.  */
553 struct asm_barrier_opt
554 {
555 const char * template_name;
556 unsigned long value;
557 const arm_feature_set arch;
558 };
559
560 /* The bit that distinguishes CPSR and SPSR. */
561 #define SPSR_BIT (1 << 22)
562
563 /* The individual PSR flag bits. */
564 #define PSR_c (1 << 16)
565 #define PSR_x (1 << 17)
566 #define PSR_s (1 << 18)
567 #define PSR_f (1 << 19)
568
/* Relocation-name hash table entry: operator name -> BFD reloc code.  */
569 struct reloc_entry
570 {
571 const char * name;
572 bfd_reloc_code_real_type reloc;
573 };
574
575 enum vfp_reg_pos
576 {
577 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
578 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
579 };
580
581 enum vfp_ldstm_type
582 {
583 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
584 };
585
586 /* Bits for DEFINED field in neon_typed_alias. */
587 #define NTA_HASTYPE 1
588 #define NTA_HASINDEX 2
589
590 struct neon_typed_alias
591 {
/* Mask of NTA_HASTYPE / NTA_HASINDEX bits saying which fields below are
   valid.  */
592 unsigned char defined;
/* Scalar index, valid when NTA_HASINDEX is set in DEFINED.  */
593 unsigned char index;
/* Element type, valid when NTA_HASTYPE is set in DEFINED.  */
594 struct neon_type_el eltype;
595 };
596
597 /* ARM register categories. This includes coprocessor numbers and various
598 architecture extensions' registers. Each entry should have an error message
599 in reg_expected_msgs below. */
600 enum arm_reg_type
601 {
602 REG_TYPE_RN,
603 REG_TYPE_CP,
604 REG_TYPE_CN,
605 REG_TYPE_FN,
606 REG_TYPE_VFS,
607 REG_TYPE_VFD,
608 REG_TYPE_NQ,
609 REG_TYPE_VFSD,
610 REG_TYPE_NDQ,
611 REG_TYPE_NSD,
612 REG_TYPE_NSDQ,
613 REG_TYPE_VFC,
614 REG_TYPE_MVF,
615 REG_TYPE_MVD,
616 REG_TYPE_MVFX,
617 REG_TYPE_MVDX,
618 REG_TYPE_MVAX,
619 REG_TYPE_DSPSC,
620 REG_TYPE_MMXWR,
621 REG_TYPE_MMXWC,
622 REG_TYPE_MMXWCG,
623 REG_TYPE_XSCALE,
624 REG_TYPE_RNB
625 };
626
627 /* Structure for a hash table entry for a register.
628 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
629 information which states whether a vector type or index is specified (for a
630 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
631 struct reg_entry
632 {
633 const char * name;
634 unsigned int number;
/* An enum arm_reg_type value, narrowed to a byte.  */
635 unsigned char type;
/* Nonzero for built-in registers, zero for user-defined aliases.  */
636 unsigned char builtin;
637 struct neon_typed_alias * neon;
638 };
639
640 /* Diagnostics used when we don't get a register of the expected type. */
641 const char * const reg_expected_msgs[] =
642 {
643 [REG_TYPE_RN] = N_("ARM register expected"),
644 [REG_TYPE_CP] = N_("bad or missing co-processor number"),
645 [REG_TYPE_CN] = N_("co-processor register expected"),
646 [REG_TYPE_FN] = N_("FPA register expected"),
647 [REG_TYPE_VFS] = N_("VFP single precision register expected"),
648 [REG_TYPE_VFD] = N_("VFP/Neon double precision register expected"),
649 [REG_TYPE_NQ] = N_("Neon quad precision register expected"),
650 [REG_TYPE_VFSD] = N_("VFP single or double precision register expected"),
651 [REG_TYPE_NDQ] = N_("Neon double or quad precision register expected"),
652 [REG_TYPE_NSD] = N_("Neon single or double precision register expected"),
653 [REG_TYPE_NSDQ] = N_("VFP single, double or Neon quad precision register"
654 " expected"),
655 [REG_TYPE_VFC] = N_("VFP system register expected"),
656 [REG_TYPE_MVF] = N_("Maverick MVF register expected"),
657 [REG_TYPE_MVD] = N_("Maverick MVD register expected"),
658 [REG_TYPE_MVFX] = N_("Maverick MVFX register expected"),
659 [REG_TYPE_MVDX] = N_("Maverick MVDX register expected"),
660 [REG_TYPE_MVAX] = N_("Maverick MVAX register expected"),
661 [REG_TYPE_DSPSC] = N_("Maverick DSPSC register expected"),
662 [REG_TYPE_MMXWR] = N_("iWMMXt data register expected"),
663 [REG_TYPE_MMXWC] = N_("iWMMXt control register expected"),
664 [REG_TYPE_MMXWCG] = N_("iWMMXt scalar register expected"),
665 [REG_TYPE_XSCALE] = N_("XScale accumulator register expected"),
666 [REG_TYPE_RNB] = N_("")
667 };
668
669 /* Some well known registers that we refer to directly elsewhere. */
670 #define REG_R12 12
671 #define REG_SP 13
672 #define REG_LR 14
673 #define REG_PC 15
674
675 /* ARM instructions take 4bytes in the object file, Thumb instructions
676 take 2: */
677 #define INSN_SIZE 4
678
679 struct asm_opcode
680 {
681 /* Basic string to match. */
682 const char * template_name;
683
684 /* Parameters to instruction. */
685 unsigned int operands[8];
686
687 /* Conditional tag - see opcode_lookup. */
688 unsigned int tag : 4;
689
690 /* Basic instruction code. */
691 unsigned int avalue : 28;
692
693 /* Thumb-format instruction code. */
694 unsigned int tvalue;
695
696 /* Which architecture variant provides this instruction. */
697 const arm_feature_set * avariant;
698 const arm_feature_set * tvariant;
699
700 /* Function to call to encode instruction in ARM format. */
701 void (* aencode) (void);
702
703 /* Function to call to encode instruction in Thumb format. */
704 void (* tencode) (void);
705 };
706
707 /* Defines for various bits that we will want to toggle. */
708 #define INST_IMMEDIATE 0x02000000
709 #define OFFSET_REG 0x02000000
710 #define HWOFFSET_IMM 0x00400000
711 #define SHIFT_BY_REG 0x00000010
712 #define PRE_INDEX 0x01000000
713 #define INDEX_UP 0x00800000
714 #define WRITE_BACK 0x00200000
715 #define LDM_TYPE_2_OR_3 0x00400000
716 #define CPSI_MMOD 0x00020000
717
718 #define LITERAL_MASK 0xf000f000
719 #define OPCODE_MASK 0xfe1fffff
720 #define V4_STR_BIT 0x00000020
721 #define VLDR_VMOV_SAME 0x0040f000
722
723 #define T2_SUBS_PC_LR 0xf3de8f00
724
725 #define DATA_OP_SHIFT 21
726 #define SBIT_SHIFT 20
727
728 #define T2_OPCODE_MASK 0xfe1fffff
729 #define T2_DATA_OP_SHIFT 21
730 #define T2_SBIT_SHIFT 20
731
732 #define A_COND_MASK 0xf0000000
733 #define A_PUSH_POP_OP_MASK 0x0fff0000
734
735 /* Opcodes for pushing/poping registers to/from the stack. */
736 #define A1_OPCODE_PUSH 0x092d0000
737 #define A2_OPCODE_PUSH 0x052d0004
738 #define A2_OPCODE_POP 0x049d0004
739
740 /* Codes to distinguish the arithmetic instructions. */
741 #define OPCODE_AND 0
742 #define OPCODE_EOR 1
743 #define OPCODE_SUB 2
744 #define OPCODE_RSB 3
745 #define OPCODE_ADD 4
746 #define OPCODE_ADC 5
747 #define OPCODE_SBC 6
748 #define OPCODE_RSC 7
749 #define OPCODE_TST 8
750 #define OPCODE_TEQ 9
751 #define OPCODE_CMP 10
752 #define OPCODE_CMN 11
753 #define OPCODE_ORR 12
754 #define OPCODE_MOV 13
755 #define OPCODE_BIC 14
756 #define OPCODE_MVN 15
757
758 #define T2_OPCODE_AND 0
759 #define T2_OPCODE_BIC 1
760 #define T2_OPCODE_ORR 2
761 #define T2_OPCODE_ORN 3
762 #define T2_OPCODE_EOR 4
763 #define T2_OPCODE_ADD 8
764 #define T2_OPCODE_ADC 10
765 #define T2_OPCODE_SBC 11
766 #define T2_OPCODE_SUB 13
767 #define T2_OPCODE_RSB 14
768
769 #define T_OPCODE_MUL 0x4340
770 #define T_OPCODE_TST 0x4200
771 #define T_OPCODE_CMN 0x42c0
772 #define T_OPCODE_NEG 0x4240
773 #define T_OPCODE_MVN 0x43c0
774
775 #define T_OPCODE_ADD_R3 0x1800
776 #define T_OPCODE_SUB_R3 0x1a00
777 #define T_OPCODE_ADD_HI 0x4400
778 #define T_OPCODE_ADD_ST 0xb000
779 #define T_OPCODE_SUB_ST 0xb080
780 #define T_OPCODE_ADD_SP 0xa800
781 #define T_OPCODE_ADD_PC 0xa000
782 #define T_OPCODE_ADD_I8 0x3000
783 #define T_OPCODE_SUB_I8 0x3800
784 #define T_OPCODE_ADD_I3 0x1c00
785 #define T_OPCODE_SUB_I3 0x1e00
786
787 #define T_OPCODE_ASR_R 0x4100
788 #define T_OPCODE_LSL_R 0x4080
789 #define T_OPCODE_LSR_R 0x40c0
790 #define T_OPCODE_ROR_R 0x41c0
791 #define T_OPCODE_ASR_I 0x1000
792 #define T_OPCODE_LSL_I 0x0000
793 #define T_OPCODE_LSR_I 0x0800
794
795 #define T_OPCODE_MOV_I8 0x2000
796 #define T_OPCODE_CMP_I8 0x2800
797 #define T_OPCODE_CMP_LR 0x4280
798 #define T_OPCODE_MOV_HR 0x4600
799 #define T_OPCODE_CMP_HR 0x4500
800
801 #define T_OPCODE_LDR_PC 0x4800
802 #define T_OPCODE_LDR_SP 0x9800
803 #define T_OPCODE_STR_SP 0x9000
804 #define T_OPCODE_LDR_IW 0x6800
805 #define T_OPCODE_STR_IW 0x6000
806 #define T_OPCODE_LDR_IH 0x8800
807 #define T_OPCODE_STR_IH 0x8000
808 #define T_OPCODE_LDR_IB 0x7800
809 #define T_OPCODE_STR_IB 0x7000
810 #define T_OPCODE_LDR_RW 0x5800
811 #define T_OPCODE_STR_RW 0x5000
812 #define T_OPCODE_LDR_RH 0x5a00
813 #define T_OPCODE_STR_RH 0x5200
814 #define T_OPCODE_LDR_RB 0x5c00
815 #define T_OPCODE_STR_RB 0x5400
816
817 #define T_OPCODE_PUSH 0xb400
818 #define T_OPCODE_POP 0xbc00
819
820 #define T_OPCODE_BRANCH 0xe000
821
822 #define THUMB_SIZE 2 /* Size of thumb instruction. */
823 #define THUMB_PP_PC_LR 0x0100
824 #define THUMB_LOAD_BIT 0x0800
825 #define THUMB2_LOAD_BIT 0x00100000
826
/* Diagnostic strings shared by the operand parsers and encoders; each is
   wrapped in _() for translation.  */
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_SP		_("r13 not allowed here")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
/* Fix: drop the stray trailing semicolon from the expansion.  With it,
   "inst.error = BAD_ADDR_MODE;" expanded to a double semicolon, which is a
   syntax error when used as a braceless if-body followed by else.  */
#define BAD_ADDR_MODE	_("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
#define BAD_OUT_IT	_("thumb conditional instruction should be in IT block")
#define BAD_IT_COND	_("incorrect condition in IT block")
#define BAD_IT_IT	_("IT falling in the range of a previous IT block")
#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE	_("branch out of range")
#define BAD_FP16	_("selected processor does not support fp16 instruction")
#define UNPRED_REG(R)	_("using " R " results in unpredictable behaviour")
#define THUMB1_RELOC_ONLY	_("relocation valid in thumb1 code only")
850
851 static struct hash_control * arm_ops_hsh;
852 static struct hash_control * arm_cond_hsh;
853 static struct hash_control * arm_shift_hsh;
854 static struct hash_control * arm_psr_hsh;
855 static struct hash_control * arm_v7m_psr_hsh;
856 static struct hash_control * arm_reg_hsh;
857 static struct hash_control * arm_reloc_hsh;
858 static struct hash_control * arm_barrier_opt_hsh;
859
860 /* Stuff needed to resolve the label ambiguity
861 As:
862 ...
863 label: <insn>
864 may differ from:
865 ...
866 label:
867 <insn> */
868
869 symbolS * last_label_seen;
870 static int label_is_thumb_function_name = FALSE;
871
872 /* Literal pool structure. Held on a per-section
873 and per-sub-section basis. */
874
875 #define MAX_LITERAL_POOL_SIZE 1024
876 typedef struct literal_pool
877 {
/* Pending literal expressions and the index of the first free slot.  */
878 expressionS literals [MAX_LITERAL_POOL_SIZE];
879 unsigned int next_free_entry;
/* Identifier used to label this pool.  */
880 unsigned int id;
/* Symbol at which the pool will be emitted.  */
881 symbolS * symbol;
/* Section and sub-section this pool belongs to.  */
882 segT section;
883 subsegT sub_section;
884 #ifdef OBJ_ELF
/* Source locations of the literals, for DWARF line information.  */
885 struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
886 #endif
/* Next pool in the list_of_pools chain.  */
887 struct literal_pool * next;
/* Alignment required for this pool.  */
888 unsigned int alignment;
889 } literal_pool;
890
891 /* Pointer to a linked list of literal pools. */
892 literal_pool * list_of_pools = NULL;
893
/* State machine for the .asmfunc/.endasmfunc directive pair
   (handlers live elsewhere in this file).  */
typedef enum asmfunc_states
{
  OUTSIDE_ASMFUNC,	  /* Not inside an asmfunc region.  */
  WAITING_ASMFUNC_NAME,	  /* .asmfunc seen; expecting the name label.  */
  WAITING_ENDASMFUNC	  /* Inside the body; expecting .endasmfunc.  */
} asmfunc_states;

static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
902
/* Current IT-block tracking state.  For ELF it lives in the per-segment
   info, so each section keeps independent IT state; otherwise a single
   file-scope instance suffices.  */
#ifdef OBJ_ELF
#  define now_it seg_info (now_seg)->tc_segment_info_data.current_it
#else
static struct current_it now_it;
#endif
908
909 static inline int
910 now_it_compatible (int cond)
911 {
912 return (cond & ~1) == (now_it.cc & ~1);
913 }
914
915 static inline int
916 conditional_insn (void)
917 {
918 return inst.cond != COND_ALWAYS;
919 }
920
/* Forward declarations for the IT-block state machine; the definitions
   appear later in this file.  */
static int in_it_block (void);

static int handle_it_state (void);

static void force_automatic_it_block_close (void);

static void it_fsm_post_encode (void);
928
/* Record the IT classification of the instruction being assembled and
   run the IT state machine; on failure, return from the enclosing
   (void) function.  */
#define set_it_insn_type(type)			\
  do						\
    {						\
      inst.it_insn_type = type;			\
      if (handle_it_state () == FAIL)		\
	return;					\
    }						\
  while (0)

/* As set_it_insn_type, but for use in functions returning a value:
   FAILRET is returned when the state machine rejects the instruction.  */
#define set_it_insn_type_nonvoid(type, failret) \
  do						\
    {						\
      inst.it_insn_type = type;			\
      if (handle_it_state () == FAIL)		\
	return failret;				\
    }						\
  while (0)

/* Classify the current instruction as the last one permitted in an IT
   block, distinguishing conditional from unconditional forms.  */
#define set_it_insn_type_last()			\
  do						\
    {						\
      if (inst.cond == COND_ALWAYS)		\
	set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
      else					\
	set_it_insn_type (INSIDE_IT_LAST_INSN);	\
    }						\
  while (0)
956
/* Pure syntax.	 */

/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.	 */
char arm_comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.	*/
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

char arm_line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.	*/
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
/* As in 0f12.456  */
/* or	 0d1.2345e12  */

const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix characters that indicate the start of an immediate
   value.  */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')

/* Separator character handling.  */

/* Skip at most ONE space.  The tokenised input presumably never has
   runs of blanks here -- TODO confirm against the gas scrubber.  */
#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)
991
992 static inline int
993 skip_past_char (char ** str, char c)
994 {
995 /* PR gas/14987: Allow for whitespace before the expected character. */
996 skip_whitespace (*str);
997
998 if (**str == c)
999 {
1000 (*str)++;
1001 return SUCCESS;
1002 }
1003 else
1004 return FAIL;
1005 }
1006
/* Consume an optional comma (with optional preceding whitespace).  */
#define skip_past_comma(str) skip_past_char (str, ',')
1008
1009 /* Arithmetic expressions (possibly involving symbols). */
1010
1011 /* Return TRUE if anything in the expression is a bignum. */
1012
1013 static bfd_boolean
1014 walk_no_bignums (symbolS * sp)
1015 {
1016 if (symbol_get_value_expression (sp)->X_op == O_big)
1017 return TRUE;
1018
1019 if (symbol_get_value_expression (sp)->X_add_symbol)
1020 {
1021 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
1022 || (symbol_get_value_expression (sp)->X_op_symbol
1023 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
1024 }
1025
1026 return FALSE;
1027 }
1028
/* Set while my_get_expression is running, so md_operand can tell that
   a bad sub-expression came from instruction operand parsing.  */
static bfd_boolean in_my_get_expression = FALSE;

/* Third argument to my_get_expression.	 */
#define GE_NO_PREFIX 0	   /* No '#'/'$' prefix permitted.  */
#define GE_IMM_PREFIX 1	   /* Prefix is mandatory.  */
#define GE_OPT_PREFIX 2	   /* Prefix is optional.  */
/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3
1038
/* Parse an (possibly '#'/'$'-prefixed) expression at *STR into EP,
   honouring PREFIX_MODE (one of the GE_* values above).  On success
   *STR is advanced past the expression and SUCCESS (zero) is returned.
   On failure inst.error is set and a non-zero value is returned --
   NOTE(review): the error paths return FAIL in one place and 1 in the
   others; callers appear to test only for non-zero.  */
static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
		  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Run the generic expression parser over *STR; in_my_get_expression
     tells md_operand (below) to flag anything the parser rejects.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = TRUE;
  expression (ep);
  in_my_get_expression = FALSE;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.	 Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return SUCCESS;
}
1108
1109 /* Turn a string in input_line_pointer into a floating point constant
1110 of type TYPE, and store the appropriate bytes in *LITP. The number
1111 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1112 returned, or NULL on OK.
1113
1114 Note that fp constants aren't represent in the normal way on the ARM.
1115 In big endian mode, things are as expected. However, in little endian
1116 mode fp constants are big-endian word-wise, and little-endian byte-wise
1117 within the words. For example, (double) 1.1 in big endian mode is
1118 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1119 the byte sequence 99 99 f1 3f 9a 99 99 99.
1120
1121 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1122
/* See the block comment above: convert the FP constant at
   input_line_pointer into littlenums in *LITP, honouring the ARM's
   mixed-endian FP layout.  *SIZEP receives the byte count.  Returns an
   error message, or NULL on success.  */
const char *
md_atof (int type, char * litP, int * sizeP)
{
  int prec;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *t;
  int i;

  /* PREC is the number of 16-bit littlenums for the requested format.  */
  switch (type)
    {
    case 'f':
    case 'F':
    case 's':
    case 'S':
      prec = 2;
      break;

    case 'd':
    case 'D':
    case 'r':
    case 'R':
      prec = 4;
      break;

    case 'x':
    case 'X':
      prec = 5;
      break;

    case 'p':
    case 'P':
      prec = 5;
      break;

    default:
      *sizeP = 0;
      return _("Unrecognized or unsupported floating point constant");
    }

  t = atof_ieee (input_line_pointer, type, words);
  if (t)
    input_line_pointer = t;
  *sizeP = prec * sizeof (LITTLENUM_TYPE);

  if (target_big_endian)
    {
      /* Big-endian target: emit littlenums most-significant first.  */
      for (i = 0; i < prec; i++)
	{
	  md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	  litP += sizeof (LITTLENUM_TYPE);
	}
    }
  else
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	/* Fully little-endian FP layout: reverse the littlenum order.  */
	for (i = prec - 1; i >= 0; i--)
	  {
	    md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	    litP += sizeof (LITTLENUM_TYPE);
	  }
      else
	/* For a 4 byte float the order of elements in `words' is 1 0.
	   For an 8 byte float the order is 1 0 3 2.  */
	for (i = 0; i < prec; i += 2)
	  {
	    md_number_to_chars (litP, (valueT) words[i + 1],
				sizeof (LITTLENUM_TYPE));
	    md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
				(valueT) words[i], sizeof (LITTLENUM_TYPE));
	    litP += 2 * sizeof (LITTLENUM_TYPE);
	  }
    }

  return NULL;
}
1198
1199 /* We handle all bad expressions here, so that we can report the faulty
1200 instruction in the error message. */
1201
1202 void
1203 md_operand (expressionS * exp)
1204 {
1205 if (in_my_get_expression)
1206 exp->X_op = O_illegal;
1207 }
1208
1209 /* Immediate values. */
1210
1211 #ifdef OBJ_ELF
1212 /* Generic immediate-value read function for use in directives.
1213 Accepts anything that 'expression' can fold to a constant.
1214 *val receives the number. */
1215
1216 static int
1217 immediate_for_directive (int *val)
1218 {
1219 expressionS exp;
1220 exp.X_op = O_illegal;
1221
1222 if (is_immediate_prefix (*input_line_pointer))
1223 {
1224 input_line_pointer++;
1225 expression (&exp);
1226 }
1227
1228 if (exp.X_op != O_constant)
1229 {
1230 as_bad (_("expected #constant"));
1231 ignore_rest_of_line ();
1232 return FAIL;
1233 }
1234 *val = exp.X_add_number;
1235 return SUCCESS;
1236 }
1237 #endif
1238
1239 /* Register parsing. */
1240
1241 /* Generic register parser. CCP points to what should be the
1242 beginning of a register name. If it is indeed a valid register
1243 name, advance CCP over it and return the reg_entry structure;
1244 otherwise return NULL. Does not issue diagnostics. */
1245
1246 static struct reg_entry *
1247 arm_reg_parse_multi (char **ccp)
1248 {
1249 char *start = *ccp;
1250 char *p;
1251 struct reg_entry *reg;
1252
1253 skip_whitespace (start);
1254
1255 #ifdef REGISTER_PREFIX
1256 if (*start != REGISTER_PREFIX)
1257 return NULL;
1258 start++;
1259 #endif
1260 #ifdef OPTIONAL_REGISTER_PREFIX
1261 if (*start == OPTIONAL_REGISTER_PREFIX)
1262 start++;
1263 #endif
1264
1265 p = start;
1266 if (!ISALPHA (*p) || !is_name_beginner (*p))
1267 return NULL;
1268
1269 do
1270 p++;
1271 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1272
1273 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1274
1275 if (!reg)
1276 return NULL;
1277
1278 *ccp = p;
1279 return reg;
1280 }
1281
/* Handle the alternative spellings accepted for a few register classes:
   REG may be a register of a related class (or NULL), START the raw
   text.  Returns a register number, or FAIL.  Note the deliberate
   fallthrough from REG_TYPE_CP into REG_TYPE_MMXWC below.  */
static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
		    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes.  */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)
	return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here.  */
      {
	unsigned long processor = strtoul (start, ccp, 10);
	if (*ccp != start && processor <= 15)
	  return processor;
      }
      /* Fall through.  */

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
	 instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
	return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}
1320
1321 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1322 return value is the register number or FAIL. */
1323
1324 static int
1325 arm_reg_parse (char **ccp, enum arm_reg_type type)
1326 {
1327 char *start = *ccp;
1328 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1329 int ret;
1330
1331 /* Do not allow a scalar (reg+index) to parse as a register. */
1332 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1333 return FAIL;
1334
1335 if (reg && reg->type == type)
1336 return reg->number;
1337
1338 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1339 return ret;
1340
1341 *ccp = start;
1342 return FAIL;
1343 }
1344
1345 /* Parse a Neon type specifier. *STR should point at the leading '.'
1346 character. Does no verification at this stage that the type fits the opcode
1347 properly. E.g.,
1348
1349 .i32.i32.s16
1350 .s32.f32
1351 .u16
1352
1353 Can all be legally parsed by this function.
1354
1355 Fills in neon_type struct pointer with parsed information, and updates STR
1356 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1357 type, FAIL if not. */
1358
1359 static int
1360 parse_neon_type (struct neon_type *type, char **str)
1361 {
1362 char *ptr = *str;
1363
1364 if (type)
1365 type->elems = 0;
1366
1367 while (type->elems < NEON_MAX_TYPE_ELS)
1368 {
1369 enum neon_el_type thistype = NT_untyped;
1370 unsigned thissize = -1u;
1371
1372 if (*ptr != '.')
1373 break;
1374
1375 ptr++;
1376
1377 /* Just a size without an explicit type. */
1378 if (ISDIGIT (*ptr))
1379 goto parsesize;
1380
1381 switch (TOLOWER (*ptr))
1382 {
1383 case 'i': thistype = NT_integer; break;
1384 case 'f': thistype = NT_float; break;
1385 case 'p': thistype = NT_poly; break;
1386 case 's': thistype = NT_signed; break;
1387 case 'u': thistype = NT_unsigned; break;
1388 case 'd':
1389 thistype = NT_float;
1390 thissize = 64;
1391 ptr++;
1392 goto done;
1393 default:
1394 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1395 return FAIL;
1396 }
1397
1398 ptr++;
1399
1400 /* .f is an abbreviation for .f32. */
1401 if (thistype == NT_float && !ISDIGIT (*ptr))
1402 thissize = 32;
1403 else
1404 {
1405 parsesize:
1406 thissize = strtoul (ptr, &ptr, 10);
1407
1408 if (thissize != 8 && thissize != 16 && thissize != 32
1409 && thissize != 64)
1410 {
1411 as_bad (_("bad size %d in type specifier"), thissize);
1412 return FAIL;
1413 }
1414 }
1415
1416 done:
1417 if (type)
1418 {
1419 type->el[type->elems].type = thistype;
1420 type->el[type->elems].size = thissize;
1421 type->elems++;
1422 }
1423 }
1424
1425 /* Empty/missing type is not a successful parse. */
1426 if (type->elems == 0)
1427 return FAIL;
1428
1429 *str = ptr;
1430
1431 return SUCCESS;
1432 }
1433
1434 /* Errors may be set multiple times during parsing or bit encoding
1435 (particularly in the Neon bits), but usually the earliest error which is set
1436 will be the most meaningful. Avoid overwriting it with later (cascading)
1437 errors by calling this function. */
1438
1439 static void
1440 first_error (const char *err)
1441 {
1442 if (!inst.error)
1443 inst.error = err;
1444 }
1445
1446 /* Parse a single type, e.g. ".s32", leading period included. */
1447 static int
1448 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1449 {
1450 char *str = *ccp;
1451 struct neon_type optype;
1452
1453 if (*str == '.')
1454 {
1455 if (parse_neon_type (&optype, &str) == SUCCESS)
1456 {
1457 if (optype.elems == 1)
1458 *vectype = optype.el[0];
1459 else
1460 {
1461 first_error (_("only one type should be specified for operand"));
1462 return FAIL;
1463 }
1464 }
1465 else
1466 {
1467 first_error (_("vector type expected"));
1468 return FAIL;
1469 }
1470 }
1471 else
1472 return FAIL;
1473
1474 *ccp = str;
1475
1476 return SUCCESS;
1477 }
1478
/* Special meanings for indices (which have a range of 0-7), which will fit into
   a 4-bit integer.  */

#define NEON_ALL_LANES		15	/* "[]" -- broadcast to all lanes.  */
#define NEON_INTERLEAVE_LANES	14	/* No index: structure interleave.  */
1484
1485 /* Parse either a register or a scalar, with an optional type. Return the
1486 register number, and optionally fill in the actual type of the register
1487 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1488 type/index information in *TYPEINFO. */
1489
/* See the block comment above: parse a register or scalar at *CCP of
   (possibly polymorphic) class TYPE, returning the register number or
   FAIL.  *RTYPE receives the resolved class; *TYPEINFO the parsed
   type/index information.  */
static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with "nothing known" type/index info.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_NSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  if (type != reg->type)
    return FAIL;

  /* A .req/.dn/.qn alias may already carry type/index info.  */
  if (reg->neon)
    atype = *reg->neon;

  /* An explicit ".type" suffix overrides nothing -- it may only add
     type info that the alias did not already define.  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Optional "[index]" or "[]" scalar suffix.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD
	  && !(type == REG_TYPE_VFS
	       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_2)))
	{
	  first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1597
/* Like arm_reg_parse, but allow the following extra features:
1599 - If RTYPE is non-zero, return the (possibly restricted) type of the
1600 register (e.g. Neon double or quad reg when either has been requested).
1601 - If this is a Neon vector type with additional type information, fill
1602 in the struct pointed to by VECTYPE (if non-NULL).
1603 This function will fault on encountering a scalar. */
1604
1605 static int
1606 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1607 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1608 {
1609 struct neon_typed_alias atype;
1610 char *str = *ccp;
1611 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1612
1613 if (reg == FAIL)
1614 return FAIL;
1615
1616 /* Do not allow regname(... to parse as a register. */
1617 if (*str == '(')
1618 return FAIL;
1619
1620 /* Do not allow a scalar (reg+index) to parse as a register. */
1621 if ((atype.defined & NTA_HASINDEX) != 0)
1622 {
1623 first_error (_("register operand expected, but got scalar"));
1624 return FAIL;
1625 }
1626
1627 if (vectype)
1628 *vectype = atype.eltype;
1629
1630 *ccp = str;
1631
1632 return reg;
1633 }
1634
/* Decode the reg*16+index encoding produced by parse_scalar below.  */
#define NEON_SCALAR_REG(X)	((X) >> 4)
#define NEON_SCALAR_INDEX(X)	((X) & 15)
1637
1638 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1639 have enough information to be able to do a good job bounds-checking. So, we
1640 just do easy checks here, and do further checks later. */
1641
1642 static int
1643 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1644 {
1645 int reg;
1646 char *str = *ccp;
1647 struct neon_typed_alias atype;
1648 enum arm_reg_type reg_type = REG_TYPE_VFD;
1649
1650 if (elsize == 4)
1651 reg_type = REG_TYPE_VFS;
1652
1653 reg = parse_typed_reg_or_scalar (&str, reg_type, NULL, &atype);
1654
1655 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1656 return FAIL;
1657
1658 if (atype.index == NEON_ALL_LANES)
1659 {
1660 first_error (_("scalar must have an index"));
1661 return FAIL;
1662 }
1663 else if (atype.index >= 64 / elsize)
1664 {
1665 first_error (_("scalar index out of range"));
1666 return FAIL;
1667 }
1668
1669 if (type)
1670 *type = atype.eltype;
1671
1672 *ccp = str;
1673
1674 return reg * 16 + atype.index;
1675 }
1676
1677 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1678
1679 static long
1680 parse_reg_list (char ** strp)
1681 {
1682 char * str = * strp;
1683 long range = 0;
1684 int another_range;
1685
1686 /* We come back here if we get ranges concatenated by '+' or '|'. */
1687 do
1688 {
1689 skip_whitespace (str);
1690
1691 another_range = 0;
1692
1693 if (*str == '{')
1694 {
1695 int in_range = 0;
1696 int cur_reg = -1;
1697
1698 str++;
1699 do
1700 {
1701 int reg;
1702
1703 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1704 {
1705 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1706 return FAIL;
1707 }
1708
1709 if (in_range)
1710 {
1711 int i;
1712
1713 if (reg <= cur_reg)
1714 {
1715 first_error (_("bad range in register list"));
1716 return FAIL;
1717 }
1718
1719 for (i = cur_reg + 1; i < reg; i++)
1720 {
1721 if (range & (1 << i))
1722 as_tsktsk
1723 (_("Warning: duplicated register (r%d) in register list"),
1724 i);
1725 else
1726 range |= 1 << i;
1727 }
1728 in_range = 0;
1729 }
1730
1731 if (range & (1 << reg))
1732 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1733 reg);
1734 else if (reg <= cur_reg)
1735 as_tsktsk (_("Warning: register range not in ascending order"));
1736
1737 range |= 1 << reg;
1738 cur_reg = reg;
1739 }
1740 while (skip_past_comma (&str) != FAIL
1741 || (in_range = 1, *str++ == '-'));
1742 str--;
1743
1744 if (skip_past_char (&str, '}') == FAIL)
1745 {
1746 first_error (_("missing `}'"));
1747 return FAIL;
1748 }
1749 }
1750 else
1751 {
1752 expressionS exp;
1753
1754 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1755 return FAIL;
1756
1757 if (exp.X_op == O_constant)
1758 {
1759 if (exp.X_add_number
1760 != (exp.X_add_number & 0x0000ffff))
1761 {
1762 inst.error = _("invalid register mask");
1763 return FAIL;
1764 }
1765
1766 if ((range & exp.X_add_number) != 0)
1767 {
1768 int regno = range & exp.X_add_number;
1769
1770 regno &= -regno;
1771 regno = (1 << regno) - 1;
1772 as_tsktsk
1773 (_("Warning: duplicated register (r%d) in register list"),
1774 regno);
1775 }
1776
1777 range |= exp.X_add_number;
1778 }
1779 else
1780 {
1781 if (inst.reloc.type != 0)
1782 {
1783 inst.error = _("expression too complex");
1784 return FAIL;
1785 }
1786
1787 memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
1788 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1789 inst.reloc.pc_rel = 0;
1790 }
1791 }
1792
1793 if (*str == '|' || *str == '+')
1794 {
1795 str++;
1796 another_range = 1;
1797 }
1798 }
1799 while (another_range);
1800
1801 *strp = str;
1802 return range;
1803 }
1804
1805 /* Types of registers in a list. */
1806
/* Flavours of register list accepted by parse_vfp_reg_list below.  */
enum reg_list_els
{
  REGLIST_VFP_S,	/* Single-precision S registers.  */
  REGLIST_VFP_D,	/* Double-precision D registers.  */
  REGLIST_NEON_D	/* D registers with Neon syntax extensions.  */
};
1813
1814 /* Parse a VFP register list. If the string is invalid return FAIL.
1815 Otherwise return the number of registers, and set PBASE to the first
1816 register. Parses registers of type ETYPE.
1817 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1818 - Q registers can be used to specify pairs of D registers
1819 - { } can be omitted from around a singleton register list
1820 FIXME: This is not implemented, as it would require backtracking in
1821 some cases, e.g.:
1822 vtbl.8 d3,d4,d5
1823 This could be done (the meaning isn't really ambiguous), but doesn't
1824 fit in well with the current parsing framework.
1825 - 32 D registers may be used (also true for VFPv3).
1826 FIXME: Types are ignored in these register lists, which is probably a
1827 bug. */
1828
1829 static int
1830 parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
1831 {
1832 char *str = *ccp;
1833 int base_reg;
1834 int new_base;
1835 enum arm_reg_type regtype = (enum arm_reg_type) 0;
1836 int max_regs = 0;
1837 int count = 0;
1838 int warned = 0;
1839 unsigned long mask = 0;
1840 int i;
1841
1842 if (skip_past_char (&str, '{') == FAIL)
1843 {
1844 inst.error = _("expecting {");
1845 return FAIL;
1846 }
1847
1848 switch (etype)
1849 {
1850 case REGLIST_VFP_S:
1851 regtype = REG_TYPE_VFS;
1852 max_regs = 32;
1853 break;
1854
1855 case REGLIST_VFP_D:
1856 regtype = REG_TYPE_VFD;
1857 break;
1858
1859 case REGLIST_NEON_D:
1860 regtype = REG_TYPE_NDQ;
1861 break;
1862 }
1863
1864 if (etype != REGLIST_VFP_S)
1865 {
1866 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1867 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
1868 {
1869 max_regs = 32;
1870 if (thumb_mode)
1871 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1872 fpu_vfp_ext_d32);
1873 else
1874 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1875 fpu_vfp_ext_d32);
1876 }
1877 else
1878 max_regs = 16;
1879 }
1880
1881 base_reg = max_regs;
1882
1883 do
1884 {
1885 int setmask = 1, addregs = 1;
1886
1887 new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);
1888
1889 if (new_base == FAIL)
1890 {
1891 first_error (_(reg_expected_msgs[regtype]));
1892 return FAIL;
1893 }
1894
1895 if (new_base >= max_regs)
1896 {
1897 first_error (_("register out of range in list"));
1898 return FAIL;
1899 }
1900
1901 /* Note: a value of 2 * n is returned for the register Q<n>. */
1902 if (regtype == REG_TYPE_NQ)
1903 {
1904 setmask = 3;
1905 addregs = 2;
1906 }
1907
1908 if (new_base < base_reg)
1909 base_reg = new_base;
1910
1911 if (mask & (setmask << new_base))
1912 {
1913 first_error (_("invalid register list"));
1914 return FAIL;
1915 }
1916
1917 if ((mask >> new_base) != 0 && ! warned)
1918 {
1919 as_tsktsk (_("register list not in ascending order"));
1920 warned = 1;
1921 }
1922
1923 mask |= setmask << new_base;
1924 count += addregs;
1925
1926 if (*str == '-') /* We have the start of a range expression */
1927 {
1928 int high_range;
1929
1930 str++;
1931
1932 if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
1933 == FAIL)
1934 {
1935 inst.error = gettext (reg_expected_msgs[regtype]);
1936 return FAIL;
1937 }
1938
1939 if (high_range >= max_regs)
1940 {
1941 first_error (_("register out of range in list"));
1942 return FAIL;
1943 }
1944
1945 if (regtype == REG_TYPE_NQ)
1946 high_range = high_range + 1;
1947
1948 if (high_range <= new_base)
1949 {
1950 inst.error = _("register range not in ascending order");
1951 return FAIL;
1952 }
1953
1954 for (new_base += addregs; new_base <= high_range; new_base += addregs)
1955 {
1956 if (mask & (setmask << new_base))
1957 {
1958 inst.error = _("invalid register list");
1959 return FAIL;
1960 }
1961
1962 mask |= setmask << new_base;
1963 count += addregs;
1964 }
1965 }
1966 }
1967 while (skip_past_comma (&str) != FAIL);
1968
1969 str++;
1970
1971 /* Sanity check -- should have raised a parse error above. */
1972 if (count == 0 || count > max_regs)
1973 abort ();
1974
1975 *pbase = base_reg;
1976
1977 /* Final test -- the registers must be consecutive. */
1978 mask >>= base_reg;
1979 for (i = 0; i < count; i++)
1980 {
1981 if ((mask & (1u << i)) == 0)
1982 {
1983 inst.error = _("non-contiguous register range");
1984 return FAIL;
1985 }
1986 }
1987
1988 *ccp = str;
1989
1990 return count;
1991 }
1992
1993 /* True if two alias types are the same. */
1994
1995 static bfd_boolean
1996 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1997 {
1998 if (!a && !b)
1999 return TRUE;
2000
2001 if (!a || !b)
2002 return FALSE;
2003
2004 if (a->defined != b->defined)
2005 return FALSE;
2006
2007 if ((a->defined & NTA_HASTYPE) != 0
2008 && (a->eltype.type != b->eltype.type
2009 || a->eltype.size != b->eltype.size))
2010 return FALSE;
2011
2012 if ((a->defined & NTA_HASINDEX) != 0
2013 && (a->index != b->index))
2014 return FALSE;
2015
2016 return TRUE;
2017 }
2018
2019 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
2020 The base register is put in *PBASE.
2021 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
2022 the return value.
2023 The register stride (minus one) is put in bit 4 of the return value.
2024 Bits [6:5] encode the list length (minus one).
2025 The type of the list elements is put in *ELTYPE, if non-NULL. */
2026
/* Decode the value packed by parse_neon_el_struct_list below:
   bits [3:0] lane, bit 4 stride-1, bits [6:5] list length-1.  */
#define NEON_LANE(X)		((X) & 0xf)
#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)
#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)
2030
/* See the block comment above: parse a VLD<n>/VST<n> element/structure
   list such as {d0[1], d2[1]} or {q0-q1}, returning the packed
   lane/stride/length value (decode with the NEON_* macros above) or
   FAIL.  *PBASE receives the base register, *ELTYPE the element type.  */
static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;		/* First D register in the list.  */
  int reg_incr = -1;		/* Register stride; -1 = not yet known.  */
  int count = 0;		/* D registers seen so far.  */
  int lane = -1;		/* Lane index; -1 = not yet known.  */
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;
  firsttype.defined = 0;
  firsttype.eltype.type = NT_invtype;
  firsttype.eltype.size = -1;
  firsttype.index = -1;

  /* The braces may be omitted around a singleton list.  */
  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First register: it fixes the base and (for Q regs) the
	     stride, and its type must be matched by all the others.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second register: its distance from the base sets the stride.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  first_error (_(incr_error));
	  return FAIL;
	}

      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
	 modes.  */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2187
2188 /* Parse an explicit relocation suffix on an expression. This is
2189 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2190 arm_reloc_hsh contains no entries, so this function can only
2191 succeed if there is no () after the word. Returns -1 on error,
2192 BFD_RELOC_UNUSED if there wasn't any suffix. */
2193
2194 static int
2195 parse_reloc (char **str)
2196 {
2197 struct reloc_entry *r;
2198 char *p, *q;
2199
2200 if (**str != '(')
2201 return BFD_RELOC_UNUSED;
2202
2203 p = *str + 1;
2204 q = p;
2205
2206 while (*q && *q != ')' && *q != ',')
2207 q++;
2208 if (*q != ')')
2209 return -1;
2210
2211 if ((r = (struct reloc_entry *)
2212 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2213 return -1;
2214
2215 *str = q + 1;
2216 return r->reloc;
2217 }
2218
2219 /* Directives: register aliases. */
2220
2221 static struct reg_entry *
2222 insert_reg_alias (char *str, unsigned number, int type)
2223 {
2224 struct reg_entry *new_reg;
2225 const char *name;
2226
2227 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2228 {
2229 if (new_reg->builtin)
2230 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2231
2232 /* Only warn about a redefinition if it's not defined as the
2233 same register. */
2234 else if (new_reg->number != number || new_reg->type != type)
2235 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2236
2237 return NULL;
2238 }
2239
2240 name = xstrdup (str);
2241 new_reg = XNEW (struct reg_entry);
2242
2243 new_reg->name = name;
2244 new_reg->number = number;
2245 new_reg->type = type;
2246 new_reg->builtin = FALSE;
2247 new_reg->neon = NULL;
2248
2249 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2250 abort ();
2251
2252 return new_reg;
2253 }
2254
2255 static void
2256 insert_neon_reg_alias (char *str, int number, int type,
2257 struct neon_typed_alias *atype)
2258 {
2259 struct reg_entry *reg = insert_reg_alias (str, number, type);
2260
2261 if (!reg)
2262 {
2263 first_error (_("attempt to redefine typed alias"));
2264 return;
2265 }
2266
2267 if (atype)
2268 {
2269 reg->neon = XNEW (struct neon_typed_alias);
2270 *reg->neon = *atype;
2271 }
2272 }
2273
2274 /* Look for the .req directive. This is of the form:
2275
2276 new_register_name .req existing_register_name
2277
2278 If we find one, or if it looks sufficiently like one that we want to
2279 handle any error here, return TRUE. Otherwise return FALSE. */
2280
static bfd_boolean
create_register_alias (char * newname, char *p)
{
  struct reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces. */
  /* P must point at exactly " .req " followed by the existing
     register's name, otherwise this is not a .req line at all.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      /* It looked like a .req, so claim the line even though it
	 failed; the caller must not try to parse it further.  */
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  /* NUL-terminated working copy we can case-convert in place.  */
  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only insert the case-converted variant if it actually differs
	 from the name as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
2353
2354 /* Create a Neon typed/indexed register alias using directives, e.g.:
2355 X .dn d5.s32[1]
2356 Y .qn 6.s16
2357 Z .dn d7
2358 T .dn Z[0]
2359 These typed registers can be used instead of the types specified after the
2360 Neon mnemonic, so long as all operands given have types. Types can also be
2361 specified directly, e.g.:
2362 vadd d0.s32, d1.s32, d2.s32 */
2363
static bfd_boolean
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend ATTRIBUTE_UNUSED;
  int namelen;

  /* Start with nothing known about the alias's type or index.  */
  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  nameend = p;

  /* .dn defines a D-register (VFD) alias, .qn a Q-register alias;
     anything else is not ours to handle.  */
  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;
  else
    return FALSE;

  p += 5;

  if (*p == '\0')
    return FALSE;

  /* The base may be given as an existing register (possibly itself a
     typed alias) ...  */
  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return FALSE;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return FALSE;
	}
      /* Build a temporary entry on the stack.  A bare Q-register
	 number is converted to its D-register equivalent (each Q maps
	 onto two Ds).  */
      basereg = &mybasereg;
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* Inherit any type/index information the base alias already carries.  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return FALSE;
	}
      typeinfo.eltype = ntype.el[0];
    }

  /* An optional trailing "[N]" gives the alias a scalar index.  */
  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index.  */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return FALSE;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return FALSE;
	}
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  namelen = nameend - newname;
#else
  newname = original_case_string;
  namelen = strlen (newname);
#endif

  namebuf = xmemdup0 (newname, namelen);

  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  free (namebuf);
  return TRUE;
}
2502
2503 /* Should never be called, as .req goes between the alias and the
2504 register name, not at the beginning of the line. */
2505
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* ".req" must follow an alias name (handled in create_register_alias),
     so reaching this handler always means malformed input.  */
  as_bad (_("invalid syntax for .req directive"));
}
2511
/* Like s_req: ".dn" at the start of a line is always a syntax error,
   as the alias name must precede it.  */

static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .dn directive"));
}
2517
/* Like s_req: ".qn" at the start of a line is always a syntax error,
   as the alias name must precede it.  */

static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .qn directive"));
}
2523
2524 /* The .unreq directive deletes an alias which was previously defined
2525 by .req. For example:
2526
2527 my_alias .req r11
2528 .unreq my_alias */
2529
2530 static void
2531 s_unreq (int a ATTRIBUTE_UNUSED)
2532 {
2533 char * name;
2534 char saved_char;
2535
2536 name = input_line_pointer;
2537
2538 while (*input_line_pointer != 0
2539 && *input_line_pointer != ' '
2540 && *input_line_pointer != '\n')
2541 ++input_line_pointer;
2542
2543 saved_char = *input_line_pointer;
2544 *input_line_pointer = 0;
2545
2546 if (!*name)
2547 as_bad (_("invalid syntax for .unreq directive"));
2548 else
2549 {
2550 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2551 name);
2552
2553 if (!reg)
2554 as_bad (_("unknown register alias '%s'"), name);
2555 else if (reg->builtin)
2556 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2557 name);
2558 else
2559 {
2560 char * p;
2561 char * nbuf;
2562
2563 hash_delete (arm_reg_hsh, name, FALSE);
2564 free ((char *) reg->name);
2565 if (reg->neon)
2566 free (reg->neon);
2567 free (reg);
2568
2569 /* Also locate the all upper case and all lower case versions.
2570 Do not complain if we cannot find one or the other as it
2571 was probably deleted above. */
2572
2573 nbuf = strdup (name);
2574 for (p = nbuf; *p; p++)
2575 *p = TOUPPER (*p);
2576 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2577 if (reg)
2578 {
2579 hash_delete (arm_reg_hsh, nbuf, FALSE);
2580 free ((char *) reg->name);
2581 if (reg->neon)
2582 free (reg->neon);
2583 free (reg);
2584 }
2585
2586 for (p = nbuf; *p; p++)
2587 *p = TOLOWER (*p);
2588 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2589 if (reg)
2590 {
2591 hash_delete (arm_reg_hsh, nbuf, FALSE);
2592 free ((char *) reg->name);
2593 if (reg->neon)
2594 free (reg->neon);
2595 free (reg);
2596 }
2597
2598 free (nbuf);
2599 }
2600 }
2601
2602 *input_line_pointer = saved_char;
2603 demand_empty_rest_of_line ();
2604 }
2605
2606 /* Directives: Instruction set selection. */
2607
2608 #ifdef OBJ_ELF
2609 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2610 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2611 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2612 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2613
2614 /* Create a new mapping symbol for the transition to STATE. */
2615
static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  /* Pick the AAELF mapping symbol name for the new state: $d for
     data, $a for ARM code, $t for Thumb code.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  /* Mapping symbols are local and untyped (see the comment preceding
     this function).  */
  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Mark the symbol's ARM/Thumb-ness and interworking status to match
     the state being entered; $d carries no such annotation.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      /* Two symbols at the same offset: the newer one wins.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
2689
2690 /* We must sometimes convert a region marked as code to data during
2691 code alignment, if an odd number of bytes have to be padded. The
2692 code mapping symbol is pushed to an aligned address. */
2693
static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS *frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      /* If the removed symbol was also the frag's first mapping
	 symbol, clear that record too.  */
      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the padding bytes as data, then resume the original state at
     the (aligned) address past the padding.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
2716
2717 static void mapping_state_2 (enum mstate state, int max_chars);
2718
2719 /* Set the mapping state to STATE. Only call this when about to
2720 emit some STATE bytes to the file. */
2721
#define TRANSITION(from, to) (mapstate == (from) && state == (to))
void
mapping_state (enum mstate state)
{
  /* Current mapping state of the active segment.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (state == MAP_ARM || state == MAP_THUMB)
    /*  PR gas/12931
	All ARM instructions require 4-byte alignment.
	(Almost) all Thumb instructions require 2-byte alignment.

	When emitting instructions into any section, mark the section
	appropriately.

	Some Thumb instructions are alignment-sensitive modulo 4 bytes,
	but themselves require 2-byte alignment; this applies to some
	PC- relative forms.  However, these cases will involve implicit
	literal pool generation or an explicit .align >=2, both of
	which will cause the section to me marked with sufficient
	alignment.  Thus, we don't handle those cases here.  */
    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);

  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later.  */
    return;

  /* Defer the actual symbol emission to the shared worker; no bytes
     have been allocated yet, so the symbol goes at the current
     position.  */
  mapping_state_2 (state, 0);
}
2755
2756 /* Same as mapping_state, but MAX_CHARS bytes have already been
2757 allocated. Put the mapping symbol that far back. */
2758
static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Only emit mapping symbols into ordinary output sections.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  /* The first code symbol in a section that has seen no explicit
     state yet: if anything already precedes it in the section, that
     prefix must be marked as data.  */
  if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
      || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
    {
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }

  /* Record the new state and place its symbol MAX_CHARS bytes back,
     i.e. at the start of the bytes the caller already allocated.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
2785 #undef TRANSITION
2786 #else
2787 #define mapping_state(x) ((void)0)
2788 #define mapping_state_2(x, y) ((void)0)
2789 #endif
2790
2791 /* Find the real, Thumb encoded start of a Thumb function. */
2792
2793 #ifdef OBJ_COFF
static symbolS *
find_real_start (symbolS * symbolP)
{
  char * real_start;
  const char * name = S_GET_NAME (symbolP);
  symbolS * new_target;

  /* This definition must agree with the one in gcc/config/arm/thumb.c.  */
#define STUB_NAME ".real_start_of"

  if (name == NULL)
    abort ();

  /* The compiler may generate BL instructions to local labels because
     it needs to perform a branch to a far away location. These labels
     do not have a corresponding ".real_start_of" label.  We check
     both for S_IS_LOCAL and for a leading dot, to give a way to bypass
     the ".real_start_of" convention for nonlocal branches.  */
  if (S_IS_LOCAL (symbolP) || name[0] == '.')
    return symbolP;

  /* Look up ".real_start_of" prefixed onto the symbol's name.  */
  real_start = concat (STUB_NAME, name, NULL);
  new_target = symbol_find (real_start);
  free (real_start);

  /* Fall back to the original symbol when no stub label exists.  */
  if (new_target == NULL)
    {
      as_warn (_("Failed to find real start of function: %s\n"), name);
      new_target = symbolP;
    }

  return new_target;
}
2827 #endif
2828
2829 static void
2830 opcode_select (int width)
2831 {
2832 switch (width)
2833 {
2834 case 16:
2835 if (! thumb_mode)
2836 {
2837 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2838 as_bad (_("selected processor does not support THUMB opcodes"));
2839
2840 thumb_mode = 1;
2841 /* No need to force the alignment, since we will have been
2842 coming from ARM mode, which is word-aligned. */
2843 record_alignment (now_seg, 1);
2844 }
2845 break;
2846
2847 case 32:
2848 if (thumb_mode)
2849 {
2850 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2851 as_bad (_("selected processor does not support ARM opcodes"));
2852
2853 thumb_mode = 0;
2854
2855 if (!need_pass_2)
2856 frag_align (2, 0, 0);
2857
2858 record_alignment (now_seg, 1);
2859 }
2860 break;
2861
2862 default:
2863 as_bad (_("invalid instruction size selected (%d)"), width);
2864 }
2865 }
2866
/* Handle the ".arm" directive: select the 32-bit ARM instruction set.  */

static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (32);
  demand_empty_rest_of_line ();
}
2873
/* Handle the ".thumb" directive: select the 16-bit Thumb instruction
   set.  */

static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (16);
  demand_empty_rest_of_line ();
}
2880
2881 static void
2882 s_code (int unused ATTRIBUTE_UNUSED)
2883 {
2884 int temp;
2885
2886 temp = get_absolute_expression ();
2887 switch (temp)
2888 {
2889 case 16:
2890 case 32:
2891 opcode_select (temp);
2892 break;
2893
2894 default:
2895 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2896 }
2897 }
2898
static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.  */
  if (! thumb_mode)
    {
      /* NOTE(review): 2 rather than 1 — apparently records that Thumb
	 was forced (opcode_select uses 1); confirm against users of
	 thumb_mode.  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
2915
/* Handle the ".thumb_func" directive: switch to Thumb encoding and
   flag the next label as the start of a Thumb function.  */

static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
}
2925
2926 /* Perform a .set directive, but also mark the alias as
2927 being a thumb function. */
2928
static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  delim = get_symbol_name (& name);
  end_name = input_line_pointer;
  (void) restore_line_pointer (delim);

  if (*input_line_pointer != ',')
    {
      /* Temporarily NUL-terminate the name for the diagnostic, then
	 put the delimiter back.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  /* Find the symbol being set, or create it if it does not exist yet.  */
  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    } /* Make a new symbol.  */

  symbol_table_insert (symbolP);

  /* Restore the delimiter overwritten above.  */
  * end_name = delim;

  /* For .thumb_set (EQUIV non-zero), redefining an already-defined
     symbol is an error, as with .equiv.  */
  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  /* Parse the value expression and assign it to the symbol.  */
  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
3014
3015 /* Directives: Mode selection. */
3016
3017 /* .syntax [unified|divided] - choose the new unified syntax
3018 (same for Arm and Thumb encoding, modulo slight differences in what
3019 can be represented) or the old divergent syntax for each mode. */
3020 static void
3021 s_syntax (int unused ATTRIBUTE_UNUSED)
3022 {
3023 char *name, delim;
3024
3025 delim = get_symbol_name (& name);
3026
3027 if (!strcasecmp (name, "unified"))
3028 unified_syntax = TRUE;
3029 else if (!strcasecmp (name, "divided"))
3030 unified_syntax = FALSE;
3031 else
3032 {
3033 as_bad (_("unrecognized syntax mode \"%s\""), name);
3034 return;
3035 }
3036 (void) restore_line_pointer (delim);
3037 demand_empty_rest_of_line ();
3038 }
3039
3040 /* Directives: sectioning and alignment. */
3041
/* Handle the ".bss" directive: switch output to the BSS section.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

  /* Let the ELF backend note the section change (mapping state etc.).  */
#ifdef md_elf_section_change_hook
  md_elf_section_change_hook ();
#endif
}
3054
/* Handle the ".even" directive: align to a 2-byte boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
3066
3067 /* Directives: CodeComposer Studio. */
3068
3069 /* .ref (for CodeComposer Studio syntax only). */
3070 static void
3071 s_ccs_ref (int unused ATTRIBUTE_UNUSED)
3072 {
3073 if (codecomposer_syntax)
3074 ignore_rest_of_line ();
3075 else
3076 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3077 }
3078
3079 /* If name is not NULL, then it is used for marking the beginning of a
3080 function, whereas if it is NULL then it means the function end. */
3081 static void
3082 asmfunc_debug (const char * name)
3083 {
3084 static const char * last_name = NULL;
3085
3086 if (name != NULL)
3087 {
3088 gas_assert (last_name == NULL);
3089 last_name = name;
3090
3091 if (debug_type == DEBUG_STABS)
3092 stabs_generate_asm_func (name, name);
3093 }
3094 else
3095 {
3096 gas_assert (last_name != NULL);
3097
3098 if (debug_type == DEBUG_STABS)
3099 stabs_generate_asm_endfunc (last_name, last_name);
3100
3101 last_name = NULL;
3102 }
3103 }
3104
3105 static void
3106 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
3107 {
3108 if (codecomposer_syntax)
3109 {
3110 switch (asmfunc_state)
3111 {
3112 case OUTSIDE_ASMFUNC:
3113 asmfunc_state = WAITING_ASMFUNC_NAME;
3114 break;
3115
3116 case WAITING_ASMFUNC_NAME:
3117 as_bad (_(".asmfunc repeated."));
3118 break;
3119
3120 case WAITING_ENDASMFUNC:
3121 as_bad (_(".asmfunc without function."));
3122 break;
3123 }
3124 demand_empty_rest_of_line ();
3125 }
3126 else
3127 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3128 }
3129
3130 static void
3131 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
3132 {
3133 if (codecomposer_syntax)
3134 {
3135 switch (asmfunc_state)
3136 {
3137 case OUTSIDE_ASMFUNC:
3138 as_bad (_(".endasmfunc without a .asmfunc."));
3139 break;
3140
3141 case WAITING_ASMFUNC_NAME:
3142 as_bad (_(".endasmfunc without function."));
3143 break;
3144
3145 case WAITING_ENDASMFUNC:
3146 asmfunc_state = OUTSIDE_ASMFUNC;
3147 asmfunc_debug (NULL);
3148 break;
3149 }
3150 demand_empty_rest_of_line ();
3151 }
3152 else
3153 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3154 }
3155
3156 static void
3157 s_ccs_def (int name)
3158 {
3159 if (codecomposer_syntax)
3160 s_globl (name);
3161 else
3162 as_bad (_(".def pseudo-op only available with -mccs flag."));
3163 }
3164
3165 /* Directives: Literal pools. */
3166
3167 static literal_pool *
3168 find_literal_pool (void)
3169 {
3170 literal_pool * pool;
3171
3172 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3173 {
3174 if (pool->section == now_seg
3175 && pool->sub_section == now_subseg)
3176 break;
3177 }
3178
3179 return pool;
3180 }
3181
3182 static literal_pool *
3183 find_or_make_literal_pool (void)
3184 {
3185 /* Next literal pool ID number. */
3186 static unsigned int latest_pool_num = 1;
3187 literal_pool * pool;
3188
3189 pool = find_literal_pool ();
3190
3191 if (pool == NULL)
3192 {
3193 /* Create a new pool. */
3194 pool = XNEW (literal_pool);
3195 if (! pool)
3196 return NULL;
3197
3198 pool->next_free_entry = 0;
3199 pool->section = now_seg;
3200 pool->sub_section = now_subseg;
3201 pool->next = list_of_pools;
3202 pool->symbol = NULL;
3203 pool->alignment = 2;
3204
3205 /* Add it to the list. */
3206 list_of_pools = pool;
3207 }
3208
3209 /* New pools, and emptied pools, will have a NULL symbol. */
3210 if (pool->symbol == NULL)
3211 {
3212 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3213 (valueT) 0, &zero_address_frag);
3214 pool->id = latest_pool_num ++;
3215 }
3216
3217 /* Done. */
3218 return pool;
3219 }
3220
3221 /* Add the literal in the global 'inst'
3222 structure to the relevant literal pool. */
3223
3224 static int
3225 add_to_lit_pool (unsigned int nbytes)
3226 {
3227 #define PADDING_SLOT 0x1
3228 #define LIT_ENTRY_SIZE_MASK 0xFF
3229 literal_pool * pool;
3230 unsigned int entry, pool_size = 0;
3231 bfd_boolean padding_slot_p = FALSE;
3232 unsigned imm1 = 0;
3233 unsigned imm2 = 0;
3234
3235 if (nbytes == 8)
3236 {
3237 imm1 = inst.operands[1].imm;
3238 imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
3239 : inst.reloc.exp.X_unsigned ? 0
3240 : ((bfd_int64_t) inst.operands[1].imm) >> 32);
3241 if (target_big_endian)
3242 {
3243 imm1 = imm2;
3244 imm2 = inst.operands[1].imm;
3245 }
3246 }
3247
3248 pool = find_or_make_literal_pool ();
3249
3250 /* Check if this literal value is already in the pool. */
3251 for (entry = 0; entry < pool->next_free_entry; entry ++)
3252 {
3253 if (nbytes == 4)
3254 {
3255 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
3256 && (inst.reloc.exp.X_op == O_constant)
3257 && (pool->literals[entry].X_add_number
3258 == inst.reloc.exp.X_add_number)
3259 && (pool->literals[entry].X_md == nbytes)
3260 && (pool->literals[entry].X_unsigned
3261 == inst.reloc.exp.X_unsigned))
3262 break;
3263
3264 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
3265 && (inst.reloc.exp.X_op == O_symbol)
3266 && (pool->literals[entry].X_add_number
3267 == inst.reloc.exp.X_add_number)
3268 && (pool->literals[entry].X_add_symbol
3269 == inst.reloc.exp.X_add_symbol)
3270 && (pool->literals[entry].X_op_symbol
3271 == inst.reloc.exp.X_op_symbol)
3272 && (pool->literals[entry].X_md == nbytes))
3273 break;
3274 }
3275 else if ((nbytes == 8)
3276 && !(pool_size & 0x7)
3277 && ((entry + 1) != pool->next_free_entry)
3278 && (pool->literals[entry].X_op == O_constant)
3279 && (pool->literals[entry].X_add_number == (offsetT) imm1)
3280 && (pool->literals[entry].X_unsigned
3281 == inst.reloc.exp.X_unsigned)
3282 && (pool->literals[entry + 1].X_op == O_constant)
3283 && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
3284 && (pool->literals[entry + 1].X_unsigned
3285 == inst.reloc.exp.X_unsigned))
3286 break;
3287
3288 padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
3289 if (padding_slot_p && (nbytes == 4))
3290 break;
3291
3292 pool_size += 4;
3293 }
3294
3295 /* Do we need to create a new entry? */
3296 if (entry == pool->next_free_entry)
3297 {
3298 if (entry >= MAX_LITERAL_POOL_SIZE)
3299 {
3300 inst.error = _("literal pool overflow");
3301 return FAIL;
3302 }
3303
3304 if (nbytes == 8)
3305 {
3306 /* For 8-byte entries, we align to an 8-byte boundary,
3307 and split it into two 4-byte entries, because on 32-bit
3308 host, 8-byte constants are treated as big num, thus
3309 saved in "generic_bignum" which will be overwritten
3310 by later assignments.
3311
3312 We also need to make sure there is enough space for
3313 the split.
3314
3315 We also check to make sure the literal operand is a
3316 constant number. */
3317 if (!(inst.reloc.exp.X_op == O_constant
3318 || inst.reloc.exp.X_op == O_big))
3319 {
3320 inst.error = _("invalid type for literal pool");
3321 return FAIL;
3322 }
3323 else if (pool_size & 0x7)
3324 {
3325 if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
3326 {
3327 inst.error = _("literal pool overflow");
3328 return FAIL;
3329 }
3330
3331 pool->literals[entry] = inst.reloc.exp;
3332 pool->literals[entry].X_op = O_constant;
3333 pool->literals[entry].X_add_number = 0;
3334 pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
3335 pool->next_free_entry += 1;
3336 pool_size += 4;
3337 }
3338 else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
3339 {
3340 inst.error = _("literal pool overflow");
3341 return FAIL;
3342 }
3343
3344 pool->literals[entry] = inst.reloc.exp;
3345 pool->literals[entry].X_op = O_constant;
3346 pool->literals[entry].X_add_number = imm1;
3347 pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
3348 pool->literals[entry++].X_md = 4;
3349 pool->literals[entry] = inst.reloc.exp;
3350 pool->literals[entry].X_op = O_constant;
3351 pool->literals[entry].X_add_number = imm2;
3352 pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
3353 pool->literals[entry].X_md = 4;
3354 pool->alignment = 3;
3355 pool->next_free_entry += 1;
3356 }
3357 else
3358 {
3359 pool->literals[entry] = inst.reloc.exp;
3360 pool->literals[entry].X_md = 4;
3361 }
3362
3363 #ifdef OBJ_ELF
3364 /* PR ld/12974: Record the location of the first source line to reference
3365 this entry in the literal pool. If it turns out during linking that the
3366 symbol does not exist we will be able to give an accurate line number for
3367 the (first use of the) missing reference. */
3368 if (debug_type == DEBUG_DWARF2)
3369 dwarf2_where (pool->locs + entry);
3370 #endif
3371 pool->next_free_entry += 1;
3372 }
3373 else if (padding_slot_p)
3374 {
3375 pool->literals[entry] = inst.reloc.exp;
3376 pool->literals[entry].X_md = nbytes;
3377 }
3378
3379 inst.reloc.exp.X_op = O_symbol;
3380 inst.reloc.exp.X_add_number = pool_size;
3381 inst.reloc.exp.X_add_symbol = pool->symbol;
3382
3383 return SUCCESS;
3384 }
3385
3386 bfd_boolean
3387 tc_start_label_without_colon (void)
3388 {
3389 bfd_boolean ret = TRUE;
3390
3391 if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
3392 {
3393 const char *label = input_line_pointer;
3394
3395 while (!is_end_of_line[(int) label[-1]])
3396 --label;
3397
3398 if (*label == '.')
3399 {
3400 as_bad (_("Invalid label '%s'"), label);
3401 ret = FALSE;
3402 }
3403
3404 asmfunc_debug (label);
3405
3406 asmfunc_state = WAITING_ENDASMFUNC;
3407 }
3408
3409 return ret;
3410 }
3411
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.

   Fill in SYMBOLP (an already-allocated symbol) with NAME, SEGMENT,
   value VALU and owning fragment FRAG, then append it to the global
   symbol chain and run the object-format/target new-symbol hooks.  */

static void
symbol_locate (symbolS * symbolP,
	       const char * name,	/* It is copied, the caller can modify.	 */
	       segT segment,		/* Segment identifier (SEG_<something>).  */
	       valueT valu,		/* Symbol value.  */
	       fragS * frag)		/* Associated fragment.	 */
{
  size_t name_length;
  char * preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the caller's buffer may be
     modified or freed; +1 for the terminating NUL.  */
  name_length = strlen (name) + 1;
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = (char *) obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending after the table is frozen would corrupt it.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  /* Let the object format and the target see the new symbol.  */
  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
3462
/* Implement the .ltorg directive: dump the current literal pool at the
   present location.  Aligns the pool, defines the pool's label symbol
   here, emits every pending entry, then marks the pool empty.  A no-op
   when there is no pool, no pool symbol, or no entries.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (pool->alignment, 0, 0);

  record_alignment (now_seg, 2);

#ifdef OBJ_ELF
  /* Literal pool contents are data, not code: switch the mapping
     state and emit a $d mapping symbol.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
#endif
  /* The \002 byte keeps the generated name out of the user namespace.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  /* Define the previously-created pool symbol at the current spot.  */
  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
#ifdef OBJ_ELF
      if (debug_type == DEBUG_DWARF2)
	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
#endif
      /* First output the expression in the instruction to the pool.  */
      emit_expr (&(pool->literals[entry]),
		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
    }

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3514
3515 #ifdef OBJ_ELF
3516 /* Forward declarations for functions below, in the MD interface
3517 section. */
3518 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3519 static valueT create_unwind_entry (int);
3520 static void start_unwind_section (const segT, int);
3521 static void add_unwind_opcode (valueT, int);
3522 static void flush_pending_unwind (void);
3523
/* Directives: Data.  */

/* Implement data directives of width NBYTES (.word, .short, ...),
   additionally accepting an ARM relocation suffix such as (got) or
   (plt) after a symbolic operand.  */

static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  /* Emitted constants are data, not instructions.  */
  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* A symbolic operand may carry a relocation suffix.  */
	  char *before_reloc = input_line_pointer;
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = (reloc_howto_type *)
		bfd_reloc_type_lookup (stdoutput,
				       (bfd_reloc_code_real_type) reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (ngettext ("%s relocations do not fit in %d byte",
				  "%s relocations do not fit in %d bytes",
				  nbytes),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;
		  int offset;
		  char *save_buf = XNEWVEC (char, input_line_pointer - base);

		  /* Save the raw operand text, then shift it so the
		     relocation marker is spliced out, letting the
		     remaining text re-parse as one expression.  */
		  memcpy (save_buf, base, input_line_pointer - base);
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  /* Restore the original input buffer contents.  */
		  memcpy (base, save_buf, p - base);

		  /* Emit NBYTES of zeroes and attach the relocation
		     over the last SIZE bytes of the field.  */
		  offset = nbytes - size;
		  p = frag_more (nbytes);
		  memset (p, 0, nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
		  free (save_buf);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3620
3621 /* Emit an expression containing a 32-bit thumb instruction.
3622 Implementation based on put_thumb32_insn. */
3623
3624 static void
3625 emit_thumb32_expr (expressionS * exp)
3626 {
3627 expressionS exp_high = *exp;
3628
3629 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3630 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3631 exp->X_add_number &= 0xffff;
3632 emit_expr (exp, (unsigned int) THUMB_SIZE);
3633 }
3634
/* Guess the size in bytes of the Thumb instruction with encoding
   OPCODE: 2 for a 16-bit encoding, 4 for a 32-bit one, 0 when the
   value fits neither range and the size cannot be determined.  */

static int
thumb_insn_size (int opcode)
{
  unsigned int enc = (unsigned int) opcode;

  if (enc < 0xe800u)
    return 2;

  return enc >= 0xe8000000u ? 4 : 0;
}
3647
/* Output one .inst operand EXP of width NBYTES (0 means deduce the
   width from the Thumb encoding).  Returns TRUE when an instruction
   was emitted, FALSE on error.  */

static bfd_boolean
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      /* No explicit .inst.n/.inst.w suffix: infer 16 vs 32 bits from
	 the encoding value itself.  */
      if (size == 0)
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "\
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      /* Keep the IT-block state machine consistent with the
		 raw instruction we are about to emit.  */
	      if (now_it.state == AUTOMATIC_IT_BLOCK)
		set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
	      else
		set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      /* On little-endian targets a 32-bit Thumb instruction is
		 emitted as two halfwords, most-significant first (see
		 emit_thumb32_expr).  */
	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. " \
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
3692
/* Like s_arm_elf_cons but do not use md_cons_align and
   set the mapping state to MAP_ARM/MAP_THUMB.  Implements the
   .inst/.inst.n/.inst.w directives; NBYTES is 0 (deduce), 2 or 4.  */

static void
s_arm_elf_inst (int nbytes)
{
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Calling mapping_state () here will not change ARM/THUMB,
     but will ensure not to be in DATA state.  */

  if (thumb_mode)
    mapping_state (MAP_THUMB);
  else
    {
      /* ARM instructions are always four bytes, so the explicit width
	 suffixes only make sense in Thumb mode.  */
      if (nbytes != 0)
	{
	  as_bad (_("width suffixes are invalid in ARM mode"));
	  ignore_rest_of_line ();
	  return;
	}

      nbytes = 4;

      mapping_state (MAP_ARM);
    }

  /* Emit each comma-separated operand as one instruction.  */
  do
    {
      expressionS exp;

      expression (& exp);

      if (! emit_insn (& exp, nbytes))
	{
	  ignore_rest_of_line ();
	  return;
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3742
/* Parse a .rel31 directive: `0' or `1' (the word's top bit), a comma,
   then an expression emitted as a 31-bit value with a
   BFD_RELOC_ARM_PREL31 fixup.  */

static void
s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  char *p;
  valueT highbit;

  /* First operand selects the most significant bit of the word.  */
  highbit = 0;
  if (*input_line_pointer == '1')
    highbit = 0x80000000;
  else if (*input_line_pointer != '0')
    as_bad (_("expected 0 or 1"));

  input_line_pointer++;
  if (*input_line_pointer != ',')
    as_bad (_("missing comma"));
  input_line_pointer++;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  mapping_state (MAP_DATA);

  expression (&exp);

  /* Emit the high bit now; the remaining 31 bits come from the fixup
     (the trailing 1 presumably marks it pc-relative, as with fix_new
     elsewhere in this file — confirm against fix_new_arm).  */
  p = frag_more (4);
  md_number_to_chars (p, highbit, 4);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
	       BFD_RELOC_ARM_PREL31);

  demand_empty_rest_of_line ();
}
3782
3783 /* Directives: AEABI stack-unwind tables. */
3784
3785 /* Parse an unwind_fnstart directive. Simply records the current location. */
3786
3787 static void
3788 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3789 {
3790 demand_empty_rest_of_line ();
3791 if (unwind.proc_start)
3792 {
3793 as_bad (_("duplicate .fnstart directive"));
3794 return;
3795 }
3796
3797 /* Mark the start of the function. */
3798 unwind.proc_start = expr_build_dot ();
3799
3800 /* Reset the rest of the unwind info. */
3801 unwind.opcode_count = 0;
3802 unwind.table_entry = NULL;
3803 unwind.personality_routine = NULL;
3804 unwind.personality_index = -1;
3805 unwind.frame_size = 0;
3806 unwind.fp_offset = 0;
3807 unwind.fp_reg = REG_SP;
3808 unwind.fp_used = 0;
3809 unwind.sp_restored = 0;
3810 }
3811
3812
3813 /* Parse a handlerdata directive. Creates the exception handling table entry
3814 for the function. */
3815
3816 static void
3817 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3818 {
3819 demand_empty_rest_of_line ();
3820 if (!unwind.proc_start)
3821 as_bad (MISSING_FNSTART);
3822
3823 if (unwind.table_entry)
3824 as_bad (_("duplicate .handlerdata directive"));
3825
3826 create_unwind_entry (1);
3827 }
3828
/* Parse an unwind_fnend directive.  Generates the index table entry.  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  A zero VAL means the entry is emitted
     out-of-line and referenced via unwind.table_entry below.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  memset (ptr, 0, 8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      /* Zero-size fix with BFD_RELOC_NONE: records the reference
	 without patching any bytes.  */
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  /* Allow a subsequent .fnstart.  */
  unwind.proc_start = NULL;
}
3898
3899
3900 /* Parse an unwind_cantunwind directive. */
3901
3902 static void
3903 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3904 {
3905 demand_empty_rest_of_line ();
3906 if (!unwind.proc_start)
3907 as_bad (MISSING_FNSTART);
3908
3909 if (unwind.personality_routine || unwind.personality_index != -1)
3910 as_bad (_("personality routine specified for cantunwind frame"));
3911
3912 unwind.personality_index = -2;
3913 }
3914
3915
3916 /* Parse a personalityindex directive. */
3917
3918 static void
3919 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3920 {
3921 expressionS exp;
3922
3923 if (!unwind.proc_start)
3924 as_bad (MISSING_FNSTART);
3925
3926 if (unwind.personality_routine || unwind.personality_index != -1)
3927 as_bad (_("duplicate .personalityindex directive"));
3928
3929 expression (&exp);
3930
3931 if (exp.X_op != O_constant
3932 || exp.X_add_number < 0 || exp.X_add_number > 15)
3933 {
3934 as_bad (_("bad personality routine number"));
3935 ignore_rest_of_line ();
3936 return;
3937 }
3938
3939 unwind.personality_index = exp.X_add_number;
3940
3941 demand_empty_rest_of_line ();
3942 }
3943
3944
/* Parse a personality directive: records the named routine as this
   frame's personality routine.  */

static void
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
{
  char *name, *p, c;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personality directive"));

  /* get_symbol_name NUL-terminates the name in place and returns the
     character it displaced (restored below via *p = c).  */
  c = get_symbol_name (& name);
  p = input_line_pointer;
  /* Step past the closing quote of a quoted name.  */
  if (c == '"')
    ++ input_line_pointer;
  unwind.personality_routine = symbol_find_or_make (name);
  /* Put back the displaced character.  */
  *p = c;
  demand_empty_rest_of_line ();
}
3966
3967
/* Parse a directive saving core registers.  Encodes the parsed
   register mask as unwind pop opcodes and accounts for the pushed
   bytes in the frame size.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;
  int n;

  range = parse_reg_list (&input_line_pointer);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      /* Drop the movsp opcode and swap ip (bit 12) for sp (bit 13)
	 in the saved-register mask.  */
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.	 */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.	*/
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.	 */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.	 */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
4043
4044
/* Parse a directive saving FPA registers.  REG is the first register
   to save, already parsed by the caller (s_arm_unwind_save).  */

static void
s_arm_unwind_save_fpa (int reg)
{
  expressionS exp;
  int num_regs;
  valueT op;

  /* Get Number of registers to transfer.  */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    expression (&exp);
  else
    exp.X_op = O_illegal;

  if (exp.X_op != O_constant)
    {
      as_bad (_("expected , <constant>"));
      ignore_rest_of_line ();
      return;
    }

  num_regs = exp.X_add_number;

  if (num_regs < 1 || num_regs > 4)
    {
      as_bad (_("number of registers must be in the range [1:4]"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  if (reg == 4)
    {
      /* Short form.  */
      op = 0xb4 | (num_regs - 1);
      add_unwind_opcode (op, 1);
    }
  else
    {
      /* Long form.  */
      op = 0xc800 | (reg << 4) | (num_regs - 1);
      add_unwind_opcode (op, 2);
    }
  /* FPA registers occupy 12 bytes each on the stack.  */
  unwind.frame_size += num_regs * 12;
}
4092
4093
/* Parse a directive saving VFP registers for ARMv6 and above.  The
   register list may span d0-d31; registers >= d16 (VFPv3) need a
   separate opcode from those below d16.  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      /* The opcode encodes the first register relative to d16.  */
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* Each D register is eight bytes of frame.  */
  unwind.frame_size += count * 8;
}
4142
4143
4144 /* Parse a directive saving VFP registers for pre-ARMv6. */
4145
4146 static void
4147 s_arm_unwind_save_vfp (void)
4148 {
4149 int count;
4150 unsigned int reg;
4151 valueT op;
4152
4153 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
4154 if (count == FAIL)
4155 {
4156 as_bad (_("expected register list"));
4157 ignore_rest_of_line ();
4158 return;
4159 }
4160
4161 demand_empty_rest_of_line ();
4162
4163 if (reg == 8)
4164 {
4165 /* Short form. */
4166 op = 0xb8 | (count - 1);
4167 add_unwind_opcode (op, 1);
4168 }
4169 else
4170 {
4171 /* Long form. */
4172 op = 0xb300 | (reg << 4) | (count - 1);
4173 add_unwind_opcode (op, 2);
4174 }
4175 unwind.frame_size += count * 8 + 4;
4176 }
4177
4178
4179 /* Parse a directive saving iWMMXt data registers. */
4180
4181 static void
4182 s_arm_unwind_save_mmxwr (void)
4183 {
4184 int reg;
4185 int hi_reg;
4186 int i;
4187 unsigned mask = 0;
4188 valueT op;
4189
4190 if (*input_line_pointer == '{')
4191 input_line_pointer++;
4192
4193 do
4194 {
4195 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4196
4197 if (reg == FAIL)
4198 {
4199 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4200 goto error;
4201 }
4202
4203 if (mask >> reg)
4204 as_tsktsk (_("register list not in ascending order"));
4205 mask |= 1 << reg;
4206
4207 if (*input_line_pointer == '-')
4208 {
4209 input_line_pointer++;
4210 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4211 if (hi_reg == FAIL)
4212 {
4213 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4214 goto error;
4215 }
4216 else if (reg >= hi_reg)
4217 {
4218 as_bad (_("bad register range"));
4219 goto error;
4220 }
4221 for (; reg < hi_reg; reg++)
4222 mask |= 1 << reg;
4223 }
4224 }
4225 while (skip_past_comma (&input_line_pointer) != FAIL);
4226
4227 skip_past_char (&input_line_pointer, '}');
4228
4229 demand_empty_rest_of_line ();
4230
4231 /* Generate any deferred opcodes because we're going to be looking at
4232 the list. */
4233 flush_pending_unwind ();
4234
4235 for (i = 0; i < 16; i++)
4236 {
4237 if (mask & (1 << i))
4238 unwind.frame_size += 8;
4239 }
4240
4241 /* Attempt to combine with a previous opcode. We do this because gcc
4242 likes to output separate unwind directives for a single block of
4243 registers. */
4244 if (unwind.opcode_count > 0)
4245 {
4246 i = unwind.opcodes[unwind.opcode_count - 1];
4247 if ((i & 0xf8) == 0xc0)
4248 {
4249 i &= 7;
4250 /* Only merge if the blocks are contiguous. */
4251 if (i < 6)
4252 {
4253 if ((mask & 0xfe00) == (1 << 9))
4254 {
4255 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
4256 unwind.opcode_count--;
4257 }
4258 }
4259 else if (i == 6 && unwind.opcode_count >= 2)
4260 {
4261 i = unwind.opcodes[unwind.opcode_count - 2];
4262 reg = i >> 4;
4263 i &= 0xf;
4264
4265 op = 0xffff << (reg - 1);
4266 if (reg > 0
4267 && ((mask & op) == (1u << (reg - 1))))
4268 {
4269 op = (1 << (reg + i + 1)) - 1;
4270 op &= ~((1 << reg) - 1);
4271 mask |= op;
4272 unwind.opcode_count -= 2;
4273 }
4274 }
4275 }
4276 }
4277
4278 hi_reg = 15;
4279 /* We want to generate opcodes in the order the registers have been
4280 saved, ie. descending order. */
4281 for (reg = 15; reg >= -1; reg--)
4282 {
4283 /* Save registers in blocks. */
4284 if (reg < 0
4285 || !(mask & (1 << reg)))
4286 {
4287 /* We found an unsaved reg. Generate opcodes to save the
4288 preceding block. */
4289 if (reg != hi_reg)
4290 {
4291 if (reg == 9)
4292 {
4293 /* Short form. */
4294 op = 0xc0 | (hi_reg - 10);
4295 add_unwind_opcode (op, 1);
4296 }
4297 else
4298 {
4299 /* Long form. */
4300 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
4301 add_unwind_opcode (op, 2);
4302 }
4303 }
4304 hi_reg = reg - 1;
4305 }
4306 }
4307
4308 return;
4309 error:
4310 ignore_rest_of_line ();
4311 }
4312
4313 static void
4314 s_arm_unwind_save_mmxwcg (void)
4315 {
4316 int reg;
4317 int hi_reg;
4318 unsigned mask = 0;
4319 valueT op;
4320
4321 if (*input_line_pointer == '{')
4322 input_line_pointer++;
4323
4324 skip_whitespace (input_line_pointer);
4325
4326 do
4327 {
4328 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4329
4330 if (reg == FAIL)
4331 {
4332 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4333 goto error;
4334 }
4335
4336 reg -= 8;
4337 if (mask >> reg)
4338 as_tsktsk (_("register list not in ascending order"));
4339 mask |= 1 << reg;
4340
4341 if (*input_line_pointer == '-')
4342 {
4343 input_line_pointer++;
4344 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4345 if (hi_reg == FAIL)
4346 {
4347 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4348 goto error;
4349 }
4350 else if (reg >= hi_reg)
4351 {
4352 as_bad (_("bad register range"));
4353 goto error;
4354 }
4355 for (; reg < hi_reg; reg++)
4356 mask |= 1 << reg;
4357 }
4358 }
4359 while (skip_past_comma (&input_line_pointer) != FAIL);
4360
4361 skip_past_char (&input_line_pointer, '}');
4362
4363 demand_empty_rest_of_line ();
4364
4365 /* Generate any deferred opcodes because we're going to be looking at
4366 the list. */
4367 flush_pending_unwind ();
4368
4369 for (reg = 0; reg < 16; reg++)
4370 {
4371 if (mask & (1 << reg))
4372 unwind.frame_size += 4;
4373 }
4374 op = 0xc700 | mask;
4375 add_unwind_opcode (op, 2);
4376 return;
4377 error:
4378 ignore_rest_of_line ();
4379 }
4380
4381
/* Parse an unwind_save directive.
   If the argument is non-zero, this is a .vsave directive.
   Peeks at the first register to decide which register class is being
   saved and dispatches to the matching helper.  */

static void
s_arm_unwind_save (int arch_v6)
{
  char *peek;
  struct reg_entry *reg;
  bfd_boolean had_brace = FALSE;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* Figure out what sort of save we have.  */
  peek = input_line_pointer;

  if (*peek == '{')
    {
      had_brace = TRUE;
      peek++;
    }

  /* Parse through PEEK so input_line_pointer itself is not advanced;
     the helpers re-parse the full operand.  */
  reg = arm_reg_parse_multi (&peek);

  if (!reg)
    {
      as_bad (_("register expected"));
      ignore_rest_of_line ();
      return;
    }

  switch (reg->type)
    {
    case REG_TYPE_FN:
      /* FPA takes a single register plus a count, not a list.  */
      if (had_brace)
	{
	  as_bad (_("FPA .unwind_save does not take a register list"));
	  ignore_rest_of_line ();
	  return;
	}
      input_line_pointer = peek;
      s_arm_unwind_save_fpa (reg->number);
      return;

    case REG_TYPE_RN:
      s_arm_unwind_save_core ();
      return;

    case REG_TYPE_VFD:
      if (arch_v6)
	s_arm_unwind_save_vfp_armv6 ();
      else
	s_arm_unwind_save_vfp ();
      return;

    case REG_TYPE_MMXWR:
      s_arm_unwind_save_mmxwr ();
      return;

    case REG_TYPE_MMXWCG:
      s_arm_unwind_save_mmxwcg ();
      return;

    default:
      as_bad (_(".unwind_save does not support this kind of register"));
      ignore_rest_of_line ();
    }
}
4450
4451
/* Parse an unwind_movsp directive: the stack pointer has been copied
   from register REG (optionally with a constant offset).  */

static void
s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
{
  int reg;
  valueT op;
  int offset;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
  if (reg == FAIL)
    {
      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
      ignore_rest_of_line ();
      return;
    }

  /* Optional constant.  */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    {
      if (immediate_for_directive (&offset) == FAIL)
	return;
    }
  else
    offset = 0;

  demand_empty_rest_of_line ();

  if (reg == REG_SP || reg == REG_PC)
    {
      as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
      return;
    }

  /* A previous setfp/movsp already changed the frame register.  */
  if (unwind.fp_reg != REG_SP)
    as_bad (_("unexpected .unwind_movsp directive"));

  /* Generate opcode to restore the value.  */
  op = 0x90 | reg;
  add_unwind_opcode (op, 1);

  /* Record the information for later.  */
  unwind.fp_reg = reg;
  unwind.fp_offset = unwind.frame_size - offset;
  unwind.sp_restored = 1;
}
4501
4502 /* Parse an unwind_pad directive. */
4503
4504 static void
4505 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4506 {
4507 int offset;
4508
4509 if (!unwind.proc_start)
4510 as_bad (MISSING_FNSTART);
4511
4512 if (immediate_for_directive (&offset) == FAIL)
4513 return;
4514
4515 if (offset & 3)
4516 {
4517 as_bad (_("stack increment must be multiple of 4"));
4518 ignore_rest_of_line ();
4519 return;
4520 }
4521
4522 /* Don't generate any opcodes, just record the details for later. */
4523 unwind.frame_size += offset;
4524 unwind.pending_offset += offset;
4525
4526 demand_empty_rest_of_line ();
4527 }
4528
4529 /* Parse an unwind_setfp directive. */
4530
4531 static void
4532 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4533 {
4534 int sp_reg;
4535 int fp_reg;
4536 int offset;
4537
4538 if (!unwind.proc_start)
4539 as_bad (MISSING_FNSTART);
4540
4541 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4542 if (skip_past_comma (&input_line_pointer) == FAIL)
4543 sp_reg = FAIL;
4544 else
4545 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4546
4547 if (fp_reg == FAIL || sp_reg == FAIL)
4548 {
4549 as_bad (_("expected <reg>, <reg>"));
4550 ignore_rest_of_line ();
4551 return;
4552 }
4553
4554 /* Optional constant. */
4555 if (skip_past_comma (&input_line_pointer) != FAIL)
4556 {
4557 if (immediate_for_directive (&offset) == FAIL)
4558 return;
4559 }
4560 else
4561 offset = 0;
4562
4563 demand_empty_rest_of_line ();
4564
4565 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4566 {
4567 as_bad (_("register must be either sp or set by a previous"
4568 "unwind_movsp directive"));
4569 return;
4570 }
4571
4572 /* Don't generate any opcodes, just record the information for later. */
4573 unwind.fp_reg = fp_reg;
4574 unwind.fp_used = 1;
4575 if (sp_reg == REG_SP)
4576 unwind.fp_offset = unwind.frame_size - offset;
4577 else
4578 unwind.fp_offset -= offset;
4579 }
4580
4581 /* Parse an unwind_raw directive. */
4582
4583 static void
4584 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4585 {
4586 expressionS exp;
4587 /* This is an arbitrary limit. */
4588 unsigned char op[16];
4589 int count;
4590
4591 if (!unwind.proc_start)
4592 as_bad (MISSING_FNSTART);
4593
4594 expression (&exp);
4595 if (exp.X_op == O_constant
4596 && skip_past_comma (&input_line_pointer) != FAIL)
4597 {
4598 unwind.frame_size += exp.X_add_number;
4599 expression (&exp);
4600 }
4601 else
4602 exp.X_op = O_illegal;
4603
4604 if (exp.X_op != O_constant)
4605 {
4606 as_bad (_("expected <offset>, <opcode>"));
4607 ignore_rest_of_line ();
4608 return;
4609 }
4610
4611 count = 0;
4612
4613 /* Parse the opcode. */
4614 for (;;)
4615 {
4616 if (count >= 16)
4617 {
4618 as_bad (_("unwind opcode too long"));
4619 ignore_rest_of_line ();
4620 }
4621 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4622 {
4623 as_bad (_("invalid unwind opcode"));
4624 ignore_rest_of_line ();
4625 return;
4626 }
4627 op[count++] = exp.X_add_number;
4628
4629 /* Parse the next byte. */
4630 if (skip_past_comma (&input_line_pointer) == FAIL)
4631 break;
4632
4633 expression (&exp);
4634 }
4635
4636 /* Add the opcode bytes in reverse order. */
4637 while (count--)
4638 add_unwind_opcode (op[count], 1);
4639
4640 demand_empty_rest_of_line ();
4641 }
4642
4643
4644 /* Parse a .eabi_attribute directive. */
4645
4646 static void
4647 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4648 {
4649 int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4650
4651 if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4652 attributes_set_explicitly[tag] = 1;
4653 }
4654
4655 /* Emit a tls fix for the symbol. */
4656
4657 static void
4658 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
4659 {
4660 char *p;
4661 expressionS exp;
4662 #ifdef md_flush_pending_output
4663 md_flush_pending_output ();
4664 #endif
4665
4666 #ifdef md_cons_align
4667 md_cons_align (4);
4668 #endif
4669
4670 /* Since we're just labelling the code, there's no need to define a
4671 mapping symbol. */
4672 expression (&exp);
4673 p = obstack_next_free (&frchain_now->frch_obstack);
4674 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
4675 thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4676 : BFD_RELOC_ARM_TLS_DESCSEQ);
4677 }
4678 #endif /* OBJ_ELF */
4679
4680 static void s_arm_arch (int);
4681 static void s_arm_object_arch (int);
4682 static void s_arm_cpu (int);
4683 static void s_arm_fpu (int);
4684 static void s_arm_arch_extension (int);
4685
4686 #ifdef TE_PE
4687
4688 static void
4689 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4690 {
4691 expressionS exp;
4692
4693 do
4694 {
4695 expression (&exp);
4696 if (exp.X_op == O_symbol)
4697 exp.X_op = O_secrel;
4698
4699 emit_expr (&exp, 4);
4700 }
4701 while (*input_line_pointer++ == ',');
4702
4703 input_line_pointer--;
4704 demand_empty_rest_of_line ();
4705 }
4706 #endif /* TE_PE */
4707
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.	 */
  { "req",	   s_req,	  0 },
  /* Following two are likewise never called.  */
  { "dn",	   s_dn,	  0 },
  { "qn",	   s_qn,	  0 },
  { "unreq",	   s_unreq,	  0 },
  { "bss",	   s_bss,	  0 },
  { "align",	   s_align_ptwo,  2 },
  { "arm",	   s_arm,	  0 },
  { "thumb",	   s_thumb,	  0 },
  { "code",	   s_code,	  0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func", s_thumb_func, 0 },
  { "thumb_set",	s_thumb_set, 0 },
  { "even",	   s_even,	  0 },
  { "ltorg",	   s_ltorg,	  0 },
  { "pool",	   s_ltorg,	  0 },
  { "syntax",	   s_syntax,	  0 },
  { "cpu",	   s_arm_cpu,	  0 },
  { "arch",	   s_arm_arch,	  0 },
  { "object_arch", s_arm_object_arch,	0 },
  { "fpu",	   s_arm_fpu,	  0 },
  { "arch_extension", s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  /* ELF-only: data/instruction emission, and the ARM exception-table
     unwinding directives (.fnstart ... .unwind_raw) handled above.  */
  { "word",	     s_arm_elf_cons, 4 },
  { "long",	     s_arm_elf_cons, 4 },
  { "inst.n",	     s_arm_elf_inst, 2 },
  { "inst.w",	     s_arm_elf_inst, 4 },
  { "inst",	     s_arm_elf_inst, 0 },
  { "rel31",	     s_arm_rel31,    0 },
  { "fnstart",		s_arm_unwind_fnstart,	0 },
  { "fnend",		s_arm_unwind_fnend,	0 },
  { "cantunwind",	s_arm_unwind_cantunwind, 0 },
  { "personality",	s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata",	s_arm_unwind_handlerdata, 0 },
  /* ".save" and ".vsave" share a handler; the argument selects the
     vector-register variant.  */
  { "save",		s_arm_unwind_save,	0 },
  { "vsave",		s_arm_unwind_save,	1 },
  { "movsp",		s_arm_unwind_movsp,	0 },
  { "pad",		s_arm_unwind_pad,	0 },
  { "setfp",		s_arm_unwind_setfp,	0 },
  { "unwind_raw",	s_arm_unwind_raw,	0 },
  { "eabi_attribute",	s_arm_eabi_attribute, 0 },
  { "tlsdescseq",	s_arm_tls_descseq, 0 },
#else
  { "word",	   cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend",	   float_cons, 'x' },
  { "ldouble",	   float_cons, 'x' },
  { "packed",	   float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif

  /* These are for compatibility with CodeComposer Studio.  */
  {"ref",          s_ccs_ref,        0},
  {"def",          s_ccs_def,        0},
  {"asmfunc",      s_ccs_asmfunc,    0},
  {"endasmfunc",   s_ccs_endasmfunc, 0},

  { 0, 0, 0 }
};
4787 \f
4788 /* Parser functions used exclusively in instruction operands. */
4789
4790 /* Generic immediate-value read function for use in insn parsing.
4791 STR points to the beginning of the immediate (the leading #);
4792 VAL receives the value; if the value is outside [MIN, MAX]
4793 issue an error. PREFIX_OPT is true if the immediate prefix is
4794 optional. */
4795
4796 static int
4797 parse_immediate (char **str, int *val, int min, int max,
4798 bfd_boolean prefix_opt)
4799 {
4800 expressionS exp;
4801
4802 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4803 if (exp.X_op != O_constant)
4804 {
4805 inst.error = _("constant expression required");
4806 return FAIL;
4807 }
4808
4809 if (exp.X_add_number < min || exp.X_add_number > max)
4810 {
4811 inst.error = _("immediate value out of range");
4812 return FAIL;
4813 }
4814
4815 *val = exp.X_add_number;
4816 return SUCCESS;
4817 }
4818
/* Less-generic immediate-value read function with the possibility of loading a
   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
   instructions.  Puts the result directly in inst.operands[i]: the low 32
   bits in .imm and, when a 64-bit value is present, the high 32 bits in
   .reg with .regisimm set.  IN_EXP, when non-NULL, receives the parsed
   expression.  Returns SUCCESS (advancing *STR) or FAIL.  */

static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bfd_boolean allow_symbol_p)
{
  expressionS exp;
  expressionS *exp_p = in_exp ? in_exp : &exp;
  char *ptr = *str;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
    {
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.	*/
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
				  & 0xffffffff);
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
    {
      /* For O_big, X_add_number is the count of littlenums in
	 generic_bignum.  */
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg,  in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  /* All littlenums above the low 64 bits must repeat the
	     sign-extension littlenum exactly.  */
	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Assemble the low 32 bits into .imm ...  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      /* ... and the next 32 bits into .reg.  */
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
4891
4892 /* Returns the pseudo-register number of an FPA immediate constant,
4893 or FAIL if there isn't a valid constant here. */
4894
4895 static int
4896 parse_fpa_immediate (char ** str)
4897 {
4898 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4899 char * save_in;
4900 expressionS exp;
4901 int i;
4902 int j;
4903
4904 /* First try and match exact strings, this is to guarantee
4905 that some formats will work even for cross assembly. */
4906
4907 for (i = 0; fp_const[i]; i++)
4908 {
4909 if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
4910 {
4911 char *start = *str;
4912
4913 *str += strlen (fp_const[i]);
4914 if (is_end_of_line[(unsigned char) **str])
4915 return i + 8;
4916 *str = start;
4917 }
4918 }
4919
4920 /* Just because we didn't get a match doesn't mean that the constant
4921 isn't valid, just that it is in a format that we don't
4922 automatically recognize. Try parsing it with the standard
4923 expression routines. */
4924
4925 memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
4926
4927 /* Look for a raw floating point number. */
4928 if ((save_in = atof_ieee (*str, 'x', words)) != NULL
4929 && is_end_of_line[(unsigned char) *save_in])
4930 {
4931 for (i = 0; i < NUM_FLOAT_VALS; i++)
4932 {
4933 for (j = 0; j < MAX_LITTLENUMS; j++)
4934 {
4935 if (words[j] != fp_values[i][j])
4936 break;
4937 }
4938
4939 if (j == MAX_LITTLENUMS)
4940 {
4941 *str = save_in;
4942 return i + 8;
4943 }
4944 }
4945 }
4946
4947 /* Try and parse a more complex expression, this will probably fail
4948 unless the code uses a floating point prefix (eg "0f"). */
4949 save_in = input_line_pointer;
4950 input_line_pointer = *str;
4951 if (expression (&exp) == absolute_section
4952 && exp.X_op == O_big
4953 && exp.X_add_number < 0)
4954 {
4955 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4956 Ditto for 15. */
4957 #define X_PRECISION 5
4958 #define E_PRECISION 15L
4959 if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
4960 {
4961 for (i = 0; i < NUM_FLOAT_VALS; i++)
4962 {
4963 for (j = 0; j < MAX_LITTLENUMS; j++)
4964 {
4965 if (words[j] != fp_values[i][j])
4966 break;
4967 }
4968
4969 if (j == MAX_LITTLENUMS)
4970 {
4971 *str = input_line_pointer;
4972 input_line_pointer = save_in;
4973 return i + 8;
4974 }
4975 }
4976 }
4977 }
4978
4979 *str = input_line_pointer;
4980 input_line_pointer = save_in;
4981 inst.error = _("invalid FPA immediate expression");
4982 return FAIL;
4983 }
4984
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000.  */

static int
is_quarter_float (unsigned imm)
{
  unsigned expected_top;

  /* The low 19 bits of the fraction must be zero.  */
  if ((imm & 0x7ffff) != 0)
    return 0;

  /* Bits 30..25 must match the 'B' pattern implied by bit 29:
     011111 when bit 29 is set, 100000 when it is clear.  */
  expected_top = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
  return (imm & 0x7e000000) == expected_top;
}
4994
4995
/* Detect the presence of a floating point or integer zero constant,
   i.e. #0.0 or #0.  Returns TRUE and advances *IN past the constant
   on success; on failure *IN may have been advanced by the numeric
   scanners below.  */

static bfd_boolean
parse_ifimm_zero (char **in)
{
  int error_code;

  if (!is_immediate_prefix (**in))
    {
      /* In unified syntax, all prefixes are optional.  */
      if (!unified_syntax)
	return FALSE;
    }
  else
    ++*in;

  /* Accept #0x0 as a synonym for #0.  */
  if (strncmp (*in, "0x", 2) == 0)
    {
      int val;
      /* parse_immediate with [0, 0] only succeeds for the value zero.  */
      if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
	return FALSE;
      return TRUE;
    }

  error_code = atof_generic (in, ".", EXP_CHARS,
			     &generic_floating_point_number);

  /* NOTE(review): low > leader appears to be atof_generic's signature
     for "no significant digits found", i.e. the parsed value is zero;
     the '+' sign check rejects -0.0 — confirm against atof-generic.c.  */
  if (!error_code
      && generic_floating_point_number.sign == '+'
      && (generic_floating_point_number.low
	  > generic_floating_point_number.leader))
    return TRUE;

  return FALSE;
}
5033
5034 /* Parse an 8-bit "quarter-precision" floating point number of the form:
5035 0baBbbbbbc defgh000 00000000 00000000.
5036 The zero and minus-zero cases need special handling, since they can't be
5037 encoded in the "quarter-precision" float format, but can nonetheless be
5038 loaded as integer constants. */
5039
5040 static unsigned
5041 parse_qfloat_immediate (char **ccp, int *immed)
5042 {
5043 char *str = *ccp;
5044 char *fpnum;
5045 LITTLENUM_TYPE words[MAX_LITTLENUMS];
5046 int found_fpchar = 0;
5047
5048 skip_past_char (&str, '#');
5049
5050 /* We must not accidentally parse an integer as a floating-point number. Make
5051 sure that the value we parse is not an integer by checking for special
5052 characters '.' or 'e'.
5053 FIXME: This is a horrible hack, but doing better is tricky because type
5054 information isn't in a very usable state at parse time. */
5055 fpnum = str;
5056 skip_whitespace (fpnum);
5057
5058 if (strncmp (fpnum, "0x", 2) == 0)
5059 return FAIL;
5060 else
5061 {
5062 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
5063 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
5064 {
5065 found_fpchar = 1;
5066 break;
5067 }
5068
5069 if (!found_fpchar)
5070 return FAIL;
5071 }
5072
5073 if ((str = atof_ieee (str, 's', words)) != NULL)
5074 {
5075 unsigned fpword = 0;
5076 int i;
5077
5078 /* Our FP word must be 32 bits (single-precision FP). */
5079 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
5080 {
5081 fpword <<= LITTLENUM_NUMBER_OF_BITS;
5082 fpword |= words[i];
5083 }
5084
5085 if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
5086 *immed = fpword;
5087 else
5088 return FAIL;
5089
5090 *ccp = str;
5091
5092 return SUCCESS;
5093 }
5094
5095 return FAIL;
5096 }
5097
/* Shift operands.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

/* Entry type for the arm_shift_hsh hash table used by parse_shift:
   maps a shift mnemonic to its kind.  */
struct asm_shift_name
{
  const char *name;
  enum shift_kind kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.	*/
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.	*/
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.	 */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.	 */
};
5119
5120 /* Parse a <shift> specifier on an ARM data processing instruction.
5121 This has three forms:
5122
5123 (LSL|LSR|ASL|ASR|ROR) Rs
5124 (LSL|LSR|ASL|ASR|ROR) #imm
5125 RRX
5126
5127 Note that ASL is assimilated to LSL in the instruction encoding, and
5128 RRX to ROR #0 (which cannot be written as such). */
5129
5130 static int
5131 parse_shift (char **str, int i, enum parse_shift_mode mode)
5132 {
5133 const struct asm_shift_name *shift_name;
5134 enum shift_kind shift;
5135 char *s = *str;
5136 char *p = s;
5137 int reg;
5138
5139 for (p = *str; ISALPHA (*p); p++)
5140 ;
5141
5142 if (p == *str)
5143 {
5144 inst.error = _("shift expression expected");
5145 return FAIL;
5146 }
5147
5148 shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
5149 p - *str);
5150
5151 if (shift_name == NULL)
5152 {
5153 inst.error = _("shift expression expected");
5154 return FAIL;
5155 }
5156
5157 shift = shift_name->kind;
5158
5159 switch (mode)
5160 {
5161 case NO_SHIFT_RESTRICT:
5162 case SHIFT_IMMEDIATE: break;
5163
5164 case SHIFT_LSL_OR_ASR_IMMEDIATE:
5165 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
5166 {
5167 inst.error = _("'LSL' or 'ASR' required");
5168 return FAIL;
5169 }
5170 break;
5171
5172 case SHIFT_LSL_IMMEDIATE:
5173 if (shift != SHIFT_LSL)
5174 {
5175 inst.error = _("'LSL' required");
5176 return FAIL;
5177 }
5178 break;
5179
5180 case SHIFT_ASR_IMMEDIATE:
5181 if (shift != SHIFT_ASR)
5182 {
5183 inst.error = _("'ASR' required");
5184 return FAIL;
5185 }
5186 break;
5187
5188 default: abort ();
5189 }
5190
5191 if (shift != SHIFT_RRX)
5192 {
5193 /* Whitespace can appear here if the next thing is a bare digit. */
5194 skip_whitespace (p);
5195
5196 if (mode == NO_SHIFT_RESTRICT
5197 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5198 {
5199 inst.operands[i].imm = reg;
5200 inst.operands[i].immisreg = 1;
5201 }
5202 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5203 return FAIL;
5204 }
5205 inst.operands[i].shift_kind = shift;
5206 inst.operands[i].shifted = 1;
5207 *str = p;
5208 return SUCCESS;
5209 }
5210
5211 /* Parse a <shifter_operand> for an ARM data processing instruction:
5212
5213 #<immediate>
5214 #<immediate>, <rotate>
5215 <Rm>
5216 <Rm>, <shift>
5217
5218 where <shift> is defined by parse_shift above, and <rotate> is a
5219 multiple of 2 between 0 and 30. Validation of immediate operands
5220 is deferred to md_apply_fix. */
5221
5222 static int
5223 parse_shifter_operand (char **str, int i)
5224 {
5225 int value;
5226 expressionS exp;
5227
5228 if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
5229 {
5230 inst.operands[i].reg = value;
5231 inst.operands[i].isreg = 1;
5232
5233 /* parse_shift will override this if appropriate */
5234 inst.reloc.exp.X_op = O_constant;
5235 inst.reloc.exp.X_add_number = 0;
5236
5237 if (skip_past_comma (str) == FAIL)
5238 return SUCCESS;
5239
5240 /* Shift operation on register. */
5241 return parse_shift (str, i, NO_SHIFT_RESTRICT);
5242 }
5243
5244 if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
5245 return FAIL;
5246
5247 if (skip_past_comma (str) == SUCCESS)
5248 {
5249 /* #x, y -- ie explicit rotation by Y. */
5250 if (my_get_expression (&exp, str, GE_NO_PREFIX))
5251 return FAIL;
5252
5253 if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
5254 {
5255 inst.error = _("constant expression expected");
5256 return FAIL;
5257 }
5258
5259 value = exp.X_add_number;
5260 if (value < 0 || value > 30 || value % 2 != 0)
5261 {
5262 inst.error = _("invalid rotation");
5263 return FAIL;
5264 }
5265 if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
5266 {
5267 inst.error = _("invalid constant");
5268 return FAIL;
5269 }
5270
5271 /* Encode as specified. */
5272 inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
5273 return SUCCESS;
5274 }
5275
5276 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
5277 inst.reloc.pc_rel = 0;
5278 return SUCCESS;
5279 }
5280
/* Group relocation information.  Each entry in the table contains the
   textual name of the relocation as may appear in assembler source
   and must end with a colon.
   Along with this textual name are the relocation codes to be used if
   the corresponding instruction is an ALU instruction (ADD or SUB only),
   an LDR, an LDRS, or an LDC.  */

struct group_reloc_table_entry
{
  const char *name;	/* Relocation name as written in source, without
			   the trailing colon.  */
  int alu_code;		/* BFD reloc for ADD/SUB; 0 if not allowed.  */
  int ldr_code;		/* BFD reloc for LDR; 0 if not allowed.  */
  int ldrs_code;	/* BFD reloc for LDRS; 0 if not allowed.  */
  int ldc_code;		/* BFD reloc for LDC; 0 if not allowed.  */
};

/* Which of the non-ALU relocation-code fields to use; passed to
   parse_address_main via parse_address_group_reloc.  */
typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC
} group_reloc_type;
5305
/* The supported group relocations, searched by
   find_group_reloc_table_entry.  A zero code means that variant of the
   relocation does not exist for the instruction class.  */
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 },	/* LDC */
    /* Absolute thumb alu relocations.  */
    { "lower0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "lower8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 } };				/* LDC.  */
5380
5381 /* Given the address of a pointer pointing to the textual name of a group
5382 relocation as may appear in assembler source, attempt to find its details
5383 in group_reloc_table. The pointer will be updated to the character after
5384 the trailing colon. On failure, FAIL will be returned; SUCCESS
5385 otherwise. On success, *entry will be updated to point at the relevant
5386 group_reloc_table entry. */
5387
5388 static int
5389 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5390 {
5391 unsigned int i;
5392 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5393 {
5394 int length = strlen (group_reloc_table[i].name);
5395
5396 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5397 && (*str)[length] == ':')
5398 {
5399 *out = &group_reloc_table[i];
5400 *str += (length + 1);
5401 return SUCCESS;
5402 }
5403 }
5404
5405 return FAIL;
5406 }
5407
5408 /* Parse a <shifter_operand> for an ARM data processing instruction
5409 (as for parse_shifter_operand) where group relocations are allowed:
5410
5411 #<immediate>
5412 #<immediate>, <rotate>
5413 #:<group_reloc>:<expression>
5414 <Rm>
5415 <Rm>, <shift>
5416
5417 where <group_reloc> is one of the strings defined in group_reloc_table.
5418 The hashes are optional.
5419
5420 Everything else is as for parse_shifter_operand. */
5421
5422 static parse_operand_result
5423 parse_shifter_operand_group_reloc (char **str, int i)
5424 {
5425 /* Determine if we have the sequence of characters #: or just :
5426 coming next. If we do, then we check for a group relocation.
5427 If we don't, punt the whole lot to parse_shifter_operand. */
5428
5429 if (((*str)[0] == '#' && (*str)[1] == ':')
5430 || (*str)[0] == ':')
5431 {
5432 struct group_reloc_table_entry *entry;
5433
5434 if ((*str)[0] == '#')
5435 (*str) += 2;
5436 else
5437 (*str)++;
5438
5439 /* Try to parse a group relocation. Anything else is an error. */
5440 if (find_group_reloc_table_entry (str, &entry) == FAIL)
5441 {
5442 inst.error = _("unknown group relocation");
5443 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5444 }
5445
5446 /* We now have the group relocation table entry corresponding to
5447 the name in the assembler source. Next, we parse the expression. */
5448 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
5449 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5450
5451 /* Record the relocation type (always the ALU variant here). */
5452 inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
5453 gas_assert (inst.reloc.type != 0);
5454
5455 return PARSE_OPERAND_SUCCESS;
5456 }
5457 else
5458 return parse_shifter_operand (str, i) == SUCCESS
5459 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5460
5461 /* Never reached. */
5462 }
5463
5464 /* Parse a Neon alignment expression. Information is written to
5465 inst.operands[i]. We assume the initial ':' has been skipped.
5466
5467 align .imm = align << 8, .immisalign=1, .preind=0 */
5468 static parse_operand_result
5469 parse_neon_alignment (char **str, int i)
5470 {
5471 char *p = *str;
5472 expressionS exp;
5473
5474 my_get_expression (&exp, &p, GE_NO_PREFIX);
5475
5476 if (exp.X_op != O_constant)
5477 {
5478 inst.error = _("alignment must be constant");
5479 return PARSE_OPERAND_FAIL;
5480 }
5481
5482 inst.operands[i].imm = exp.X_add_number << 8;
5483 inst.operands[i].immisalign = 1;
5484 /* Alignments are not pre-indexes. */
5485 inst.operands[i].preind = 0;
5486
5487 *str = p;
5488 return PARSE_OPERAND_SUCCESS;
5489 }
5490
/* Parse all forms of an ARM address expression.  Information is written
   to inst.operands[i] and/or inst.reloc.

   Preindexed addressing (.preind=1):

   [Rn, #offset]       .reg=Rn .reloc.exp=offset
   [Rn, +/-Rm]	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   [Rn, +/-Rm, shift]  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
		       .shift_kind=shift .reloc.exp=shift_imm

   These three may have a trailing ! which causes .writeback to be set also.

   Postindexed addressing (.postind=1, .writeback=1):

   [Rn], #offset       .reg=Rn .reloc.exp=offset
   [Rn], +/-Rm	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   [Rn], +/-Rm, shift  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
		       .shift_kind=shift .reloc.exp=shift_imm

   Unindexed addressing (.preind=0, .postind=0):

   [Rn], {option}      .reg=Rn .imm=option .immisreg=0

   Other:

   [Rn]{!}	       shorthand for [Rn,#0]{!}
   =immediate	       .isreg=0 .reloc.exp=immediate
   label	       .reg=PC .reloc.pc_rel=1 .reloc.exp=label

  It is the caller's responsibility to check for addressing modes not
  supported by the instruction, and to set inst.reloc.type.  */

static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  if (skip_past_char (&p, '[') == FAIL)
    {
      /* No '[': this is either '=immediate' or a bare expression
	 (a PC-relative label).  */
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.reloc.pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;

	  if (my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX_BIG))
	    return PARSE_OPERAND_FAIL;
	}
      else if (parse_big_immediate (&p, i, &inst.reloc.exp,
				    /*allow_symbol_p=*/TRUE))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* PR gas/14887: Allow for whitespace after the opening bracket.  */
  skip_whitespace (p);

  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  if (skip_past_comma (&p) == SUCCESS)
    {
      /* "[Rn, ..." - the pre-indexed forms.  */
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  /* [Rn, +/-Rm] with an optional shift.  */
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here. This may be subject to
	     change.  */
	  parse_operand_result result = parse_neon_alignment (&p, i);

	  if (result != PARSE_OPERAND_SUCCESS)
	    return result;
	}
      else
	{
	  if (inst.operands[i].negative)
	    {
	      /* The '-' was not followed by a register after all: back
		 up so the sign is parsed as part of the immediate.  */
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		  case GROUP_LDR:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
		    break;

		  case GROUP_LDRS:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
		    break;

		  case GROUP_LDC:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
		    break;

		  default:
		    gas_assert (0);
		}

	      /* A zero table entry means this group relocation variant
		 does not exist for this instruction class.  */
	      if (inst.reloc.type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    {
	      char *q = p;

	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  /* Re-scan the saved text: an explicit '-' means -0.  */
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }
  else if (skip_past_char (&p, ':') == SUCCESS)
    {
      /* FIXME: '@' should be used here, but it's filtered out by generic code
	 before we get to see it here. This may be subject to change.  */
      parse_operand_result result = parse_neon_alignment (&p, i);

      if (result != PARSE_OPERAND_SUCCESS)
	return result;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  /* "[Rn], ..." - the post-indexed forms.  */
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already. If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      char *q = p;

	      if (inst.operands[i].negative)
		{
		  /* The '-' was not followed by a register after all:
		     back up so the sign is parsed as part of the
		     immediate.  */
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
5776
5777 static int
5778 parse_address (char **str, int i)
5779 {
5780 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5781 ? SUCCESS : FAIL;
5782 }
5783
5784 static parse_operand_result
5785 parse_address_group_reloc (char **str, int i, group_reloc_type type)
5786 {
5787 return parse_address_main (str, i, 1, type);
5788 }
5789
5790 /* Parse an operand for a MOVW or MOVT instruction. */
5791 static int
5792 parse_half (char **str)
5793 {
5794 char * p;
5795
5796 p = *str;
5797 skip_past_char (&p, '#');
5798 if (strncasecmp (p, ":lower16:", 9) == 0)
5799 inst.reloc.type = BFD_RELOC_ARM_MOVW;
5800 else if (strncasecmp (p, ":upper16:", 9) == 0)
5801 inst.reloc.type = BFD_RELOC_ARM_MOVT;
5802
5803 if (inst.reloc.type != BFD_RELOC_UNUSED)
5804 {
5805 p += 9;
5806 skip_whitespace (p);
5807 }
5808
5809 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5810 return FAIL;
5811
5812 if (inst.reloc.type == BFD_RELOC_UNUSED)
5813 {
5814 if (inst.reloc.exp.X_op != O_constant)
5815 {
5816 inst.error = _("constant expression expected");
5817 return FAIL;
5818 }
5819 if (inst.reloc.exp.X_add_number < 0
5820 || inst.reloc.exp.X_add_number > 0xffff)
5821 {
5822 inst.error = _("immediate value out of range");
5823 return FAIL;
5824 }
5825 }
5826 *str = p;
5827 return SUCCESS;
5828 }
5829
5830 /* Miscellaneous. */
5831
/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
   or a bitmask suitable to be or-ed into the ARM msr instruction.
   LHS is TRUE when the operand is the destination of an MSR (i.e. it is
   being written).  */
static int
parse_psr (char **str, bfd_boolean lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bfd_boolean is_apsr = FALSE;
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698:  If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    m_profile = FALSE;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */
      is_apsr = TRUE;
      psr_field = 0;
    }
  else if (m_profile)
    {
      /* Not one of the generic names: on M-profile, look the register name
	 up in the v7m special-register table.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* For the *PSR aliases, only the part up to and including the final
	 'r'/'R' names the register; any '_<bits>' suffix is handled at
	 check_suffix below.  */
      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified.  Note that
	 APSR itself is handled above.  */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = TRUE;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  /* Step over the four-character name (SPSR/CPSR/APSR) matched above.  */
  p += 4;
check_suffix:
  /* Reached either by falling through from the generic names, or via goto
     for M-profile APSR aliases whose suffix still needs parsing.  */
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  /* Each letter may appear at most once; a duplicate sets the 0x20
	     (or, for 'g', the 0x2) marker bit, which is diagnosed as a bad
	     bitmask after the loop.  */
	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  /* All five of n, z, c, v and q must be present together.  */
	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  if (g_bit == 0x1)
	    {
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject duplicated letters and partial nzcvq sets.  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  /* Non-APSR suffixes name whole fields (e.g. _cxsf); look them up
	     in the generic PSR suffix table.  */
	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						      p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;  /* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here).  */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
6028
6029 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
6030 value suitable for splatting into the AIF field of the instruction. */
6031
6032 static int
6033 parse_cps_flags (char **str)
6034 {
6035 int val = 0;
6036 int saw_a_flag = 0;
6037 char *s = *str;
6038
6039 for (;;)
6040 switch (*s++)
6041 {
6042 case '\0': case ',':
6043 goto done;
6044
6045 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
6046 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
6047 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
6048
6049 default:
6050 inst.error = _("unrecognized CPS flag");
6051 return FAIL;
6052 }
6053
6054 done:
6055 if (saw_a_flag == 0)
6056 {
6057 inst.error = _("missing CPS flags");
6058 return FAIL;
6059 }
6060
6061 *str = s - 1;
6062 return val;
6063 }
6064
6065 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6066 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6067
6068 static int
6069 parse_endian_specifier (char **str)
6070 {
6071 int little_endian;
6072 char *s = *str;
6073
6074 if (strncasecmp (s, "BE", 2))
6075 little_endian = 0;
6076 else if (strncasecmp (s, "LE", 2))
6077 little_endian = 1;
6078 else
6079 {
6080 inst.error = _("valid endian specifiers are be or le");
6081 return FAIL;
6082 }
6083
6084 if (ISALNUM (s[2]) || s[2] == '_')
6085 {
6086 inst.error = _("valid endian specifiers are be or le");
6087 return FAIL;
6088 }
6089
6090 *str = s + 2;
6091 return little_endian;
6092 }
6093
6094 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6095 value suitable for poking into the rotate field of an sxt or sxta
6096 instruction, or FAIL on error. */
6097
6098 static int
6099 parse_ror (char **str)
6100 {
6101 int rot;
6102 char *s = *str;
6103
6104 if (strncasecmp (s, "ROR", 3) == 0)
6105 s += 3;
6106 else
6107 {
6108 inst.error = _("missing rotation field after comma");
6109 return FAIL;
6110 }
6111
6112 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6113 return FAIL;
6114
6115 switch (rot)
6116 {
6117 case 0: *str = s; return 0x0;
6118 case 8: *str = s; return 0x1;
6119 case 16: *str = s; return 0x2;
6120 case 24: *str = s; return 0x3;
6121
6122 default:
6123 inst.error = _("rotation can only be 0, 8, 16, or 24");
6124 return FAIL;
6125 }
6126 }
6127
6128 /* Parse a conditional code (from conds[] below). The value returned is in the
6129 range 0 .. 14, or FAIL. */
6130 static int
6131 parse_cond (char **str)
6132 {
6133 char *q;
6134 const struct asm_cond *c;
6135 int n;
6136 /* Condition codes are always 2 characters, so matching up to
6137 3 characters is sufficient. */
6138 char cond[3];
6139
6140 q = *str;
6141 n = 0;
6142 while (ISALPHA (*q) && n < 3)
6143 {
6144 cond[n] = TOLOWER (*q);
6145 q++;
6146 n++;
6147 }
6148
6149 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6150 if (!c)
6151 {
6152 inst.error = _("condition required");
6153 return FAIL;
6154 }
6155
6156 *str = q;
6157 return c->value;
6158 }
6159
6160 /* Record a use of the given feature. */
6161 static void
6162 record_feature_use (const arm_feature_set *feature)
6163 {
6164 if (thumb_mode)
6165 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
6166 else
6167 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
6168 }
6169
6170 /* If the given feature is currently allowed, mark it as used and return TRUE.
6171 Return FALSE otherwise. */
6172 static bfd_boolean
6173 mark_feature_used (const arm_feature_set *feature)
6174 {
6175 /* Ensure the option is currently allowed. */
6176 if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
6177 return FALSE;
6178
6179 /* Add the appropriate architecture feature for the barrier option used. */
6180 record_feature_use (feature);
6181
6182 return TRUE;
6183 }
6184
6185 /* Parse an option for a barrier instruction. Returns the encoding for the
6186 option, or FAIL. */
6187 static int
6188 parse_barrier (char **str)
6189 {
6190 char *p, *q;
6191 const struct asm_barrier_opt *o;
6192
6193 p = q = *str;
6194 while (ISALPHA (*q))
6195 q++;
6196
6197 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6198 q - p);
6199 if (!o)
6200 return FAIL;
6201
6202 if (!mark_feature_used (&o->arch))
6203 return FAIL;
6204
6205 *str = q;
6206 return o->value;
6207 }
6208
6209 /* Parse the operands of a table branch instruction. Similar to a memory
6210 operand. */
6211 static int
6212 parse_tb (char **str)
6213 {
6214 char * p = *str;
6215 int reg;
6216
6217 if (skip_past_char (&p, '[') == FAIL)
6218 {
6219 inst.error = _("'[' expected");
6220 return FAIL;
6221 }
6222
6223 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6224 {
6225 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6226 return FAIL;
6227 }
6228 inst.operands[0].reg = reg;
6229
6230 if (skip_past_comma (&p) == FAIL)
6231 {
6232 inst.error = _("',' expected");
6233 return FAIL;
6234 }
6235
6236 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6237 {
6238 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6239 return FAIL;
6240 }
6241 inst.operands[0].imm = reg;
6242
6243 if (skip_past_comma (&p) == SUCCESS)
6244 {
6245 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6246 return FAIL;
6247 if (inst.reloc.exp.X_add_number != 1)
6248 {
6249 inst.error = _("invalid shift");
6250 return FAIL;
6251 }
6252 inst.operands[0].shifted = 1;
6253 }
6254
6255 if (skip_past_char (&p, ']') == FAIL)
6256 {
6257 inst.error = _("']' expected");
6258 return FAIL;
6259 }
6260 *str = p;
6261 return SUCCESS;
6262 }
6263
/* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
   information on the types the operands can take and how they are encoded.
   Up to four operands may be read; this function handles setting the
   ".present" field for each read operand itself.
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   else returns FAIL.  */

static int
parse_neon_mov (char **str, int *which_operand)
{
  /* I advances (via i++) as each operand is consumed and is written back
     to *WHICH_OPERAND only on success.  */
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  First operand is an S, D or Q
	 register; what follows the comma disambiguates the case.  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* A D register destination takes a second core register.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15: two core registers follow.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
	       == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7: first operand is a core register.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14: a second S register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands. Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
6486
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  The ARM code goes in the low 16 bits
   and the Thumb code in the high 16 bits; parse_operands unpacks the
   half appropriate to the current instruction set.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))

/* Matcher codes for parse_operands.  The order matters: everything from
   OP_FIRST_OPTIONAL onwards is treated as an optional operand, for which
   parse_operands records a backtrack point.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNSD,	/* Neon single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,	/* VFP single or double-precision register list (& quad) */
  OP_NRDLST,	/* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,	/* Neon element/structure list */

  /* Alternatives: a register of the named class, or something else.  */
  OP_RNDQ_I0,	/* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0,	/* VFP S or D reg, or floating point immediate zero.  */
  OP_RR_RNSC,	/* ARM reg or Neon scalar.  */
  OP_RNSD_RNSC, /* Neon S or D reg, or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,	/* Neon D reg, or Neon scalar.  */
  OP_VMOV,	/* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  /* Immediates with an explicit '#' prefix.  */
  OP_I0,	/* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		 0 .. 15 */
  OP_I16,	/*		 1 .. 16 */
  OP_I16z,	/*		 0 .. 16 */
  OP_I31,	/*		 0 .. 31 */
  OP_I31w,	/*		 0 .. 31, optional trailing ! */
  OP_I32,	/*		 1 .. 32 */
  OP_I32z,	/*		 0 .. 32 */
  OP_I63,	/*		 0 .. 63 */
  OP_I63s,	/*	       -64 .. 63 */
  OP_I64,	/*		 1 .. 64 */
  OP_I64z,	/*		 0 .. 64 */
  OP_I255,	/*		 0 .. 255 */

  /* Immediates where the '#' prefix is optional.  */
  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,	/* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */
  OP_IROT1,	/* VCADD rotate immediate: 90, 270.  */
  OP_IROT2,	/* VCMLA rotate immediate: 0, 90, 180, 270.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,	/* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc stuff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,	 /*				1 .. 32 */
  OP_oI32z,	 /*				0 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,	 /* Optional Neon double precision register */
  OP_oRNQ,	 /* Optional Neon quad precision register */
  OP_oRNDQ,	 /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  /* parse_operands compares codes against this to decide whether a
     backtrack point is needed; keep it equal to the first OP_o* code.  */
  OP_FIRST_OPTIONAL = OP_oI7b
};
6626
6627 /* Generic instruction operand parser. This does no encoding and no
6628 semantic validation; it merely squirrels values away in the inst
6629 structure. Returns SUCCESS or FAIL depending on whether the
6630 specified grammar matched. */
6631 static int
6632 parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
6633 {
6634 unsigned const int *upat = pattern;
6635 char *backtrack_pos = 0;
6636 const char *backtrack_error = 0;
6637 int i, val = 0, backtrack_index = 0;
6638 enum arm_reg_type rtype;
6639 parse_operand_result result;
6640 unsigned int op_parse_code;
6641
6642 #define po_char_or_fail(chr) \
6643 do \
6644 { \
6645 if (skip_past_char (&str, chr) == FAIL) \
6646 goto bad_args; \
6647 } \
6648 while (0)
6649
6650 #define po_reg_or_fail(regtype) \
6651 do \
6652 { \
6653 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6654 & inst.operands[i].vectype); \
6655 if (val == FAIL) \
6656 { \
6657 first_error (_(reg_expected_msgs[regtype])); \
6658 goto failure; \
6659 } \
6660 inst.operands[i].reg = val; \
6661 inst.operands[i].isreg = 1; \
6662 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6663 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6664 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6665 || rtype == REG_TYPE_VFD \
6666 || rtype == REG_TYPE_NQ); \
6667 } \
6668 while (0)
6669
6670 #define po_reg_or_goto(regtype, label) \
6671 do \
6672 { \
6673 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6674 & inst.operands[i].vectype); \
6675 if (val == FAIL) \
6676 goto label; \
6677 \
6678 inst.operands[i].reg = val; \
6679 inst.operands[i].isreg = 1; \
6680 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6681 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6682 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6683 || rtype == REG_TYPE_VFD \
6684 || rtype == REG_TYPE_NQ); \
6685 } \
6686 while (0)
6687
6688 #define po_imm_or_fail(min, max, popt) \
6689 do \
6690 { \
6691 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6692 goto failure; \
6693 inst.operands[i].imm = val; \
6694 } \
6695 while (0)
6696
6697 #define po_scalar_or_goto(elsz, label) \
6698 do \
6699 { \
6700 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6701 if (val == FAIL) \
6702 goto label; \
6703 inst.operands[i].reg = val; \
6704 inst.operands[i].isscalar = 1; \
6705 } \
6706 while (0)
6707
6708 #define po_misc_or_fail(expr) \
6709 do \
6710 { \
6711 if (expr) \
6712 goto failure; \
6713 } \
6714 while (0)
6715
6716 #define po_misc_or_fail_no_backtrack(expr) \
6717 do \
6718 { \
6719 result = expr; \
6720 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6721 backtrack_pos = 0; \
6722 if (result != PARSE_OPERAND_SUCCESS) \
6723 goto failure; \
6724 } \
6725 while (0)
6726
6727 #define po_barrier_or_imm(str) \
6728 do \
6729 { \
6730 val = parse_barrier (&str); \
6731 if (val == FAIL && ! ISALPHA (*str)) \
6732 goto immediate; \
6733 if (val == FAIL \
6734 /* ISB can only take SY as an option. */ \
6735 || ((inst.instruction & 0xf0) == 0x60 \
6736 && val != 0xf)) \
6737 { \
6738 inst.error = _("invalid barrier type"); \
6739 backtrack_pos = 0; \
6740 goto failure; \
6741 } \
6742 } \
6743 while (0)
6744
6745 skip_whitespace (str);
6746
6747 for (i = 0; upat[i] != OP_stop; i++)
6748 {
6749 op_parse_code = upat[i];
6750 if (op_parse_code >= 1<<16)
6751 op_parse_code = thumb ? (op_parse_code >> 16)
6752 : (op_parse_code & ((1<<16)-1));
6753
6754 if (op_parse_code >= OP_FIRST_OPTIONAL)
6755 {
6756 /* Remember where we are in case we need to backtrack. */
6757 gas_assert (!backtrack_pos);
6758 backtrack_pos = str;
6759 backtrack_error = inst.error;
6760 backtrack_index = i;
6761 }
6762
6763 if (i > 0 && (i > 1 || inst.operands[0].present))
6764 po_char_or_fail (',');
6765
6766 switch (op_parse_code)
6767 {
6768 /* Registers */
6769 case OP_oRRnpc:
6770 case OP_oRRnpcsp:
6771 case OP_RRnpc:
6772 case OP_RRnpcsp:
6773 case OP_oRR:
6774 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
6775 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
6776 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
6777 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
6778 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
6779 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
6780 case OP_oRND:
6781 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
6782 case OP_RVC:
6783 po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
6784 break;
6785 /* Also accept generic coprocessor regs for unknown registers. */
6786 coproc_reg:
6787 po_reg_or_fail (REG_TYPE_CN);
6788 break;
6789 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
6790 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
6791 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
6792 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
6793 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
6794 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
6795 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
6796 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
6797 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
6798 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
6799 case OP_oRNQ:
6800 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
6801 case OP_RNSD: po_reg_or_fail (REG_TYPE_NSD); break;
6802 case OP_oRNDQ:
6803 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
6804 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
6805 case OP_oRNSDQ:
6806 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
6807
6808 /* Neon scalar. Using an element size of 8 means that some invalid
6809 scalars are accepted here, so deal with those in later code. */
6810 case OP_RNSC: po_scalar_or_goto (8, failure); break;
6811
6812 case OP_RNDQ_I0:
6813 {
6814 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
6815 break;
6816 try_imm0:
6817 po_imm_or_fail (0, 0, TRUE);
6818 }
6819 break;
6820
6821 case OP_RVSD_I0:
6822 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
6823 break;
6824
6825 case OP_RSVD_FI0:
6826 {
6827 po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
6828 break;
6829 try_ifimm0:
6830 if (parse_ifimm_zero (&str))
6831 inst.operands[i].imm = 0;
6832 else
6833 {
6834 inst.error
6835 = _("only floating point zero is allowed as immediate value");
6836 goto failure;
6837 }
6838 }
6839 break;
6840
6841 case OP_RR_RNSC:
6842 {
6843 po_scalar_or_goto (8, try_rr);
6844 break;
6845 try_rr:
6846 po_reg_or_fail (REG_TYPE_RN);
6847 }
6848 break;
6849
6850 case OP_RNSDQ_RNSC:
6851 {
6852 po_scalar_or_goto (8, try_nsdq);
6853 break;
6854 try_nsdq:
6855 po_reg_or_fail (REG_TYPE_NSDQ);
6856 }
6857 break;
6858
6859 case OP_RNSD_RNSC:
6860 {
6861 po_scalar_or_goto (8, try_s_scalar);
6862 break;
6863 try_s_scalar:
6864 po_scalar_or_goto (4, try_nsd);
6865 break;
6866 try_nsd:
6867 po_reg_or_fail (REG_TYPE_NSD);
6868 }
6869 break;
6870
6871 case OP_RNDQ_RNSC:
6872 {
6873 po_scalar_or_goto (8, try_ndq);
6874 break;
6875 try_ndq:
6876 po_reg_or_fail (REG_TYPE_NDQ);
6877 }
6878 break;
6879
6880 case OP_RND_RNSC:
6881 {
6882 po_scalar_or_goto (8, try_vfd);
6883 break;
6884 try_vfd:
6885 po_reg_or_fail (REG_TYPE_VFD);
6886 }
6887 break;
6888
6889 case OP_VMOV:
6890 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6891 not careful then bad things might happen. */
6892 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
6893 break;
6894
6895 case OP_RNDQ_Ibig:
6896 {
6897 po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
6898 break;
6899 try_immbig:
6900 /* There's a possibility of getting a 64-bit immediate here, so
6901 we need special handling. */
6902 if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
6903 == FAIL)
6904 {
6905 inst.error = _("immediate value is out of range");
6906 goto failure;
6907 }
6908 }
6909 break;
6910
6911 case OP_RNDQ_I63b:
6912 {
6913 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
6914 break;
6915 try_shimm:
6916 po_imm_or_fail (0, 63, TRUE);
6917 }
6918 break;
6919
6920 case OP_RRnpcb:
6921 po_char_or_fail ('[');
6922 po_reg_or_fail (REG_TYPE_RN);
6923 po_char_or_fail (']');
6924 break;
6925
6926 case OP_RRnpctw:
6927 case OP_RRw:
6928 case OP_oRRw:
6929 po_reg_or_fail (REG_TYPE_RN);
6930 if (skip_past_char (&str, '!') == SUCCESS)
6931 inst.operands[i].writeback = 1;
6932 break;
6933
6934 /* Immediates */
6935 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
6936 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
6937 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
6938 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
6939 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
6940 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
6941 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
6942 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
6943 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
6944 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
6945 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
6946 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
6947
6948 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
6949 case OP_oI7b:
6950 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
6951 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
6952 case OP_oI31b:
6953 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
6954 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
6955 case OP_oI32z: po_imm_or_fail ( 0, 32, TRUE); break;
6956 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
6957
6958 /* Immediate variants */
6959 case OP_oI255c:
6960 po_char_or_fail ('{');
6961 po_imm_or_fail (0, 255, TRUE);
6962 po_char_or_fail ('}');
6963 break;
6964
6965 case OP_I31w:
6966 /* The expression parser chokes on a trailing !, so we have
6967 to find it first and zap it. */
6968 {
6969 char *s = str;
6970 while (*s && *s != ',')
6971 s++;
6972 if (s[-1] == '!')
6973 {
6974 s[-1] = '\0';
6975 inst.operands[i].writeback = 1;
6976 }
6977 po_imm_or_fail (0, 31, TRUE);
6978 if (str == s - 1)
6979 str = s;
6980 }
6981 break;
6982
6983 /* Expressions */
6984 case OP_EXPi: EXPi:
6985 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6986 GE_OPT_PREFIX));
6987 break;
6988
6989 case OP_EXP:
6990 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6991 GE_NO_PREFIX));
6992 break;
6993
6994 case OP_EXPr: EXPr:
6995 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6996 GE_NO_PREFIX));
6997 if (inst.reloc.exp.X_op == O_symbol)
6998 {
6999 val = parse_reloc (&str);
7000 if (val == -1)
7001 {
7002 inst.error = _("unrecognized relocation suffix");
7003 goto failure;
7004 }
7005 else if (val != BFD_RELOC_UNUSED)
7006 {
7007 inst.operands[i].imm = val;
7008 inst.operands[i].hasreloc = 1;
7009 }
7010 }
7011 break;
7012
7013 /* Operand for MOVW or MOVT. */
7014 case OP_HALF:
7015 po_misc_or_fail (parse_half (&str));
7016 break;
7017
7018 /* Register or expression. */
7019 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
7020 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
7021
7022 /* Register or immediate. */
7023 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
7024 I0: po_imm_or_fail (0, 0, FALSE); break;
7025
7026 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
7027 IF:
7028 if (!is_immediate_prefix (*str))
7029 goto bad_args;
7030 str++;
7031 val = parse_fpa_immediate (&str);
7032 if (val == FAIL)
7033 goto failure;
7034 /* FPA immediates are encoded as registers 8-15.
7035 parse_fpa_immediate has already applied the offset. */
7036 inst.operands[i].reg = val;
7037 inst.operands[i].isreg = 1;
7038 break;
7039
7040 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
7041 I32z: po_imm_or_fail (0, 32, FALSE); break;
7042
7043 /* Two kinds of register. */
7044 case OP_RIWR_RIWC:
7045 {
7046 struct reg_entry *rege = arm_reg_parse_multi (&str);
7047 if (!rege
7048 || (rege->type != REG_TYPE_MMXWR
7049 && rege->type != REG_TYPE_MMXWC
7050 && rege->type != REG_TYPE_MMXWCG))
7051 {
7052 inst.error = _("iWMMXt data or control register expected");
7053 goto failure;
7054 }
7055 inst.operands[i].reg = rege->number;
7056 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
7057 }
7058 break;
7059
7060 case OP_RIWC_RIWG:
7061 {
7062 struct reg_entry *rege = arm_reg_parse_multi (&str);
7063 if (!rege
7064 || (rege->type != REG_TYPE_MMXWC
7065 && rege->type != REG_TYPE_MMXWCG))
7066 {
7067 inst.error = _("iWMMXt control register expected");
7068 goto failure;
7069 }
7070 inst.operands[i].reg = rege->number;
7071 inst.operands[i].isreg = 1;
7072 }
7073 break;
7074
7075 /* Misc */
7076 case OP_CPSF: val = parse_cps_flags (&str); break;
7077 case OP_ENDI: val = parse_endian_specifier (&str); break;
7078 case OP_oROR: val = parse_ror (&str); break;
7079 case OP_COND: val = parse_cond (&str); break;
7080 case OP_oBARRIER_I15:
7081 po_barrier_or_imm (str); break;
7082 immediate:
7083 if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
7084 goto failure;
7085 break;
7086
7087 case OP_wPSR:
7088 case OP_rPSR:
7089 po_reg_or_goto (REG_TYPE_RNB, try_psr);
7090 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
7091 {
7092 inst.error = _("Banked registers are not available with this "
7093 "architecture.");
7094 goto failure;
7095 }
7096 break;
7097 try_psr:
7098 val = parse_psr (&str, op_parse_code == OP_wPSR);
7099 break;
7100
7101 case OP_APSR_RR:
7102 po_reg_or_goto (REG_TYPE_RN, try_apsr);
7103 break;
7104 try_apsr:
7105 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7106 instruction). */
7107 if (strncasecmp (str, "APSR_", 5) == 0)
7108 {
7109 unsigned found = 0;
7110 str += 5;
7111 while (found < 15)
7112 switch (*str++)
7113 {
7114 case 'c': found = (found & 1) ? 16 : found | 1; break;
7115 case 'n': found = (found & 2) ? 16 : found | 2; break;
7116 case 'z': found = (found & 4) ? 16 : found | 4; break;
7117 case 'v': found = (found & 8) ? 16 : found | 8; break;
7118 default: found = 16;
7119 }
7120 if (found != 15)
7121 goto failure;
7122 inst.operands[i].isvec = 1;
7123 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7124 inst.operands[i].reg = REG_PC;
7125 }
7126 else
7127 goto failure;
7128 break;
7129
7130 case OP_TB:
7131 po_misc_or_fail (parse_tb (&str));
7132 break;
7133
7134 /* Register lists. */
7135 case OP_REGLST:
7136 val = parse_reg_list (&str);
7137 if (*str == '^')
7138 {
7139 inst.operands[i].writeback = 1;
7140 str++;
7141 }
7142 break;
7143
7144 case OP_VRSLST:
7145 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
7146 break;
7147
7148 case OP_VRDLST:
7149 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
7150 break;
7151
7152 case OP_VRSDLST:
7153 /* Allow Q registers too. */
7154 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7155 REGLIST_NEON_D);
7156 if (val == FAIL)
7157 {
7158 inst.error = NULL;
7159 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7160 REGLIST_VFP_S);
7161 inst.operands[i].issingle = 1;
7162 }
7163 break;
7164
7165 case OP_NRDLST:
7166 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7167 REGLIST_NEON_D);
7168 break;
7169
7170 case OP_NSTRLST:
7171 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7172 &inst.operands[i].vectype);
7173 break;
7174
7175 /* Addressing modes */
7176 case OP_ADDR:
7177 po_misc_or_fail (parse_address (&str, i));
7178 break;
7179
7180 case OP_ADDRGLDR:
7181 po_misc_or_fail_no_backtrack (
7182 parse_address_group_reloc (&str, i, GROUP_LDR));
7183 break;
7184
7185 case OP_ADDRGLDRS:
7186 po_misc_or_fail_no_backtrack (
7187 parse_address_group_reloc (&str, i, GROUP_LDRS));
7188 break;
7189
7190 case OP_ADDRGLDC:
7191 po_misc_or_fail_no_backtrack (
7192 parse_address_group_reloc (&str, i, GROUP_LDC));
7193 break;
7194
7195 case OP_SH:
7196 po_misc_or_fail (parse_shifter_operand (&str, i));
7197 break;
7198
7199 case OP_SHG:
7200 po_misc_or_fail_no_backtrack (
7201 parse_shifter_operand_group_reloc (&str, i));
7202 break;
7203
7204 case OP_oSHll:
7205 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
7206 break;
7207
7208 case OP_oSHar:
7209 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
7210 break;
7211
7212 case OP_oSHllar:
7213 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
7214 break;
7215
7216 default:
7217 as_fatal (_("unhandled operand code %d"), op_parse_code);
7218 }
7219
7220 /* Various value-based sanity checks and shared operations. We
7221 do not signal immediate failures for the register constraints;
7222 this allows a syntax error to take precedence. */
7223 switch (op_parse_code)
7224 {
7225 case OP_oRRnpc:
7226 case OP_RRnpc:
7227 case OP_RRnpcb:
7228 case OP_RRw:
7229 case OP_oRRw:
7230 case OP_RRnpc_I0:
7231 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
7232 inst.error = BAD_PC;
7233 break;
7234
7235 case OP_oRRnpcsp:
7236 case OP_RRnpcsp:
7237 if (inst.operands[i].isreg)
7238 {
7239 if (inst.operands[i].reg == REG_PC)
7240 inst.error = BAD_PC;
7241 else if (inst.operands[i].reg == REG_SP
7242 /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
7243 relaxed since ARMv8-A. */
7244 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
7245 {
7246 gas_assert (thumb);
7247 inst.error = BAD_SP;
7248 }
7249 }
7250 break;
7251
7252 case OP_RRnpctw:
7253 if (inst.operands[i].isreg
7254 && inst.operands[i].reg == REG_PC
7255 && (inst.operands[i].writeback || thumb))
7256 inst.error = BAD_PC;
7257 break;
7258
7259 case OP_CPSF:
7260 case OP_ENDI:
7261 case OP_oROR:
7262 case OP_wPSR:
7263 case OP_rPSR:
7264 case OP_COND:
7265 case OP_oBARRIER_I15:
7266 case OP_REGLST:
7267 case OP_VRSLST:
7268 case OP_VRDLST:
7269 case OP_VRSDLST:
7270 case OP_NRDLST:
7271 case OP_NSTRLST:
7272 if (val == FAIL)
7273 goto failure;
7274 inst.operands[i].imm = val;
7275 break;
7276
7277 default:
7278 break;
7279 }
7280
7281 /* If we get here, this operand was successfully parsed. */
7282 inst.operands[i].present = 1;
7283 continue;
7284
7285 bad_args:
7286 inst.error = BAD_ARGS;
7287
7288 failure:
7289 if (!backtrack_pos)
7290 {
7291 /* The parse routine should already have set inst.error, but set a
7292 default here just in case. */
7293 if (!inst.error)
7294 inst.error = _("syntax error");
7295 return FAIL;
7296 }
7297
7298 /* Do not backtrack over a trailing optional argument that
7299 absorbed some text. We will only fail again, with the
7300 'garbage following instruction' error message, which is
7301 probably less helpful than the current one. */
7302 if (backtrack_index == i && backtrack_pos != str
7303 && upat[i+1] == OP_stop)
7304 {
7305 if (!inst.error)
7306 inst.error = _("syntax error");
7307 return FAIL;
7308 }
7309
7310 /* Try again, skipping the optional argument at backtrack_pos. */
7311 str = backtrack_pos;
7312 inst.error = backtrack_error;
7313 inst.operands[backtrack_index].present = 0;
7314 i = backtrack_index;
7315 backtrack_pos = 0;
7316 }
7317
7318 /* Check that we have parsed all the arguments. */
7319 if (*str != '\0' && !inst.error)
7320 inst.error = _("garbage following instruction");
7321
7322 return inst.error ? FAIL : SUCCESS;
7323 }
7324
7325 #undef po_char_or_fail
7326 #undef po_reg_or_fail
7327 #undef po_reg_or_goto
7328 #undef po_imm_or_fail
7329 #undef po_scalar_or_fail
7330 #undef po_barrier_or_imm
7331
7332 /* Shorthand macro for instruction encoding functions issuing errors. */
7333 #define constraint(expr, err) \
7334 do \
7335 { \
7336 if (expr) \
7337 { \
7338 inst.error = err; \
7339 return; \
7340 } \
7341 } \
7342 while (0)
7343
7344 /* Reject "bad registers" for Thumb-2 instructions. Many Thumb-2
7345 instructions are unpredictable if these registers are used. This
7346 is the BadReg predicate in ARM's Thumb-2 documentation.
7347
7348 Before ARMv8-A, REG_PC and REG_SP were not allowed in quite a few
7349 places, while the restriction on REG_SP was relaxed since ARMv8-A. */
7350 #define reject_bad_reg(reg) \
7351 do \
7352 if (reg == REG_PC) \
7353 { \
7354 inst.error = BAD_PC; \
7355 return; \
7356 } \
7357 else if (reg == REG_SP \
7358 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)) \
7359 { \
7360 inst.error = BAD_SP; \
7361 return; \
7362 } \
7363 while (0)
7364
7365 /* If REG is R13 (the stack pointer), warn that its use is
7366 deprecated. */
7367 #define warn_deprecated_sp(reg) \
7368 do \
7369 if (warn_on_deprecated && reg == REG_SP) \
7370 as_tsktsk (_("use of r13 is deprecated")); \
7371 while (0)
7372
7373 /* Functions for operand encoding. ARM, then Thumb. */
7374
7375 #define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7376
7377 /* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.
7378
7379 The only binary encoding difference is the Coprocessor number. Coprocessor
7380 9 is used for half-precision calculations or conversions. The format of the
7381 instruction is the same as the equivalent Coprocessor 10 instruction that
7382 exists for Single-Precision operation. */
7383
7384 static void
7385 do_scalar_fp16_v82_encode (void)
7386 {
7387 if (inst.cond != COND_ALWAYS)
7388 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
7389 " the behaviour is UNPREDICTABLE"));
7390 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
7391 _(BAD_FP16));
7392
7393 inst.instruction = (inst.instruction & 0xfffff0ff) | 0x900;
7394 mark_feature_used (&arm_ext_fp16);
7395 }
7396
7397 /* If VAL can be encoded in the immediate field of an ARM instruction,
7398 return the encoded form. Otherwise, return FAIL. */
7399
7400 static unsigned int
7401 encode_arm_immediate (unsigned int val)
7402 {
7403 unsigned int a, i;
7404
7405 if (val <= 0xff)
7406 return val;
7407
7408 for (i = 2; i < 32; i += 2)
7409 if ((a = rotate_left (val, i)) <= 0xff)
7410 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
7411
7412 return FAIL;
7413 }
7414
7415 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7416 return the encoded form. Otherwise, return FAIL. */
7417 static unsigned int
7418 encode_thumb32_immediate (unsigned int val)
7419 {
7420 unsigned int a, i;
7421
7422 if (val <= 0xff)
7423 return val;
7424
7425 for (i = 1; i <= 24; i++)
7426 {
7427 a = val >> i;
7428 if ((val & ~(0xff << i)) == 0)
7429 return ((val >> i) & 0x7f) | ((32 - i) << 7);
7430 }
7431
7432 a = val & 0xff;
7433 if (val == ((a << 16) | a))
7434 return 0x100 | a;
7435 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
7436 return 0x300 | a;
7437
7438 a = val & 0xff00;
7439 if (val == ((a << 16) | a))
7440 return 0x200 | (a >> 8);
7441
7442 return FAIL;
7443 }
7444 /* Encode a VFP SP or DP register number into inst.instruction. */
7445
7446 static void
7447 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
7448 {
7449 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
7450 && reg > 15)
7451 {
7452 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
7453 {
7454 if (thumb_mode)
7455 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
7456 fpu_vfp_ext_d32);
7457 else
7458 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
7459 fpu_vfp_ext_d32);
7460 }
7461 else
7462 {
7463 first_error (_("D register out of range for selected VFP version"));
7464 return;
7465 }
7466 }
7467
7468 switch (pos)
7469 {
7470 case VFP_REG_Sd:
7471 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
7472 break;
7473
7474 case VFP_REG_Sn:
7475 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
7476 break;
7477
7478 case VFP_REG_Sm:
7479 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
7480 break;
7481
7482 case VFP_REG_Dd:
7483 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
7484 break;
7485
7486 case VFP_REG_Dn:
7487 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
7488 break;
7489
7490 case VFP_REG_Dm:
7491 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
7492 break;
7493
7494 default:
7495 abort ();
7496 }
7497 }
7498
7499 /* Encode a <shift> in an ARM-format instruction. The immediate,
7500 if any, is handled by md_apply_fix. */
7501 static void
7502 encode_arm_shift (int i)
7503 {
7504 /* register-shifted register. */
7505 if (inst.operands[i].immisreg)
7506 {
7507 int op_index;
7508 for (op_index = 0; op_index <= i; ++op_index)
7509 {
7510 /* Check the operand only when it's presented. In pre-UAL syntax,
7511 if the destination register is the same as the first operand, two
7512 register form of the instruction can be used. */
7513 if (inst.operands[op_index].present && inst.operands[op_index].isreg
7514 && inst.operands[op_index].reg == REG_PC)
7515 as_warn (UNPRED_REG ("r15"));
7516 }
7517
7518 if (inst.operands[i].imm == REG_PC)
7519 as_warn (UNPRED_REG ("r15"));
7520 }
7521
7522 if (inst.operands[i].shift_kind == SHIFT_RRX)
7523 inst.instruction |= SHIFT_ROR << 5;
7524 else
7525 {
7526 inst.instruction |= inst.operands[i].shift_kind << 5;
7527 if (inst.operands[i].immisreg)
7528 {
7529 inst.instruction |= SHIFT_BY_REG;
7530 inst.instruction |= inst.operands[i].imm << 8;
7531 }
7532 else
7533 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7534 }
7535 }
7536
7537 static void
7538 encode_arm_shifter_operand (int i)
7539 {
7540 if (inst.operands[i].isreg)
7541 {
7542 inst.instruction |= inst.operands[i].reg;
7543 encode_arm_shift (i);
7544 }
7545 else
7546 {
7547 inst.instruction |= INST_IMMEDIATE;
7548 if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
7549 inst.instruction |= inst.operands[i].imm;
7550 }
7551 }
7552
7553 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
7554 static void
7555 encode_arm_addr_mode_common (int i, bfd_boolean is_t)
7556 {
7557 /* PR 14260:
7558 Generate an error if the operand is not a register. */
7559 constraint (!inst.operands[i].isreg,
7560 _("Instruction does not support =N addresses"));
7561
7562 inst.instruction |= inst.operands[i].reg << 16;
7563
7564 if (inst.operands[i].preind)
7565 {
7566 if (is_t)
7567 {
7568 inst.error = _("instruction does not accept preindexed addressing");
7569 return;
7570 }
7571 inst.instruction |= PRE_INDEX;
7572 if (inst.operands[i].writeback)
7573 inst.instruction |= WRITE_BACK;
7574
7575 }
7576 else if (inst.operands[i].postind)
7577 {
7578 gas_assert (inst.operands[i].writeback);
7579 if (is_t)
7580 inst.instruction |= WRITE_BACK;
7581 }
7582 else /* unindexed - only for coprocessor */
7583 {
7584 inst.error = _("instruction does not accept unindexed addressing");
7585 return;
7586 }
7587
7588 if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
7589 && (((inst.instruction & 0x000f0000) >> 16)
7590 == ((inst.instruction & 0x0000f000) >> 12)))
7591 as_warn ((inst.instruction & LOAD_BIT)
7592 ? _("destination register same as write-back base")
7593 : _("source register same as write-back base"));
7594 }
7595
7596 /* inst.operands[i] was set up by parse_address. Encode it into an
7597 ARM-format mode 2 load or store instruction. If is_t is true,
7598 reject forms that cannot be used with a T instruction (i.e. not
7599 post-indexed). */
7600 static void
7601 encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
7602 {
7603 const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
7604
7605 encode_arm_addr_mode_common (i, is_t);
7606
7607 if (inst.operands[i].immisreg)
7608 {
7609 constraint ((inst.operands[i].imm == REG_PC
7610 || (is_pc && inst.operands[i].writeback)),
7611 BAD_PC_ADDRESSING);
7612 inst.instruction |= INST_IMMEDIATE; /* yes, this is backwards */
7613 inst.instruction |= inst.operands[i].imm;
7614 if (!inst.operands[i].negative)
7615 inst.instruction |= INDEX_UP;
7616 if (inst.operands[i].shifted)
7617 {
7618 if (inst.operands[i].shift_kind == SHIFT_RRX)
7619 inst.instruction |= SHIFT_ROR << 5;
7620 else
7621 {
7622 inst.instruction |= inst.operands[i].shift_kind << 5;
7623 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7624 }
7625 }
7626 }
7627 else /* immediate offset in inst.reloc */
7628 {
7629 if (is_pc && !inst.reloc.pc_rel)
7630 {
7631 const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);
7632
7633 /* If is_t is TRUE, it's called from do_ldstt. ldrt/strt
7634 cannot use PC in addressing.
7635 PC cannot be used in writeback addressing, either. */
7636 constraint ((is_t || inst.operands[i].writeback),
7637 BAD_PC_ADDRESSING);
7638
7639 /* Use of PC in str is deprecated for ARMv7. */
7640 if (warn_on_deprecated
7641 && !is_load
7642 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
7643 as_tsktsk (_("use of PC in this instruction is deprecated"));
7644 }
7645
7646 if (inst.reloc.type == BFD_RELOC_UNUSED)
7647 {
7648 /* Prefer + for zero encoded value. */
7649 if (!inst.operands[i].negative)
7650 inst.instruction |= INDEX_UP;
7651 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
7652 }
7653 }
7654 }
7655
7656 /* inst.operands[i] was set up by parse_address. Encode it into an
7657 ARM-format mode 3 load or store instruction. Reject forms that
7658 cannot be used with such instructions. If is_t is true, reject
7659 forms that cannot be used with a T instruction (i.e. not
7660 post-indexed). */
7661 static void
7662 encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
7663 {
7664 if (inst.operands[i].immisreg && inst.operands[i].shifted)
7665 {
7666 inst.error = _("instruction does not accept scaled register index");
7667 return;
7668 }
7669
7670 encode_arm_addr_mode_common (i, is_t);
7671
7672 if (inst.operands[i].immisreg)
7673 {
7674 constraint ((inst.operands[i].imm == REG_PC
7675 || (is_t && inst.operands[i].reg == REG_PC)),
7676 BAD_PC_ADDRESSING);
7677 constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
7678 BAD_PC_WRITEBACK);
7679 inst.instruction |= inst.operands[i].imm;
7680 if (!inst.operands[i].negative)
7681 inst.instruction |= INDEX_UP;
7682 }
7683 else /* immediate offset in inst.reloc */
7684 {
7685 constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
7686 && inst.operands[i].writeback),
7687 BAD_PC_WRITEBACK);
7688 inst.instruction |= HWOFFSET_IMM;
7689 if (inst.reloc.type == BFD_RELOC_UNUSED)
7690 {
7691 /* Prefer + for zero encoded value. */
7692 if (!inst.operands[i].negative)
7693 inst.instruction |= INDEX_UP;
7694
7695 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
7696 }
7697 }
7698 }
7699
7700 /* Write immediate bits [7:0] to the following locations:
7701
7702 |28/24|23 19|18 16|15 4|3 0|
7703 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7704
7705 This function is used by VMOV/VMVN/VORR/VBIC. */
7706
7707 static void
7708 neon_write_immbits (unsigned immbits)
7709 {
7710 inst.instruction |= immbits & 0xf;
7711 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
7712 inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
7713 }
7714
7715 /* Invert low-order SIZE bits of XHI:XLO. */
7716
7717 static void
7718 neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
7719 {
7720 unsigned immlo = xlo ? *xlo : 0;
7721 unsigned immhi = xhi ? *xhi : 0;
7722
7723 switch (size)
7724 {
7725 case 8:
7726 immlo = (~immlo) & 0xff;
7727 break;
7728
7729 case 16:
7730 immlo = (~immlo) & 0xffff;
7731 break;
7732
7733 case 64:
7734 immhi = (~immhi) & 0xffffffff;
7735 /* fall through. */
7736
7737 case 32:
7738 immlo = (~immlo) & 0xffffffff;
7739 break;
7740
7741 default:
7742 abort ();
7743 }
7744
7745 if (xlo)
7746 *xlo = immlo;
7747
7748 if (xhi)
7749 *xhi = immhi;
7750 }
7751
7752 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
7753 A, B, C, D. */
7754
7755 static int
7756 neon_bits_same_in_bytes (unsigned imm)
7757 {
7758 return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
7759 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
7760 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
7761 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
7762 }
7763
7764 /* For immediate of above form, return 0bABCD. */
7765
7766 static unsigned
7767 neon_squash_bits (unsigned imm)
7768 {
7769 return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
7770 | ((imm & 0x01000000) >> 21);
7771 }
7772
7773 /* Compress quarter-float representation to 0b...000 abcdefgh. */
7774
7775 static unsigned
7776 neon_qfloat_bits (unsigned imm)
7777 {
7778 return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
7779 }
7780
7781 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
7782 the instruction. *OP is passed as the initial value of the op field, and
7783 may be set to a different value depending on the constant (i.e.
7784 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
7785 MVN). If the immediate looks like a repeated pattern then also
7786 try smaller element sizes. */
7787
7788 static int
7789 neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
7790 unsigned *immbits, int *op, int size,
7791 enum neon_el_type type)
7792 {
7793 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
7794 float. */
7795 if (type == NT_float && !float_p)
7796 return FAIL;
7797
7798 if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
7799 {
7800 if (size != 32 || *op == 1)
7801 return FAIL;
7802 *immbits = neon_qfloat_bits (immlo);
7803 return 0xf;
7804 }
7805
7806 if (size == 64)
7807 {
7808 if (neon_bits_same_in_bytes (immhi)
7809 && neon_bits_same_in_bytes (immlo))
7810 {
7811 if (*op == 1)
7812 return FAIL;
7813 *immbits = (neon_squash_bits (immhi) << 4)
7814 | neon_squash_bits (immlo);
7815 *op = 1;
7816 return 0xe;
7817 }
7818
7819 if (immhi != immlo)
7820 return FAIL;
7821 }
7822
7823 if (size >= 32)
7824 {
7825 if (immlo == (immlo & 0x000000ff))
7826 {
7827 *immbits = immlo;
7828 return 0x0;
7829 }
7830 else if (immlo == (immlo & 0x0000ff00))
7831 {
7832 *immbits = immlo >> 8;
7833 return 0x2;
7834 }
7835 else if (immlo == (immlo & 0x00ff0000))
7836 {
7837 *immbits = immlo >> 16;
7838 return 0x4;
7839 }
7840 else if (immlo == (immlo & 0xff000000))
7841 {
7842 *immbits = immlo >> 24;
7843 return 0x6;
7844 }
7845 else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
7846 {
7847 *immbits = (immlo >> 8) & 0xff;
7848 return 0xc;
7849 }
7850 else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
7851 {
7852 *immbits = (immlo >> 16) & 0xff;
7853 return 0xd;
7854 }
7855
7856 if ((immlo & 0xffff) != (immlo >> 16))
7857 return FAIL;
7858 immlo &= 0xffff;
7859 }
7860
7861 if (size >= 16)
7862 {
7863 if (immlo == (immlo & 0x000000ff))
7864 {
7865 *immbits = immlo;
7866 return 0x8;
7867 }
7868 else if (immlo == (immlo & 0x0000ff00))
7869 {
7870 *immbits = immlo >> 8;
7871 return 0xa;
7872 }
7873
7874 if ((immlo & 0xff) != (immlo >> 8))
7875 return FAIL;
7876 immlo &= 0xff;
7877 }
7878
7879 if (immlo == (immlo & 0x000000ff))
7880 {
7881 /* Don't allow MVN with 8-bit immediate. */
7882 if (*op == 1)
7883 return FAIL;
7884 *immbits = immlo;
7885 return 0xe;
7886 }
7887
7888 return FAIL;
7889 }
7890
7891 #if defined BFD_HOST_64_BIT
7892 /* Returns TRUE if double precision value V may be cast
7893 to single precision without loss of accuracy. */
7894
7895 static bfd_boolean
7896 is_double_a_single (bfd_int64_t v)
7897 {
7898 int exp = (int)((v >> 52) & 0x7FF);
7899 bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
7900
7901 return (exp == 0 || exp == 0x7FF
7902 || (exp >= 1023 - 126 && exp <= 1023 + 127))
7903 && (mantissa & 0x1FFFFFFFl) == 0;
7904 }
7905
7906 /* Returns a double precision value casted to single precision
7907 (ignoring the least significant bits in exponent and mantissa). */
7908
7909 static int
7910 double_to_single (bfd_int64_t v)
7911 {
7912 int sign = (int) ((v >> 63) & 1l);
7913 int exp = (int) ((v >> 52) & 0x7FF);
7914 bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
7915
7916 if (exp == 0x7FF)
7917 exp = 0xFF;
7918 else
7919 {
7920 exp = exp - 1023 + 127;
7921 if (exp >= 0xFF)
7922 {
7923 /* Infinity. */
7924 exp = 0x7F;
7925 mantissa = 0;
7926 }
7927 else if (exp < 0)
7928 {
7929 /* No denormalized numbers. */
7930 exp = 0;
7931 mantissa = 0;
7932 }
7933 }
7934 mantissa >>= 29;
7935 return (sign << 31) | (exp << 23) | mantissa;
7936 }
7937 #endif /* BFD_HOST_64_BIT */
7938
7939 enum lit_type
7940 {
7941 CONST_THUMB,
7942 CONST_ARM,
7943 CONST_VEC
7944 };
7945
7946 static void do_vfp_nsyn_opcode (const char *);
7947
/* inst.reloc.exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return TRUE; if it can't, convert inst.instruction to a literal-pool
   load and return FALSE.  If this is not a valid thing to do in the
   current context, set inst.error and return TRUE.

   inst.operands[i] describes the destination register.  */

static bfd_boolean
move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
{
  unsigned long tbit;
  bfd_boolean thumb_p = (t == CONST_THUMB);
  bfd_boolean arm_p = (t == CONST_ARM);

  /* Select the load bit for the encoding in use; the "=expr"
     pseudo-operation is only legal on a load instruction.  */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }

  if (inst.reloc.exp.X_op != O_constant
      && inst.reloc.exp.X_op != O_symbol
      && inst.reloc.exp.X_op != O_big)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }

  /* Only constants (including bignums) can be turned into moves;
     symbols always go via the literal pool.  */
  if (inst.reloc.exp.X_op == O_constant
      || inst.reloc.exp.X_op == O_big)
    {
#if defined BFD_HOST_64_BIT
      bfd_int64_t v;
#else
      offsetT v;
#endif
      if (inst.reloc.exp.X_op == O_big)
	{
	  LITTLENUM_TYPE w[X_PRECISION];
	  LITTLENUM_TYPE * l;

	  if (inst.reloc.exp.X_add_number == -1)
	    {
	      /* X_add_number of -1 marks a flonum; convert it to
		 littlenums first.  */
	      gen_to_words (w, X_PRECISION, E_PRECISION);
	      l = w;
	      /* FIXME: Should we check words w[2..5] ?  */
	    }
	  else
	    l = generic_bignum;

	  /* Reassemble the low littlenums into a host integer.  */
#if defined BFD_HOST_64_BIT
	  v =
	    ((((((((bfd_int64_t) l[3] & LITTLENUM_MASK)
		  << LITTLENUM_NUMBER_OF_BITS)
		 | ((bfd_int64_t) l[2] & LITTLENUM_MASK))
		<< LITTLENUM_NUMBER_OF_BITS)
	       | ((bfd_int64_t) l[1] & LITTLENUM_MASK))
	      << LITTLENUM_NUMBER_OF_BITS)
	     | ((bfd_int64_t) l[0] & LITTLENUM_MASK));
#else
	  v = ((l[1] & LITTLENUM_MASK) << LITTLENUM_NUMBER_OF_BITS)
	    | (l[0] & LITTLENUM_MASK);
#endif
	}
      else
	v = inst.reloc.exp.X_add_number;

      if (!inst.operands[i].issingle)
	{
	  if (thumb_p)
	    {
	      /* LDR should not lead to a flag-setting instruction being
		 chosen, so we do not check whether MOVS can be used.  */

	      if ((ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
		  || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		  && inst.operands[i].reg != 13
		  && inst.operands[i].reg != 15)
		{
		  /* Check if on thumb2 it can be done with a mov.w, mvn or
		     movw instruction.  */
		  unsigned int newimm;
		  bfd_boolean isNegated;

		  newimm = encode_thumb32_immediate (v);
		  if (newimm != (unsigned int) FAIL)
		    isNegated = FALSE;
		  else
		    {
		      /* Try the bitwise-inverted value for MVN.  */
		      newimm = encode_thumb32_immediate (~v);
		      if (newimm != (unsigned int) FAIL)
			isNegated = TRUE;
		    }

		  /* The number can be loaded with a mov.w or mvn
		     instruction.  */
		  if (newimm != (unsigned int) FAIL
		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		    {
		      inst.instruction = (0xf04f0000  /* MOV.W.  */
					  | (inst.operands[i].reg << 8));
		      /* Change to MOVN.  */
		      inst.instruction |= (isNegated ? 0x200000 : 0);
		      inst.instruction |= (newimm & 0x800) << 15;
		      inst.instruction |= (newimm & 0x700) << 4;
		      inst.instruction |= (newimm & 0x0ff);
		      return TRUE;
		    }
		  /* The number can be loaded with a movw instruction.  */
		  else if ((v & ~0xFFFF) == 0
			   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		    {
		      int imm = v & 0xFFFF;

		      inst.instruction = 0xf2400000;  /* MOVW.  */
		      inst.instruction |= (inst.operands[i].reg << 8);
		      inst.instruction |= (imm & 0xf000) << 4;
		      inst.instruction |= (imm & 0x0800) << 15;
		      inst.instruction |= (imm & 0x0700) << 4;
		      inst.instruction |= (imm & 0x00ff);
		      return TRUE;
		    }
		}
	    }
	  else if (arm_p)
	    {
	      int value = encode_arm_immediate (v);

	      if (value != FAIL)
		{
		  /* This can be done with a mov instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}

	      value = encode_arm_immediate (~ v);
	      if (value != FAIL)
		{
		  /* This can be done with a mvn instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}
	    }
	  else if (t == CONST_VEC && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	    {
	      int op = 0;
	      unsigned immbits = 0;
	      unsigned immlo = inst.operands[1].imm;
	      /* NOTE(review): when the high word is implicit, it is derived
		 by sign-extending the low word -- this assumes immlo was
		 parsed as a signed 32-bit value; confirm against the
		 parser.  */
	      unsigned immhi = inst.operands[1].regisimm
			       ? inst.operands[1].reg
			       : inst.reloc.exp.X_unsigned
				 ? 0
				 : ((bfd_int64_t)((int) immlo)) >> 32;
	      int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);

	      if (cmode == FAIL)
		{
		  /* Try the bitwise-inverted constant before giving up.  */
		  neon_invert_size (&immlo, &immhi, 64);
		  op = !op;
		  cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);
		}

	      if (cmode != FAIL)
		{
		  inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
				      | (1 << 23)
				      | (cmode << 8)
				      | (op << 5)
				      | (1 << 4);

		  /* Fill other bits in vmov encoding for both thumb and arm.  */
		  if (thumb_mode)
		    inst.instruction |= (0x7U << 29) | (0xF << 24);
		  else
		    inst.instruction |= (0xFU << 28) | (0x1 << 25);
		  neon_write_immbits (immbits);
		  return TRUE;
		}
	    }
	}

      if (t == CONST_VEC)
	{
	  /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant.  */
	  if (inst.operands[i].issingle
	      && is_quarter_float (inst.operands[1].imm)
	      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
	    {
	      inst.operands[1].imm =
		neon_qfloat_bits (v);
	      do_vfp_nsyn_opcode ("fconsts");
	      return TRUE;
	    }

	  /* If our host does not support a 64-bit type then we cannot perform
	     the following optimization.  This mean that there will be a
	     discrepancy between the output produced by an assembler built for
	     a 32-bit-only host and the output produced from a 64-bit host, but
	     this cannot be helped.  */
#if defined BFD_HOST_64_BIT
	  else if (!inst.operands[1].issingle
		   && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	    {
	      if (is_double_a_single (v)
		  && is_quarter_float (double_to_single (v)))
		{
		  inst.operands[1].imm =
		    neon_qfloat_bits (double_to_single (v));
		  do_vfp_nsyn_opcode ("fconstd");
		  return TRUE;
		}
	    }
#endif
	}
    }

  /* No move was possible; fall back to a PC-relative literal-pool
     load, rewriting the operand as [pc, #offset].  */
  if (add_to_lit_pool ((!inst.operands[i].isvec
			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
    return TRUE;

  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
8192
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format instruction.  Reject all forms which cannot be encoded
   into a coprocessor load/store instruction.  If wb_ok is false,
   reject use of writeback; if unind_ok is false, reject use of
   unindexed addressing.  If reloc_override is not 0, use it instead
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   (in which case it is preserved).  */

static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  if (!inst.operands[i].isreg)
    {
      /* PR 18256 */
      if (! inst.operands[0].isvec)
	{
	  inst.error = _("invalid co-processor operand");
	  return FAIL;
	}
      /* A bare "=constant" operand: try converting to a vmov or a
	 literal-pool load.  */
      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
	return SUCCESS;
    }

  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* The 8-bit field carries the coprocessor option; U is set.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  /* Keep any group relocation (or LDR_PC_G0) that the parser already
     selected; otherwise use the override or the default CP offset
     relocation.  */
  if (reloc_override)
    inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	    && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
8269
8270 /* Functions for instruction encoding, sorted by sub-architecture.
8271 First some generics; their names are taken from the conventional
8272 bit positions for register arguments in ARM format instructions. */
8273
/* Encoder for instructions with no operands; the opcode supplied by
   insns[] is already complete.  */
static void
do_noargs (void)
{
}
8278
8279 static void
8280 do_rd (void)
8281 {
8282 inst.instruction |= inst.operands[0].reg << 12;
8283 }
8284
8285 static void
8286 do_rn (void)
8287 {
8288 inst.instruction |= inst.operands[0].reg << 16;
8289 }
8290
8291 static void
8292 do_rd_rm (void)
8293 {
8294 inst.instruction |= inst.operands[0].reg << 12;
8295 inst.instruction |= inst.operands[1].reg;
8296 }
8297
8298 static void
8299 do_rm_rn (void)
8300 {
8301 inst.instruction |= inst.operands[0].reg;
8302 inst.instruction |= inst.operands[1].reg << 16;
8303 }
8304
8305 static void
8306 do_rd_rn (void)
8307 {
8308 inst.instruction |= inst.operands[0].reg << 12;
8309 inst.instruction |= inst.operands[1].reg << 16;
8310 }
8311
8312 static void
8313 do_rn_rd (void)
8314 {
8315 inst.instruction |= inst.operands[0].reg << 16;
8316 inst.instruction |= inst.operands[1].reg << 12;
8317 }
8318
8319 static void
8320 do_tt (void)
8321 {
8322 inst.instruction |= inst.operands[0].reg << 8;
8323 inst.instruction |= inst.operands[1].reg << 16;
8324 }
8325
8326 static bfd_boolean
8327 check_obsolete (const arm_feature_set *feature, const char *msg)
8328 {
8329 if (ARM_CPU_IS_ANY (cpu_variant))
8330 {
8331 as_tsktsk ("%s", msg);
8332 return TRUE;
8333 }
8334 else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
8335 {
8336 as_bad ("%s", msg);
8337 return TRUE;
8338 }
8339
8340 return FALSE;
8341 }
8342
8343 static void
8344 do_rd_rm_rn (void)
8345 {
8346 unsigned Rn = inst.operands[2].reg;
8347 /* Enforce restrictions on SWP instruction. */
8348 if ((inst.instruction & 0x0fbfffff) == 0x01000090)
8349 {
8350 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
8351 _("Rn must not overlap other operands"));
8352
8353 /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
8354 */
8355 if (!check_obsolete (&arm_ext_v8,
8356 _("swp{b} use is obsoleted for ARMv8 and later"))
8357 && warn_on_deprecated
8358 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
8359 as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
8360 }
8361
8362 inst.instruction |= inst.operands[0].reg << 12;
8363 inst.instruction |= inst.operands[1].reg;
8364 inst.instruction |= Rn << 16;
8365 }
8366
8367 static void
8368 do_rd_rn_rm (void)
8369 {
8370 inst.instruction |= inst.operands[0].reg << 12;
8371 inst.instruction |= inst.operands[1].reg << 16;
8372 inst.instruction |= inst.operands[2].reg;
8373 }
8374
8375 static void
8376 do_rm_rd_rn (void)
8377 {
8378 constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
8379 constraint (((inst.reloc.exp.X_op != O_constant
8380 && inst.reloc.exp.X_op != O_illegal)
8381 || inst.reloc.exp.X_add_number != 0),
8382 BAD_ADDR_MODE);
8383 inst.instruction |= inst.operands[0].reg;
8384 inst.instruction |= inst.operands[1].reg << 12;
8385 inst.instruction |= inst.operands[2].reg << 16;
8386 }
8387
8388 static void
8389 do_imm0 (void)
8390 {
8391 inst.instruction |= inst.operands[0].imm;
8392 }
8393
8394 static void
8395 do_rd_cpaddr (void)
8396 {
8397 inst.instruction |= inst.operands[0].reg << 12;
8398 encode_arm_cp_address (1, TRUE, TRUE, 0);
8399 }
8400
8401 /* ARM instructions, in alphabetical order by function name (except
8402 that wrapper functions appear immediately after the function they
8403 wrap). */
8404
8405 /* This is a pseudo-op of the form "adr rd, label" to be converted
8406 into a relative address of the form "add rd, pc, #label-.-8". */
8407
8408 static void
8409 do_adr (void)
8410 {
8411 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
8412
8413 /* Frag hacking will turn this into a sub instruction if the offset turns
8414 out to be negative. */
8415 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
8416 inst.reloc.pc_rel = 1;
8417 inst.reloc.exp.X_add_number -= 8;
8418
8419 if (inst.reloc.exp.X_op == O_symbol
8420 && inst.reloc.exp.X_add_symbol != NULL
8421 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
8422 && THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
8423 inst.reloc.exp.X_add_number += 1;
8424 }
8425
8426 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8427 into a relative address of the form:
8428 add rd, pc, #low(label-.-8)"
8429 add rd, rd, #high(label-.-8)" */
8430
8431 static void
8432 do_adrl (void)
8433 {
8434 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
8435
8436 /* Frag hacking will turn this into a sub instruction if the offset turns
8437 out to be negative. */
8438 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
8439 inst.reloc.pc_rel = 1;
8440 inst.size = INSN_SIZE * 2;
8441 inst.reloc.exp.X_add_number -= 8;
8442
8443 if (inst.reloc.exp.X_op == O_symbol
8444 && inst.reloc.exp.X_add_symbol != NULL
8445 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
8446 && THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
8447 inst.reloc.exp.X_add_number += 1;
8448 }
8449
8450 static void
8451 do_arit (void)
8452 {
8453 constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
8454 && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
8455 THUMB1_RELOC_ONLY);
8456 if (!inst.operands[1].present)
8457 inst.operands[1].reg = inst.operands[0].reg;
8458 inst.instruction |= inst.operands[0].reg << 12;
8459 inst.instruction |= inst.operands[1].reg << 16;
8460 encode_arm_shifter_operand (2);
8461 }
8462
8463 static void
8464 do_barrier (void)
8465 {
8466 if (inst.operands[0].present)
8467 inst.instruction |= inst.operands[0].imm;
8468 else
8469 inst.instruction |= 0xf;
8470 }
8471
8472 static void
8473 do_bfc (void)
8474 {
8475 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8476 constraint (msb > 32, _("bit-field extends past end of register"));
8477 /* The instruction encoding stores the LSB and MSB,
8478 not the LSB and width. */
8479 inst.instruction |= inst.operands[0].reg << 12;
8480 inst.instruction |= inst.operands[1].imm << 7;
8481 inst.instruction |= (msb - 1) << 16;
8482 }
8483
8484 static void
8485 do_bfi (void)
8486 {
8487 unsigned int msb;
8488
8489 /* #0 in second position is alternative syntax for bfc, which is
8490 the same instruction but with REG_PC in the Rm field. */
8491 if (!inst.operands[1].isreg)
8492 inst.operands[1].reg = REG_PC;
8493
8494 msb = inst.operands[2].imm + inst.operands[3].imm;
8495 constraint (msb > 32, _("bit-field extends past end of register"));
8496 /* The instruction encoding stores the LSB and MSB,
8497 not the LSB and width. */
8498 inst.instruction |= inst.operands[0].reg << 12;
8499 inst.instruction |= inst.operands[1].reg;
8500 inst.instruction |= inst.operands[2].imm << 7;
8501 inst.instruction |= (msb - 1) << 16;
8502 }
8503
8504 static void
8505 do_bfx (void)
8506 {
8507 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8508 _("bit-field extends past end of register"));
8509 inst.instruction |= inst.operands[0].reg << 12;
8510 inst.instruction |= inst.operands[1].reg;
8511 inst.instruction |= inst.operands[2].imm << 7;
8512 inst.instruction |= (inst.operands[3].imm - 1) << 16;
8513 }
8514
8515 /* ARM V5 breakpoint instruction (argument parse)
8516 BKPT <16 bit unsigned immediate>
8517 Instruction is not conditional.
8518 The bit pattern given in insns[] has the COND_ALWAYS condition,
8519 and it is an error if the caller tried to override that. */
8520
8521 static void
8522 do_bkpt (void)
8523 {
8524 /* Top 12 of 16 bits to bits 19:8. */
8525 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
8526
8527 /* Bottom 4 of 16 bits to bits 3:0. */
8528 inst.instruction |= inst.operands[0].imm & 0xf;
8529 }
8530
8531 static void
8532 encode_branch (int default_reloc)
8533 {
8534 if (inst.operands[0].hasreloc)
8535 {
8536 constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
8537 && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
8538 _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
8539 inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
8540 ? BFD_RELOC_ARM_PLT32
8541 : thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
8542 }
8543 else
8544 inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
8545 inst.reloc.pc_rel = 1;
8546 }
8547
/* Plain branch (B).  */
static void
do_branch (void)
{
#ifdef OBJ_ELF
  /* NOTE(review): EABI v4 and later use the PCREL_JUMP relocation --
     presumably so the linker can handle ARM/Thumb state; confirm.  */
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8558
/* Branch with link (BL).  */
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      /* Only an unconditional BL gets the CALL relocation; a
	 conditional one falls back to the JUMP relocation.
	 NOTE(review): presumably because a conditional BL cannot be
	 rewritten as BLX by the linker -- confirm.  */
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8574
8575 /* ARM V5 branch-link-exchange instruction (argument parse)
8576 BLX <target_addr> ie BLX(1)
8577 BLX{<condition>} <Rm> ie BLX(2)
8578 Unfortunately, there are two different opcodes for this mnemonic.
8579 So, the insns[].value is not used, and the code here zaps values
8580 into inst.instruction.
8581 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8582
8583 static void
8584 do_blx (void)
8585 {
8586 if (inst.operands[0].isreg)
8587 {
8588 /* Arg is a register; the opcode provided by insns[] is correct.
8589 It is not illegal to do "blx pc", just useless. */
8590 if (inst.operands[0].reg == REG_PC)
8591 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
8592
8593 inst.instruction |= inst.operands[0].reg;
8594 }
8595 else
8596 {
8597 /* Arg is an address; this instruction cannot be executed
8598 conditionally, and the opcode must be adjusted.
8599 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
8600 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead. */
8601 constraint (inst.cond != COND_ALWAYS, BAD_COND);
8602 inst.instruction = 0xfa000000;
8603 encode_branch (BFD_RELOC_ARM_PCREL_BLX);
8604 }
8605 }
8606
/* Branch and exchange (BX <Rm>).  */
static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (!ARM_FEATURE_ZERO (selected_object_arch)
      && !ARM_CPU_HAS_FEATURE (selected_object_arch, arm_ext_v5))
    want_reloc = TRUE;

#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    /* Pre-v4 EABI objects (and non-ELF objects) never get the reloc.  */
    want_reloc = FALSE;

  if (want_reloc)
    inst.reloc.type = BFD_RELOC_ARM_V4BX;
}
8631
8632
8633 /* ARM v5TEJ. Jump to Jazelle code. */
8634
8635 static void
8636 do_bxj (void)
8637 {
8638 if (inst.operands[0].reg == REG_PC)
8639 as_tsktsk (_("use of r15 in bxj is not really useful"));
8640
8641 inst.instruction |= inst.operands[0].reg;
8642 }
8643
8644 /* Co-processor data operation:
8645 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8646 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8647 static void
8648 do_cdp (void)
8649 {
8650 inst.instruction |= inst.operands[0].reg << 8;
8651 inst.instruction |= inst.operands[1].imm << 20;
8652 inst.instruction |= inst.operands[2].reg << 12;
8653 inst.instruction |= inst.operands[3].reg << 16;
8654 inst.instruction |= inst.operands[4].reg;
8655 inst.instruction |= inst.operands[5].imm << 5;
8656 }
8657
8658 static void
8659 do_cmp (void)
8660 {
8661 inst.instruction |= inst.operands[0].reg << 16;
8662 encode_arm_shifter_operand (1);
8663 }
8664
/* Transfer between coprocessor and ARM registers.
   MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
   MRC2
   MCR{cond}
   MCR2

   No special properties.  */

/* Description of a coprocessor register access that is deprecated or
   obsoleted from some architecture onwards.  */
struct deprecated_coproc_regs_s
{
  unsigned cp;			/* Coprocessor number.  */
  int opc1;			/* First opcode field.  */
  unsigned crn;			/* CRn field.  */
  unsigned crm;			/* CRm field.  */
  int opc2;			/* Second opcode field.  */
  arm_feature_set deprecated;	/* Architectures on which it is deprecated.  */
  arm_feature_set obsoleted;	/* Architectures on which it is obsolete.  */
  const char *dep_msg;		/* Diagnostic for a deprecated access.  */
  const char *obs_msg;		/* Diagnostic for an obsolete access.  */
};

#define DEPR_ACCESS_V8 \
  N_("This coprocessor register access is deprecated in ARMv8")

/* Table of all deprecated coprocessor registers.  */
static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
{
    {15, 0, 7, 10, 5,					/* CP15DMB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 10, 4,					/* CP15DSB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 5, 4,					/* CP15ISB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 1, 0, 0,					/* TEEHBR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 0, 0, 0,					/* TEECR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
};

#undef DEPR_ACCESS_V8

/* Number of entries in the table above.  */
static const size_t deprecated_coproc_reg_count =
  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
8713
/* Encode MCR/MCR2/MRC/MRC2, enforcing the register restrictions that
   depend on the direction of the transfer, and warning about accesses
   to deprecated coprocessor registers.  */
static void
do_co_reg (void)
{
  unsigned Rd;
  size_t i;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	/* MRC, MRC2  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

  /* Warn when the access matches an entry in the deprecated-register
     table for the selected CPU.  */
  for (i = 0; i < deprecated_coproc_reg_count; ++i)
    {
      const struct deprecated_coproc_regs_s *r =
	deprecated_coproc_regs + i;

      if (inst.operands[0].reg == r->cp
	  && inst.operands[1].imm == r->opc1
	  && inst.operands[3].reg == r->crn
	  && inst.operands[4].reg == r->crm
	  && inst.operands[5].imm == r->opc2)
	{
	  if (! ARM_CPU_IS_ANY (cpu_variant)
	      && warn_on_deprecated
	      && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	    as_tsktsk ("%s", r->dep_msg);
	}
    }

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
8763
8764 /* Transfer between coprocessor register and pair of ARM registers.
8765 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8766 MCRR2
8767 MRRC{cond}
8768 MRRC2
8769
8770 Two XScale instructions are special cases of these:
8771
8772 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8773 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8774
8775 Result unpredictable if Rd or Rn is R15. */
8776
8777 static void
8778 do_co_reg2c (void)
8779 {
8780 unsigned Rd, Rn;
8781
8782 Rd = inst.operands[2].reg;
8783 Rn = inst.operands[3].reg;
8784
8785 if (thumb_mode)
8786 {
8787 reject_bad_reg (Rd);
8788 reject_bad_reg (Rn);
8789 }
8790 else
8791 {
8792 constraint (Rd == REG_PC, BAD_PC);
8793 constraint (Rn == REG_PC, BAD_PC);
8794 }
8795
8796 /* Only check the MRRC{2} variants. */
8797 if ((inst.instruction & 0x0FF00000) == 0x0C500000)
8798 {
8799 /* If Rd == Rn, error that the operation is
8800 unpredictable (example MRRC p3,#1,r1,r1,c4). */
8801 constraint (Rd == Rn, BAD_OVERLAP);
8802 }
8803
8804 inst.instruction |= inst.operands[0].reg << 8;
8805 inst.instruction |= inst.operands[1].imm << 4;
8806 inst.instruction |= Rd << 12;
8807 inst.instruction |= Rn << 16;
8808 inst.instruction |= inst.operands[4].reg;
8809 }
8810
8811 static void
8812 do_cpsi (void)
8813 {
8814 inst.instruction |= inst.operands[0].imm << 6;
8815 if (inst.operands[1].present)
8816 {
8817 inst.instruction |= CPSI_MMOD;
8818 inst.instruction |= inst.operands[1].imm;
8819 }
8820 }
8821
8822 static void
8823 do_dbg (void)
8824 {
8825 inst.instruction |= inst.operands[0].imm;
8826 }
8827
8828 static void
8829 do_div (void)
8830 {
8831 unsigned Rd, Rn, Rm;
8832
8833 Rd = inst.operands[0].reg;
8834 Rn = (inst.operands[1].present
8835 ? inst.operands[1].reg : Rd);
8836 Rm = inst.operands[2].reg;
8837
8838 constraint ((Rd == REG_PC), BAD_PC);
8839 constraint ((Rn == REG_PC), BAD_PC);
8840 constraint ((Rm == REG_PC), BAD_PC);
8841
8842 inst.instruction |= Rd << 16;
8843 inst.instruction |= Rn << 0;
8844 inst.instruction |= Rm << 8;
8845 }
8846
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  /* Emit nothing: the instruction has no ARM encoding.  */
  inst.size = 0;
  if (unified_syntax)
    {
      set_it_insn_type (IT_INSN);
      /* Record the IT mask and condition so the following instructions
	 can be validated against this IT block.  */
      now_it.mask = (inst.instruction & 0xf) | 0x10;
      now_it.cc = inst.operands[0].imm;
    }
}
8863
/* If there is only one register in the register list,
   then return its register number.  Otherwise return -1.  */
static int
only_one_reg_in_list (int range)
{
  int i = ffs (range) - 1;

  /* An empty list has no single register.  Bail out before the shift
     below: for range == 0, ffs returns 0 and so i would be -1, making
     "1 << i" undefined behaviour.  */
  if (i < 0 || i > 15)
    return -1;

  return (range == (1 << i)) ? i : -1;
}
8872
/* Common worker for LDM/STM and the PUSH/POP mnemonics.  When
   FROM_PUSH_POP_MNEM is non-zero, a single-register PUSH/POP is
   re-encoded using the A2 (single-register) form.  */
static void
encode_ldmstm(int from_push_pop_mnem)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* NOTE(review): the writeback flag on the register-list operand
     appears to carry the '^' (user-bank / type 2-3) marker here --
     confirm against the operand parser.  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding.  */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      if (is_push && one_reg == 13 /* SP */)
	/* PR 22483: The A2 encoding cannot be used when
	   pushing the stack pointer as this is UNPREDICTABLE.  */
	return;

      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
8928
/* Encode a plain LDM/STM mnemonic (as opposed to PUSH/POP).  */
static void
do_ldmstm (void)
{
  encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
}
8934
/* ARMv5TE load-consecutive (argument parse)
   Mode is like LDRH.

     LDRccD R, mode
     STRccD R, mode.  */

static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  /* r14 would make the implied second transfer register r15.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  /* The second transfer register defaults to the one after the first.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	  && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
8976
/* Exclusive load: only a plain [Rn] address (with an optional zero
   offset) is acceptable.  */
static void
do_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  /* The offset was constrained to zero above, so no fixup is needed.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
}
9008
9009 static void
9010 do_ldrexd (void)
9011 {
9012 constraint (inst.operands[0].reg % 2 != 0,
9013 _("even register required"));
9014 constraint (inst.operands[1].present
9015 && inst.operands[1].reg != inst.operands[0].reg + 1,
9016 _("can only load two consecutive registers"));
9017 /* If op 1 were present and equal to PC, this function wouldn't
9018 have been called in the first place. */
9019 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
9020
9021 inst.instruction |= inst.operands[0].reg << 12;
9022 inst.instruction |= inst.operands[2].reg << 16;
9023 }
9024
9025 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
9026 which is not a multiple of four is UNPREDICTABLE. */
9027 static void
9028 check_ldr_r15_aligned (void)
9029 {
9030 constraint (!(inst.operands[1].immisreg)
9031 && (inst.operands[0].reg == REG_PC
9032 && inst.operands[1].reg == REG_PC
9033 && (inst.reloc.exp.X_add_number & 0x3)),
9034 _("ldr to register 15 must be 4-byte aligned"));
9035 }
9036
9037 static void
9038 do_ldst (void)
9039 {
9040 inst.instruction |= inst.operands[0].reg << 12;
9041 if (!inst.operands[1].isreg)
9042 if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
9043 return;
9044 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
9045 check_ldr_r15_aligned ();
9046 }
9047
9048 static void
9049 do_ldstt (void)
9050 {
9051 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
9052 reject [Rn,...]. */
9053 if (inst.operands[1].preind)
9054 {
9055 constraint (inst.reloc.exp.X_op != O_constant
9056 || inst.reloc.exp.X_add_number != 0,
9057 _("this instruction requires a post-indexed address"));
9058
9059 inst.operands[1].preind = 0;
9060 inst.operands[1].postind = 1;
9061 inst.operands[1].writeback = 1;
9062 }
9063 inst.instruction |= inst.operands[0].reg << 12;
9064 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
9065 }
9066
9067 /* Halfword and signed-byte load/store operations. */
9068
/* Mode-3 (halfword / signed byte) load/store: Rd in bits 12..15.
   PC is not a valid destination for these encodings.  */
static void
do_ldstv4 (void)
{
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    /* =immediate form: satisfied by a mov or literal-pool entry.  */
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}
9079
static void
do_ldsttv4 (void)
{
  /* The user-mode ('t') variants of the mode-3 loads/stores always use
     post-indexed addressing.  Turn [Rn] into [Rn]! and reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      /* Accept a pre-indexed [Rn] only with a literal zero offset and
	 convert it to the post-indexed form.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
9098
9099 /* Co-processor register load/store.
9100 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
static void
do_lstc (void)
{
  /* Coprocessor number in bits 8..11, CRd in bits 12..15; operand 2 is
     the coprocessor addressing mode.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
9108
/* MLA/MLS: Rd in bits 16..19, Rm in bits 0..3, Rs in bits 8..11,
   Rn (the accumulator) in bits 12..15.  */
static void
do_mlas (void)
{
  /* This restriction does not apply to mls (nor to mla in v6 or later).
     Bit 22 (0x00400000) distinguishes mls here.  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
      && !(inst.instruction & 0x00400000))
    as_tsktsk (_("Rd and Rm should be different in mla"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}
9123
static void
do_mov (void)
{
  /* The :lower0_7:-style relocs only exist for Thumb-1 ADR-like
     sequences; they are meaningless on an ARM mov.  */
  constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_shifter_operand (1);
}
9133
9134 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
9135 static void
9136 do_mov16 (void)
9137 {
9138 bfd_vma imm;
9139 bfd_boolean top;
9140
9141 top = (inst.instruction & 0x00400000) != 0;
9142 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
9143 _(":lower16: not allowed in this instruction"));
9144 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
9145 _(":upper16: not allowed in this instruction"));
9146 inst.instruction |= inst.operands[0].reg << 12;
9147 if (inst.reloc.type == BFD_RELOC_UNUSED)
9148 {
9149 imm = inst.reloc.exp.X_add_number;
9150 /* The value is in two pieces: 0:11, 16:19. */
9151 inst.instruction |= (imm & 0x00000fff);
9152 inst.instruction |= (imm & 0x0000f000) << 4;
9153 }
9154 }
9155
/* Handle the VFP-syntax forms of mrs.  Returns SUCCESS if the
   instruction was assembled here, FAIL if it is not a VFP form.  */
static int
do_vfp_nsyn_mrs (void)
{
  if (inst.operands[0].isvec)
    {
      /* APSR_nzcv destination: only FPSCR (register 1) may be read,
	 and the whole thing becomes an fmstat.  */
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
9174
9175 static int
9176 do_vfp_nsyn_msr (void)
9177 {
9178 if (inst.operands[0].isvec)
9179 do_vfp_nsyn_opcode ("fmxr");
9180 else
9181 return FAIL;
9182
9183 return SUCCESS;
9184 }
9185
/* VMRS: read a VFP system register into Rt.
   System register number in bits 16..19, Rt in bits 12..15.  */
static void
do_vmrs (void)
{
  unsigned Rt = inst.operands[0].reg;

  /* SP is not a valid Rt in Thumb state.  */
  if (thumb_mode && Rt == REG_SP)
    {
      inst.error = BAD_SP;
      return;
    }

  /* MVFR2 is only valid at ARMv8-A.  */
  if (inst.operands[1].reg == 5)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* APSR_ sets isvec.  All other refs to PC are illegal.  */
  if (!inst.operands[0].isvec && Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[1].reg << 16);
  inst.instruction |= (Rt << 12);
}
9214
/* VMSR: write Rt into a VFP system register.
   System register number in bits 16..19, Rt in bits 12..15.  */
static void
do_vmsr (void)
{
  unsigned Rt = inst.operands[1].reg;

  if (thumb_mode)
    reject_bad_reg (Rt);
  else if (Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* MVFR2 is only valid for ARMv8-A.  */
  if (inst.operands[0].reg == 5)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[0].reg << 16);
  inst.instruction |= (Rt << 12);
}
9238
/* MRS: read CPSR/SPSR (or a banked register form) into Rd.  */
static void
do_mrs (void)
{
  unsigned br;

  /* The VFP-syntax aliases (fmstat/fmrx) take precedence.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;

  if (inst.operands[1].isreg)
    {
      /* Parsed as a register: validate the pre-built encoding value.
	 NOTE(review): this appears to accept banked-register encodings
	 (bit 9 set) or ones with 0xf in bits 16..19 — confirm against
	 the psr parsing code elsewhere in this file.  */
      br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf0000))
	as_bad (_("bad register for mrs"));
    }
  else
    {
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
		  != (PSR_c|PSR_f),
		  _("'APSR', 'CPSR' or 'SPSR' expected"));
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
    }

  inst.instruction |= br;
}
9267
9268 /* Two possible forms:
9269 "{C|S}PSR_<field>, Rm",
9270 "{C|S}PSR_f, #expression". */
9271
static void
do_msr (void)
{
  /* The VFP-syntax alias (fmxr) takes precedence.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  /* Field mask from the parsed {C|S}PSR_<field> operand.  */
  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      /* Immediate form: mark it and let the fixup machinery encode the
	 rotated 8-bit constant.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
      inst.reloc.pc_rel = 0;
    }
}
9288
/* MUL: Rd in bits 16..19, Rm in bits 0..3, Rs in bits 8..11.
   The Rs operand is optional and defaults to Rd (two-operand form).  */
static void
do_mul (void)
{
  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;

  /* Rd == Rm is only a (soft) problem before ARMv6.  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("Rd and Rm should be different in mul"));
}
9304
9305 /* Long Multiply Parser
9306 UMULL RdLo, RdHi, Rm, Rs
9307 SMULL RdLo, RdHi, Rm, Rs
9308 UMLAL RdLo, RdHi, Rm, Rs
9309 SMLAL RdLo, RdHi, Rm, Rs. */
9310
static void
do_mull (void)
{
  /* RdLo in bits 12..15, RdHi in bits 16..19, Rm in bits 0..3,
     Rs in bits 8..11.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  /* rdhi and rdlo must be different.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));

  /* rdhi, rdlo and rm must all be different before armv6.  */
  if ((inst.operands[0].reg == inst.operands[2].reg
       || inst.operands[1].reg == inst.operands[2].reg)
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
}
9329
/* NOP, possibly with a hint operand.  On v6k and later (or whenever an
   explicit hint is given) encode the architectural hint form; otherwise
   leave the pre-selected encoding alone.  */
static void
do_nop (void)
{
  if (inst.operands[0].present
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
      inst.instruction &= 0xf0000000;
      inst.instruction |= 0x0320f000;
      if (inst.operands[0].present)
	inst.instruction |= inst.operands[0].imm;
    }
}
9343
9344 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9345 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9346 Condition defaults to COND_ALWAYS.
9347 Error if Rd, Rn or Rm are R15. */
9348
static void
do_pkhbt (void)
{
  /* Rd in bits 12..15, Rn in bits 16..19, Rm in bits 0..3;
     the optional LSL shift goes through the generic shift encoder.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  if (inst.operands[3].present)
    encode_arm_shift (3);
}
9358
9359 /* ARM V6 PKHTB (Argument Parse). */
9360
static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn — note the swapped Rn/Rm encoding.  */
      inst.instruction &= 0xfff00010;
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      /* Shifted form keeps the pkhtb opcode and normal operand order.  */
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}
9381
9382 /* ARMv5TE: Preload-Cache
9383 MP Extensions: Preload for write
9384
9385 PLD(W) <addr_mode>
9386
9387 Syntactically, like LDR with B=1, W=0, L=1. */
9388
static void
do_pld (void)
{
  /* Only a plain pre-indexed address with no writeback is valid.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
9402
9403 /* ARMv7: PLI <addr_mode> */
static void
do_pli (void)
{
  /* Same address-form restrictions as PLD...  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  /* ...but PLI's encoding has the P bit clear.  */
  inst.instruction &= ~PRE_INDEX;
}
9418
/* PUSH/POP: rewrite the single register-list operand into the
   equivalent LDM/STM form with SP! as the base register.  */
static void
do_push_pop (void)
{
  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  /* Shift the register list to operand 1 and synthesize operand 0
     as a writeback SP base.  */
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
}
9431
9432 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9433 word at the specified address and the following word
9434 respectively.
9435 Unconditionally executed.
9436 Error if Rn is R15. */
9437
static void
do_rfe (void)
{
  /* Base register in bits 16..19, plus optional writeback.  */
  inst.instruction |= inst.operands[0].reg << 16;
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
}
9445
9446 /* ARM V6 ssat (argument parse). */
9447
static void
do_ssat (void)
{
  /* SSAT encodes the saturation width minus one in bits 16..20.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= (inst.operands[1].imm - 1) << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}

/* ARM V6 usat (argument parse).  */

static void
do_usat (void)
{
  /* Unlike SSAT, USAT encodes the saturation width directly.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}
9471
9472 /* ARM V6 ssat16 (argument parse). */
9473
static void
do_ssat16 (void)
{
  /* As with SSAT, the width is encoded minus one.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= ((inst.operands[1].imm - 1) << 16);
  inst.instruction |= inst.operands[2].reg;
}

/* ARM V6 usat16 (argument parse): width encoded directly.  */
static void
do_usat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;
}
9489
9490 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9491 preserving the other bits.
9492
9493 setend <endian_specifier>, where <endian_specifier> is either
9494 BE or LE. */
9495
static void
do_setend (void)
{
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    as_tsktsk (_("setend use is deprecated for ARMv8"));

  /* Bit 9 selects big-endian (BE) operation.  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}
9506
/* Shift pseudo-ops (lsl/lsr/asr/ror): two- or three-operand forms.
   With two operands Rm defaults to Rd.  */
static void
do_shift (void)
{
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)	/* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
      /* PR 12854: Error on extraneous shifts.  */
      constraint (inst.operands[2].shifted,
		  _("extraneous shift as part of operand to shift insn"));
    }
  else
    /* Immediate form: the shift amount is encoded by the fixup.  */
    inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
}
9527
/* SMC: the immediate is encoded via its own relocation type.  */
static void
do_smc (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SMC;
  inst.reloc.pc_rel = 0;
}

/* HVC: immediate encoded via relocation, as for SMC.  */
static void
do_hvc (void)
{
  inst.reloc.type = BFD_RELOC_ARM_HVC;
  inst.reloc.pc_rel = 0;
}

/* SWI/SVC: immediate encoded via relocation.  */
static void
do_swi (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SWI;
  inst.reloc.pc_rel = 0;
}
9548
/* ARM-state SETPAN: the PAN value goes in bit 9.  */
static void
do_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= ((inst.operands[0].imm & 1) << 9);
}

/* Thumb-state SETPAN: the PAN value goes in bit 3.  */
static void
do_t_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= (inst.operands[0].imm << 3);
}
9566
9567 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9568 SMLAxy{cond} Rd,Rm,Rs,Rn
9569 SMLAWy{cond} Rd,Rm,Rs,Rn
9570 Error if any register is R15. */
9571
static void
do_smla (void)
{
  /* Rd in bits 16..19, Rm in bits 0..3, Rs in bits 8..11,
     accumulator Rn in bits 12..15.  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}
9580
9581 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9582 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9583 Error if any register is R15.
9584 Warning if Rdlo == Rdhi. */
9585
static void
do_smlal (void)
{
  /* RdLo in bits 12..15, RdHi in bits 16..19, Rm in bits 0..3,
     Rs in bits 8..11.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));
}
9597
9598 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9599 SMULxy{cond} Rd,Rm,Rs
9600 Error if any register is R15. */
9601
static void
do_smul (void)
{
  /* Rd in bits 16..19, Rm in bits 0..3, Rs in bits 8..11.  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
}
9609
9610 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9611 the same for both ARM and Thumb-2. */
9612
9613 static void
9614 do_srs (void)
9615 {
9616 int reg;
9617
9618 if (inst.operands[0].present)
9619 {
9620 reg = inst.operands[0].reg;
9621 constraint (reg != REG_SP, _("SRS base register must be r13"));
9622 }
9623 else
9624 reg = REG_SP;
9625
9626 inst.instruction |= reg << 16;
9627 inst.instruction |= inst.operands[1].imm;
9628 if (inst.operands[0].writeback || inst.operands[1].writeback)
9629 inst.instruction |= WRITE_BACK;
9630 }
9631
9632 /* ARM V6 strex (argument parse). */
9633
static void
do_strex (void)
{
  /* The address operand must be a bare [Rn] — no index, writeback,
     shift or negative offset.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  /* The status register may not overlap the value or base registers.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  /* Rd in bits 12..15, Rt in bits 0..3, Rn in bits 16..19.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.reloc.type = BFD_RELOC_UNUSED;
}
9657
/* Thumb strexb/strexh: validate the address and overlap rules, then
   use the common Rm/Rd/Rn encoder.  */
static void
do_t_strexbh (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9672
/* ARM strexd: stores an even/odd register pair exclusively.  */
static void
do_strexd (void)
{
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  /* The status register must not overlap either half of the pair or
     the base register.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}
9694
9695 /* ARM V8 STRL. */
/* ARM-state store-release exclusive: status register must not overlap
   the value or base registers.  */
static void
do_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rd_rm_rn ();
}

/* Thumb-state variant: same overlap rule, different field layout.  */
static void
do_t_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9713
9714 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9715 extends it to 32-bits, and adds the result to a value in another
9716 register. You can specify a rotation by 0, 8, 16, or 24 bits
9717 before extracting the 16-bit value.
9718 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9719 Condition defaults to COND_ALWAYS.
9720 Error if any register uses R15. */
9721
static void
do_sxtah (void)
{
  /* Rd in bits 12..15, Rn in bits 16..19, Rm in bits 0..3, and the
     rotation selector in bits 10..11.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 10;
}
9730
9731 /* ARM V6 SXTH.
9732
9733 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9734 Condition defaults to COND_ALWAYS.
9735 Error if any register uses R15. */
9736
static void
do_sxth (void)
{
  /* Rd in bits 12..15, Rm in bits 0..3, rotation in bits 10..11.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 10;
}
9744 \f
9745 /* VFP instructions. In a logical order: SP variant first, monad
9746 before dyad, arithmetic then move then load/store. */
9747
/* One-operand SP form: Sd, Sm.  */
static void
do_vfp_sp_monadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

/* Two-operand SP form: Sd, Sn, Sm.  */
static void
do_vfp_sp_dyadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

/* Compare-with-zero: only Sd is encoded.  */
static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}

/* Single-to-double conversion: Dd, Sm.  */
static void
do_vfp_dp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

/* Double-to-single conversion: Sd, Dm.  */
static void
do_vfp_sp_dp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

/* Core register from SP register: Rd in bits 12..15, Sn encoded.  */
static void
do_vfp_reg_from_sp (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}

/* Two core registers from an SP register pair.  */
static void
do_vfp_reg2_from_sp2 (void)
{
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

/* SP register from core register: Sn encoded, Rd in bits 12..15.  */
static void
do_vfp_sp_from_reg (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}

/* SP register pair from two core registers.  */
static void
do_vfp_sp2_from_reg2 (void)
{
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}

/* Single-precision load/store: Sd plus a coprocessor address.  */
static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}

/* Double-precision load/store: Dd plus a coprocessor address.  */
static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
9830
9831
/* Common encoder for single-precision load/store multiple.  The
   register count for SP registers is the parsed imm directly.  */
static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    /* Only the IA form may omit writeback.  */
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;
}

/* Common encoder for double-precision load/store multiple.  Each D
   register occupies two words, and the FLDMX/FSTMX forms add one.  */
static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  int count;

  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  /* Word count = 2 * register count, plus 1 for the X variants.  */
  count = inst.operands[1].imm << 1;
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;

  inst.instruction |= count;
}
9865
/* Thin wrappers selecting the addressing variant for the common
   vfp_{sp,dp}_ldstm encoders above.  */
static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}

static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}

/* The 'X' variants are the FLDMX/FSTMX extended forms.  */
static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}

static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
9901
/* Double-precision operand-pattern encoders: each encodes the listed
   operands into the Dd/Dn/Dm fields in the order named.  */
static void
do_vfp_dp_rd_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

static void
do_vfp_dp_rn_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}

static void
do_vfp_dp_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}

static void
do_vfp_dp_rd_rn_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}

static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}

static void
do_vfp_dp_rm_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
9944
9945 /* VFPv3 instructions. */
/* VMOV immediate, single precision: the 8-bit encoded constant is
   split across bits 16..19 (high nibble) and bits 0..3 (low nibble).  */
static void
do_vfp_sp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}

/* VMOV immediate, double precision: same constant split as above.  */
static void
do_vfp_dp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
9961
/* Encode the fraction-bits field for VCVT fixed-point conversions.
   The field stored is (srcsize - fbits), split across bit 5 (LSB)
   and bits 0..4.  */
static void
vfp_conv (int srcsize)
{
  int immbits = srcsize - inst.operands[1].imm;

  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
    {
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
	 i.e. immbits must be in range 0 - 16.  */
      inst.error = _("immediate value out of range, expected range [0, 16]");
      return;
    }
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
    {
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
	 i.e. immbits must be in range 0 - 31.  */
      inst.error = _("immediate value out of range, expected range [1, 32]");
      return;
    }

  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}
9985
/* Fixed-point conversion wrappers: encode the destination register,
   then the fraction bits for the given source width.  */
static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}

static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}

static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}

static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
10013 \f
10014 /* FPA instructions. Also in a logical order. */
10015
static void
do_fpa_cmp (void)
{
  /* First operand in bits 16..19, second in bits 0..3.  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
10022
/* FPA load/store multiple: the 1-4 register count is encoded in the
   CP_T_X/CP_T_Y bits; stack-style forms are emulated via offsets.  */
static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X;	         break;
    case 2: inst.instruction |= CP_T_Y;	         break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: 			             	 break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* 12 bytes per register transferred.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;

      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
10061 \f
10062 /* iWMMXt instructions: strictly in alphabetical order. */
10063
/* TANDC/TORC-style ops: the only legal destination is r15.  */
static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}

/* TEXTRC: Rd in bits 12..15, immediate in the low bits.  */
static void
do_iwmmxt_textrc (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm;
}

/* TEXTRM: Rd in bits 12..15, wRn in bits 16..19, element selector in
   the low bits.  */
static void
do_iwmmxt_textrm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].imm;
}

/* TINSR: wRd in bits 16..19, Rn in bits 12..15, element selector in
   the low bits.  */
static void
do_iwmmxt_tinsr (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].imm;
}

/* TMIA: accumulator in bits 5..8, Rm in bits 0..3, Rs in bits 12..15.  */
static void
do_iwmmxt_tmia (void)
{
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}
10100
/* WALIGNI: alignment immediate in bits 20..22.  */
static void
do_iwmmxt_waligni (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 20;
}

/* WMERGE: merge selector in bits 21..23.  */
static void
do_iwmmxt_wmerge (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 21;
}

static void
do_iwmmxt_wmov (void)
{
  /* WMOV rD, rN is an alias for WOR rD, rN, rN.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
10127
/* WLDRB/WLDRH etc.: byte/halfword forms use a scaled offset reloc,
   which differs between ARM and Thumb encodings.  */
static void
do_iwmmxt_wldstbh (void)
{
  int reloc;
  inst.instruction |= inst.operands[0].reg << 12;
  if (thumb_mode)
    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
  else
    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
  encode_arm_cp_address (1, TRUE, FALSE, reloc);
}

static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      /* Control-register transfers are unconditional (0xf condition).  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
10153
/* WLDRD/WSTRD.  iWMMXt2 adds a register-offset form which is encoded
   by hand here; otherwise fall back to the standard coprocessor
   addressing encoder.  */
static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      /* Rebuild the encoding for the register-offset variant: clear the
	 coprocessor-form bits, force the 0xf condition, then set the
	 index/direction/writeback flags and register fields.  */
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xfU << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
10176
/* WSHUFH: the 8-bit shuffle selector is split — high nibble into
   bits 20..23, low nibble into bits 0..3.  */
static void
do_iwmmxt_wshufh (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
  inst.instruction |= (inst.operands[2].imm & 0x0f);
}

static void
do_iwmmxt_wzero (void)
{
  /* WZERO reg is an alias for WANDN reg, reg, reg.  */
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[0].reg << 16;
}
10194
/* iWMMXt shift instructions: either a three-register form, or (on
   iWMMXt2) a register-register-immediate form with a 5-bit count.  */
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* An immediate of zero is special-cased: rewrite the opcode
	   (the operation/width selector lives in bits 20-23) into an
	   equivalent instruction that is directly encodable.  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  The count is split: bit 4 of the immediate
       goes to insn bit 8, bits 0-3 to insn bits 0-3; the encoding is
       unconditional (0xF condition field).  */
    inst.operands[2].imm &= 0x1f;
    inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
10244 \f
10245 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10246 operations first, then control, shift, and load/store. */
10247
10248 /* Insns like "foo X,Y,Z". */
10249
10250 static void
10251 do_mav_triple (void)
10252 {
10253 inst.instruction |= inst.operands[0].reg << 16;
10254 inst.instruction |= inst.operands[1].reg;
10255 inst.instruction |= inst.operands[2].reg << 12;
10256 }
10257
10258 /* Insns like "foo W,X,Y,Z".
10259 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10260
10261 static void
10262 do_mav_quad (void)
10263 {
10264 inst.instruction |= inst.operands[0].reg << 5;
10265 inst.instruction |= inst.operands[1].reg << 12;
10266 inst.instruction |= inst.operands[2].reg << 16;
10267 inst.instruction |= inst.operands[3].reg;
10268 }
10269
/* cfmvsc32<cond> DSPSC,MVDX[15:0].  */
static void
do_mav_dspsc (void)
{
  /* The DSPSC destination is implicit in the opcode; only the MVDX
     source register needs encoding, at bit 12.  */
  inst.instruction |= inst.operands[1].reg << 12;
}
10276
10277 /* Maverick shift immediate instructions.
10278 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10279 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
10280
10281 static void
10282 do_mav_shift (void)
10283 {
10284 int imm = inst.operands[2].imm;
10285
10286 inst.instruction |= inst.operands[0].reg << 12;
10287 inst.instruction |= inst.operands[1].reg << 16;
10288
10289 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
10290 Bits 5-7 of the insn should have bits 4-6 of the immediate.
10291 Bit 4 should be 0. */
10292 imm = (imm & 0xf) | ((imm & 0x70) << 1);
10293
10294 inst.instruction |= imm;
10295 }
10296 \f
10297 /* XScale instructions. Also sorted arithmetic before move. */
10298
10299 /* Xscale multiply-accumulate (argument parse)
10300 MIAcc acc0,Rm,Rs
10301 MIAPHcc acc0,Rm,Rs
10302 MIAxycc acc0,Rm,Rs. */
10303
10304 static void
10305 do_xsc_mia (void)
10306 {
10307 inst.instruction |= inst.operands[1].reg;
10308 inst.instruction |= inst.operands[2].reg << 12;
10309 }
10310
10311 /* Xscale move-accumulator-register (argument parse)
10312
10313 MARcc acc0,RdLo,RdHi. */
10314
10315 static void
10316 do_xsc_mar (void)
10317 {
10318 inst.instruction |= inst.operands[1].reg << 12;
10319 inst.instruction |= inst.operands[2].reg << 16;
10320 }
10321
10322 /* Xscale move-register-accumulator (argument parse)
10323
10324 MRAcc RdLo,RdHi,acc0. */
10325
10326 static void
10327 do_xsc_mra (void)
10328 {
10329 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
10330 inst.instruction |= inst.operands[0].reg << 12;
10331 inst.instruction |= inst.operands[1].reg << 16;
10332 }
10333 \f
10334 /* Encoding functions relevant only to Thumb. */
10335
10336 /* inst.operands[i] is a shifted-register operand; encode
10337 it into inst.instruction in the format used by Thumb32. */
10338
static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR with a zero shift amount.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      /* An amount of 0 means LSL #0; LSR/ASR #32 are encoded with a
	 zero amount field.  */
      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      inst.instruction |= shift << 4;
      /* The 5-bit amount is split: its bits 2-4 go to insn bits 12-14,
	 its bits 0-1 to insn bits 6-7.  */
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
10370
10371
10372 /* inst.operands[i] was set up by parse_address. Encode it into a
10373 Thumb32 format load or store instruction. Reject forms that cannot
10374 be used with such instructions. If is_t is true, reject forms that
10375 cannot be used with a T instruction; if is_d is true, reject forms
10376 that cannot be used with a D instruction. If it is a store insn,
10377 reject PC in Rn. */
10378
static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* [Rn, Rm {, LSL #shift}] -- register-offset form.  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  constraint (inst.reloc.exp.X_op != O_constant,
		      _("expression too complex"));
	  constraint (inst.reloc.exp.X_add_number < 0
		      || inst.reloc.exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
	}
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* [Rn, #offset]{!} -- immediate offset, optionally pre-indexed.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      /* D (doubleword) instructions use a different bit layout for the
	 P/W flags than single-register ones.  */
      if (is_d)
	{
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* [Rn], #offset -- post-indexed; writeback is implied.  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
10450
10451 /* Table of Thumb instructions which exist in both 16- and 32-bit
10452 encodings (the latter only in post-V6T2 cores). The index is the
10453 value used in the insns table below. When there is more than one
10454 possible 16-bit encoding for the instruction, this table always
10455 holds variant (1).
10456 Also contains several pseudo-instructions used during relaxation. */
#define T16_32_TAB \
X(_adc, 4140, eb400000), \
X(_adcs, 4140, eb500000), \
X(_add, 1c00, eb000000), \
X(_adds, 1c00, eb100000), \
X(_addi, 0000, f1000000), \
X(_addis, 0000, f1100000), \
X(_add_pc,000f, f20f0000), \
X(_add_sp,000d, f10d0000), \
X(_adr, 000f, f20f0000), \
X(_and, 4000, ea000000), \
X(_ands, 4000, ea100000), \
X(_asr, 1000, fa40f000), \
X(_asrs, 1000, fa50f000), \
X(_b, e000, f000b000), \
X(_bcond, d000, f0008000), \
X(_bic, 4380, ea200000), \
X(_bics, 4380, ea300000), \
X(_cmn, 42c0, eb100f00), \
X(_cmp, 2800, ebb00f00), \
X(_cpsie, b660, f3af8400), \
X(_cpsid, b670, f3af8600), \
X(_cpy, 4600, ea4f0000), \
X(_dec_sp,80dd, f1ad0d00), \
X(_eor, 4040, ea800000), \
X(_eors, 4040, ea900000), \
X(_inc_sp,00dd, f10d0d00), \
X(_ldmia, c800, e8900000), \
X(_ldr, 6800, f8500000), \
X(_ldrb, 7800, f8100000), \
X(_ldrh, 8800, f8300000), \
X(_ldrsb, 5600, f9100000), \
X(_ldrsh, 5e00, f9300000), \
X(_ldr_pc,4800, f85f0000), \
X(_ldr_pc2,4800, f85f0000), \
X(_ldr_sp,9800, f85d0000), \
X(_lsl, 0000, fa00f000), \
X(_lsls, 0000, fa10f000), \
X(_lsr, 0800, fa20f000), \
X(_lsrs, 0800, fa30f000), \
X(_mov, 2000, ea4f0000), \
X(_movs, 2000, ea5f0000), \
X(_mul, 4340, fb00f000), \
X(_muls, 4340, ffffffff), /* no 32b muls */ \
X(_mvn, 43c0, ea6f0000), \
X(_mvns, 43c0, ea7f0000), \
X(_neg, 4240, f1c00000), /* rsb #0 */ \
X(_negs, 4240, f1d00000), /* rsbs #0 */ \
X(_orr, 4300, ea400000), \
X(_orrs, 4300, ea500000), \
X(_pop, bc00, e8bd0000), /* ldmia sp!,... */ \
X(_push, b400, e92d0000), /* stmdb sp!,... */ \
X(_rev, ba00, fa90f080), \
X(_rev16, ba40, fa90f090), \
X(_revsh, bac0, fa90f0b0), \
X(_ror, 41c0, fa60f000), \
X(_rors, 41c0, fa70f000), \
X(_sbc, 4180, eb600000), \
X(_sbcs, 4180, eb700000), \
X(_stmia, c000, e8800000), \
X(_str, 6000, f8400000), \
X(_strb, 7000, f8000000), \
X(_strh, 8000, f8200000), \
X(_str_sp,9000, f84d0000), \
X(_sub, 1e00, eba00000), \
X(_subs, 1e00, ebb00000), \
X(_subi, 8000, f1a00000), \
X(_subis, 8000, f1b00000), \
X(_sxtb, b240, fa4ff080), \
X(_sxth, b200, fa0ff080), \
X(_tst, 4200, ea100f00), \
X(_uxtb, b2c0, fa5ff080), \
X(_uxth, b280, fa1ff080), \
X(_nop, bf00, f3af8000), \
X(_yield, bf10, f3af8001), \
X(_wfe, bf20, f3af8002), \
X(_wfi, bf30, f3af8003), \
X(_sev, bf40, f3af8004), \
X(_sevl, bf50, f3af8005), \
X(_udf, de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* 16-bit opcode for each T_MNEM code above, indexed relative to
   T16_32_OFFSET + 1.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* Matching 32-bit opcode for each code; bit 20 of the 32-bit form is
   the S (flag-setting) bit, which THUMB_SETS_FLAGS tests.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
10556
10557 /* Thumb instruction encoders, in alphabetical order. */
10558
10559 /* ADDW or SUBW. */
10560
10561 static void
10562 do_t_add_sub_w (void)
10563 {
10564 int Rd, Rn;
10565
10566 Rd = inst.operands[0].reg;
10567 Rn = inst.operands[1].reg;
10568
10569 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10570 is the SP-{plus,minus}-immediate form of the instruction. */
10571 if (Rn == REG_SP)
10572 constraint (Rd == REG_PC, BAD_PC);
10573 else
10574 reject_bad_reg (Rd);
10575
10576 inst.instruction |= (Rn << 16) | (Rd << 8);
10577 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
10578 }
10579
10580 /* Parse an add or subtract instruction. We get here with inst.instruction
10581 equaling any of THUMB_OPCODE_add, adds, sub, or subs. */
10582
static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */

  if (Rd == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      /* Inside an IT block only the non-flag-setting form may be
	 narrow; outside one only the flag-setting form may.  */
      if (flags)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (!inst.operands[2].isreg)
	{
	  /* Immediate operand.  */
	  int add;

	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  /* The Thumb-1 ALU_ABS group relocs keep their own
		     reloc type; anything else either gets the generic
		     Thumb ADD reloc (forced 16-bit) or is recorded for
		     later relaxation to the wide form.  */
		  if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		      || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		    {
		      if (inst.size_req == 2)
			inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
		      else
			inst.relax = opcode;
		    }
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      /* 32-bit encoding required (or no narrow form found).  */
	      constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
			  && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
			  THUMB1_RELOC_ONLY);
	      if (Rd == REG_PC)
		{
		  /* Only "SUBS PC, LR, #imm8" writes PC here.  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			      _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.reloc.exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.reloc.exp.X_add_number < 0
			      || inst.reloc.exp.X_add_number > 0xff,
			      _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.reloc.exp.X_add_number;
		  inst.reloc.type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  /* Register (possibly shifted) operand.  */
	  unsigned int value = inst.reloc.exp.X_add_number;
	  unsigned int shift = inst.operands[2].shift_kind;

	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      /* ADD_HI is destructive; arrange for the non-Rd
			 operand to land in the Rm field.  */
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
		      _("shift value over 3 not allowed in thumb mode"));
	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
		      _("only LSL shift allowed in thumb mode"));
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified (divided) syntax: only the 16-bit forms exist.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
10800
/* Thumb ADR -- load a PC-relative address into a register.  */
static void
do_t_adr (void)
{
  unsigned Rd;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  if (unified_syntax && inst.size_req == 0 && Rd <= 7)
    {
      /* Defer to section relaxation: emit the 16-bit form now and
	 record the wide opcode so relaxation can widen it later.  */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd << 4;
    }
  else if (unified_syntax && inst.size_req != 2)
    {
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.reloc.pc_rel = 1;
    }
  else
    {
      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
      inst.reloc.exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.reloc.pc_rel = 1;
      inst.instruction |= Rd << 4;
    }

  /* ADR of a defined Thumb function yields an address with the Thumb
     bit set, so add 1 to the symbol's value.  */
  if (inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_number += 1;
}
10840
10841 /* Arithmetic instructions for which there is just one 16-bit
10842 instruction encoding, and it allows only two low registers.
10843 For maximal compatibility with ARM syntax, we allow three register
10844 operands even when Thumb-32 instructions are not available, as long
10845 as the first two are identical. For instance, both "sbc r0,r1" and
10846 "sbc r0,r0,r1" are allowed. */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.
	     Inside an IT block only the non-flag-setting form may be
	     narrow; outside one only the flag-setting form may.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  /* The 16-bit form is destructive: Rd must equal Rs.  */
	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
10929
10930 /* Similarly, but for instructions where the arithmetic operation is
10931 commutative, so we can allow either of them to be different from
10932 the destination operand in a 16-bit instruction. For instance, all
10933 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
10934 accepted. */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.
	     Inside an IT block only the non-flag-setting form may be
	     narrow; outside one only the flag-setting form may.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow)
	    {
	      /* The 16-bit form is destructive, but the operation is
		 commutative, so either source may coincide with Rd.  */
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
11030
11031 static void
11032 do_t_bfc (void)
11033 {
11034 unsigned Rd;
11035 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
11036 constraint (msb > 32, _("bit-field extends past end of register"));
11037 /* The instruction encoding stores the LSB and MSB,
11038 not the LSB and width. */
11039 Rd = inst.operands[0].reg;
11040 reject_bad_reg (Rd);
11041 inst.instruction |= Rd << 8;
11042 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
11043 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
11044 inst.instruction |= msb - 1;
11045 }
11046
11047 static void
11048 do_t_bfi (void)
11049 {
11050 int Rd, Rn;
11051 unsigned int msb;
11052
11053 Rd = inst.operands[0].reg;
11054 reject_bad_reg (Rd);
11055
11056 /* #0 in second position is alternative syntax for bfc, which is
11057 the same instruction but with REG_PC in the Rm field. */
11058 if (!inst.operands[1].isreg)
11059 Rn = REG_PC;
11060 else
11061 {
11062 Rn = inst.operands[1].reg;
11063 reject_bad_reg (Rn);
11064 }
11065
11066 msb = inst.operands[2].imm + inst.operands[3].imm;
11067 constraint (msb > 32, _("bit-field extends past end of register"));
11068 /* The instruction encoding stores the LSB and MSB,
11069 not the LSB and width. */
11070 inst.instruction |= Rd << 8;
11071 inst.instruction |= Rn << 16;
11072 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
11073 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
11074 inst.instruction |= msb - 1;
11075 }
11076
11077 static void
11078 do_t_bfx (void)
11079 {
11080 unsigned Rd, Rn;
11081
11082 Rd = inst.operands[0].reg;
11083 Rn = inst.operands[1].reg;
11084
11085 reject_bad_reg (Rd);
11086 reject_bad_reg (Rn);
11087
11088 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
11089 _("bit-field extends past end of register"));
11090 inst.instruction |= Rd << 8;
11091 inst.instruction |= Rn << 16;
11092 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
11093 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
11094 inst.instruction |= inst.operands[3].imm - 1;
11095 }
11096
11097 /* ARM V5 Thumb BLX (argument parse)
11098 BLX <target_addr> which is BLX(1)
11099 BLX <Rm> which is BLX(2)
11100 Unfortunately, there are two different opcodes for this mnemonic.
11101 So, the insns[].value is not used, and the code here zaps values
11102 into inst.instruction.
11103
11104 ??? How to take advantage of the additional two bits of displacement
11105 available in Thumb32 mode? Need new relocation? */
11106
11107 static void
11108 do_t_blx (void)
11109 {
11110 set_it_insn_type_last ();
11111
11112 if (inst.operands[0].isreg)
11113 {
11114 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
11115 /* We have a register, so this is BLX(2). */
11116 inst.instruction |= inst.operands[0].reg << 3;
11117 }
11118 else
11119 {
11120 /* No register. This must be BLX(1). */
11121 inst.instruction = 0xf000e800;
11122 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
11123 }
11124 }
11125
11126 static void
11127 do_t_branch (void)
11128 {
11129 int opcode;
11130 int cond;
11131 bfd_reloc_code_real_type reloc;
11132
11133 cond = inst.cond;
11134 set_it_insn_type (IF_INSIDE_IT_LAST_INSN);
11135
11136 if (in_it_block ())
11137 {
11138 /* Conditional branches inside IT blocks are encoded as unconditional
11139 branches. */
11140 cond = COND_ALWAYS;
11141 }
11142 else
11143 cond = inst.cond;
11144
11145 if (cond != COND_ALWAYS)
11146 opcode = T_MNEM_bcond;
11147 else
11148 opcode = inst.instruction;
11149
11150 if (unified_syntax
11151 && (inst.size_req == 4
11152 || (inst.size_req != 2
11153 && (inst.operands[0].hasreloc
11154 || inst.reloc.exp.X_op == O_constant))))
11155 {
11156 inst.instruction = THUMB_OP32(opcode);
11157 if (cond == COND_ALWAYS)
11158 reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
11159 else
11160 {
11161 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
11162 _("selected architecture does not support "
11163 "wide conditional branch instruction"));
11164
11165 gas_assert (cond != 0xF);
11166 inst.instruction |= cond << 22;
11167 reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
11168 }
11169 }
11170 else
11171 {
11172 inst.instruction = THUMB_OP16(opcode);
11173 if (cond == COND_ALWAYS)
11174 reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
11175 else
11176 {
11177 inst.instruction |= cond << 8;
11178 reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
11179 }
11180 /* Allow section relaxation. */
11181 if (unified_syntax && inst.size_req != 2)
11182 inst.relax = opcode;
11183 }
11184 inst.reloc.type = reloc;
11185 inst.reloc.pc_rel = 1;
11186 }
11187
11188 /* Actually do the work for Thumb state bkpt and hlt. The only difference
11189 between the two is the maximum immediate allowed - which is passed in
11190 RANGE. */
11191 static void
11192 do_t_bkpt_hlt1 (int range)
11193 {
11194 constraint (inst.cond != COND_ALWAYS,
11195 _("instruction is always unconditional"));
11196 if (inst.operands[0].present)
11197 {
11198 constraint (inst.operands[0].imm > range,
11199 _("immediate value out of range"));
11200 inst.instruction |= inst.operands[0].imm;
11201 }
11202
11203 set_it_insn_type (NEUTRAL_IT_INSN);
11204 }
11205
/* Thumb HLT: a 6-bit immediate (0..0x3f).  */
static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (0x3f);
}
11211
/* Thumb BKPT: an 8-bit immediate (0..0xff).  */
static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (0xff);
}
11217
11218 static void
11219 do_t_branch23 (void)
11220 {
11221 set_it_insn_type_last ();
11222 encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);
11223
11224 /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
11225 this file. We used to simply ignore the PLT reloc type here --
11226 the branch encoding is now needed to deal with TLSCALL relocs.
11227 So if we see a PLT reloc now, put it back to how it used to be to
11228 keep the preexisting behaviour. */
11229 if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
11230 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
11231
11232 #if defined(OBJ_COFF)
11233 /* If the destination of the branch is a defined symbol which does not have
11234 the THUMB_FUNC attribute, then we must be calling a function which has
11235 the (interfacearm) attribute. We look for the Thumb entry point to that
11236 function and change the branch to refer to that function instead. */
11237 if ( inst.reloc.exp.X_op == O_symbol
11238 && inst.reloc.exp.X_add_symbol != NULL
11239 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
11240 && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
11241 inst.reloc.exp.X_add_symbol =
11242 find_real_start (inst.reloc.exp.X_add_symbol);
11243 #endif
11244 }
11245
11246 static void
11247 do_t_bx (void)
11248 {
11249 set_it_insn_type_last ();
11250 inst.instruction |= inst.operands[0].reg << 3;
11251 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
11252 should cause the alignment to be checked once it is known. This is
11253 because BX PC only works if the instruction is word aligned. */
11254 }
11255
11256 static void
11257 do_t_bxj (void)
11258 {
11259 int Rm;
11260
11261 set_it_insn_type_last ();
11262 Rm = inst.operands[0].reg;
11263 reject_bad_reg (Rm);
11264 inst.instruction |= Rm << 16;
11265 }
11266
11267 static void
11268 do_t_clz (void)
11269 {
11270 unsigned Rd;
11271 unsigned Rm;
11272
11273 Rd = inst.operands[0].reg;
11274 Rm = inst.operands[1].reg;
11275
11276 reject_bad_reg (Rd);
11277 reject_bad_reg (Rm);
11278
11279 inst.instruction |= Rd << 8;
11280 inst.instruction |= Rm << 16;
11281 inst.instruction |= Rm;
11282 }
11283
/* Thumb CSDB.  No operands to encode; only marks the instruction as
   not permitted inside an IT block.  */
static void
do_t_csdb (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
}
11289
/* Thumb CPS.  The pre-parsed immediate is OR-ed straight into the
   opcode; the instruction may not appear inside an IT block.  */
static void
do_t_cps (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction |= inst.operands[0].imm;
}
11296
/* Thumb CPSIE/CPSID, optionally with a mode-change operand.  The wide
   (32-bit) form is used when a mode operand is present or .w was given,
   provided the architecture supports it.  */
static void
do_t_cpsi (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* Carry the imod (enable/disable) bits over from the 16-bit
	 template into the 32-bit encoding.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	/* Bit 8 flags the presence of a mode change; mode in low bits.  */
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      /* Narrow form: the 'A' (abort) flag needs architecture support,
	 and a mode operand cannot be encoded at all.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
11324
11325 /* THUMB CPY instruction (argument parse). */
11326
11327 static void
11328 do_t_cpy (void)
11329 {
11330 if (inst.size_req == 4)
11331 {
11332 inst.instruction = THUMB_OP32 (T_MNEM_mov);
11333 inst.instruction |= inst.operands[0].reg << 8;
11334 inst.instruction |= inst.operands[1].reg;
11335 }
11336 else
11337 {
11338 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
11339 inst.instruction |= (inst.operands[0].reg & 0x7);
11340 inst.instruction |= inst.operands[1].reg << 3;
11341 }
11342 }
11343
11344 static void
11345 do_t_cbz (void)
11346 {
11347 set_it_insn_type (OUTSIDE_IT_INSN);
11348 constraint (inst.operands[0].reg > 7, BAD_HIREG);
11349 inst.instruction |= inst.operands[0].reg;
11350 inst.reloc.pc_rel = 1;
11351 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
11352 }
11353
/* Thumb DBG hint.  The option value goes in the low bits of the
   opcode.  */
static void
do_t_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
11359
11360 static void
11361 do_t_div (void)
11362 {
11363 unsigned Rd, Rn, Rm;
11364
11365 Rd = inst.operands[0].reg;
11366 Rn = (inst.operands[1].present
11367 ? inst.operands[1].reg : Rd);
11368 Rm = inst.operands[2].reg;
11369
11370 reject_bad_reg (Rd);
11371 reject_bad_reg (Rn);
11372 reject_bad_reg (Rm);
11373
11374 inst.instruction |= Rd << 8;
11375 inst.instruction |= Rn << 16;
11376 inst.instruction |= Rm;
11377 }
11378
11379 static void
11380 do_t_hint (void)
11381 {
11382 if (unified_syntax && inst.size_req == 4)
11383 inst.instruction = THUMB_OP32 (inst.instruction);
11384 else
11385 inst.instruction = THUMB_OP16 (inst.instruction);
11386 }
11387
/* Thumb IT instruction.  Records the IT-block state (condition, mask,
   block length) for subsequent instructions, and rewrites the mask when
   the base condition is a negated one.  */
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_it_insn_type (IT_INSN);
  /* The 4-bit mask is stored with bit 4 set above it.  */
  now_it.mask = (inst.instruction & 0xf) | 0x10;
  now_it.cc = cond;
  now_it.warn_deprecated = FALSE;

  /* If the condition is a negative condition, invert the mask.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      /* The lowest set bit of the mask terminates the then/else list;
	 the bits above it are flipped to match the inverted condition.  */
      if ((mask & 0x7) == 0)
	{
	  /* No conversion needed.  */
	  now_it.block_length = 1;
	}
      else if ((mask & 0x3) == 0)
	{
	  mask ^= 0x8;
	  now_it.block_length = 2;
	}
      else if ((mask & 0x1) == 0)
	{
	  mask ^= 0xC;
	  now_it.block_length = 3;
	}
      else
	{
	  mask ^= 0xE;
	  now_it.block_length = 4;
	}

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  /* Condition goes in bits 7..4 of the IT encoding.  */
  inst.instruction |= cond << 4;
}
11430
/* Helper function used for both push/pop and ldm/stm.  Validates the
   register list MASK against BASE/WRITEBACK, converts single-register
   transfers to LDR/STR, and ORs the fields into inst.instruction.  */
static void
encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
{
  bfd_boolean load;

  /* Bit 20 of the 32-bit template distinguishes loads from stores.  */
  load = (inst.instruction & (1 << 20)) != 0;

  if (mask & (1 << 13))
    inst.error = _("SP not allowed in register list");

  if ((mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
		   "using write back is UNPREDICTABLE");

  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    /* Loading PC acts as a branch, so it must be the last
	       instruction in an IT block.  */
	    set_it_insn_type_last ();
	}
    }
  else
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");
    }

  if ((mask & (mask - 1)) == 0)
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* Move the single transfer register into the Rt field
	 (bits 15..12).  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  inst.instruction |= base << 16;
}
11494
/* Thumb LDM/STM (and via aliases PUSH/POP).  Tries hard to pick a
   16-bit encoding in unified syntax — including converting a
   single-register list into LDR/STR or PUSH/POP — before falling back
   to the 32-bit form.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  /* MASK is the base register's bit in the register list.  */
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7)
	    {
	      /* The 16-bit encoding is usable when stmia has writeback,
		 or ldmia's writeback matches the base not being listed.  */
	      if (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask)))
		{
		  if (inst.instruction == T_MNEM_stmia
		      && (inst.operands[1].imm & mask)
		      && (inst.operands[1].imm & (mask - 1)))
		    as_warn (_("value stored for r%d is UNKNOWN"),
			     inst.operands[0].reg);

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= inst.operands[0].reg << 8;
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* This means 1 register in reg list one of 3 situations:
		     1. Instruction is stmia, but without writeback.
		     2. lmdia without writeback, but with Rn not in
			reglist.
		     3. ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     case 1 and 2 which can be converted into a 16-bit
		     str or ldr.  The SP cases are handled below.  */
		  unsigned long opcode;
		  /* First, record an error for Case 3.  */
		  if (inst.operands[1].imm & mask
		      && inst.operands[0].writeback)
		    inst.error =
			_("having the base register in the register list when "
			  "using write back is UNPREDICTABLE");

		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
			    : T_MNEM_ldr);
		  inst.instruction = THUMB_OP16 (opcode);
		  inst.instruction |= inst.operands[0].reg << 3;
		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
		  narrow = TRUE;
		}
	    }
	  else if (inst.operands[0] .reg == REG_SP)
	    {
	      /* SP-based lists become PUSH/POP (with writeback) or
		 SP-relative STR/LDR (single register, no writeback).  */
	      if (inst.operands[0].writeback)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_push : T_MNEM_pop);
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
		  narrow = TRUE;
		}
	    }
	}

      if (!narrow)
	{
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
				inst.operands[0].writeback);
	}
    }
  else
    {
      /* Non-unified syntax: only the 16-bit ldmia/stmia forms exist.  */
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNKNOWN"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
11622
/* Thumb LDREX.  Only a plain [Rn, #imm] addressing form is legal, and
   the base may not be PC.  The offset is recorded as a U8 offset reloc
   to be resolved later.  */
static void
do_t_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative,
	      BAD_ADDR_MODE);

  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
11638
/* Thumb LDREXD.  The second destination register defaults to Rt + 1
   when omitted; the pair must not overlap.  */
static void
do_t_ldrexd (void)
{
  if (!inst.operands[1].present)
    {
      /* Rt + 1 would be r15 (PC), which cannot be a destination.  */
      constraint (inst.operands[0].reg == REG_LR,
		  _("r14 not allowed as first register "
		    "when second register is omitted"));
      inst.operands[1].reg = inst.operands[0].reg + 1;
    }
  constraint (inst.operands[0].reg == inst.operands[1].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  inst.instruction |= inst.operands[2].reg << 16;
}
11656
/* Thumb single-register load/store (LDR/STR and byte/halfword/signed
   variants).  Selects between the many 16-bit encodings (immediate,
   register offset, SP- and PC-relative) and the 32-bit encoding, and
   handles literal-pool loads.  */
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  /* A load whose destination is PC acts as a branch.  */
  if (inst.operands[0].isreg
      && !inst.operands[0].preind
      && inst.operands[0].reg == REG_PC)
    set_it_insn_type_last ();

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  /* Non-register second operand: try a literal-pool load or an
	     equivalent mov/mvn.  */
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Rik] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
		reject_bad_reg (inst.operands[1].imm);
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* Switch to the dedicated PC-/SP-relative mnemonics.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.reloc.pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		/* Allow relaxation to the 32-bit form.  */
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */

      /* Warning for Erratum 752419.  */
      if (opcode == T_MNEM_ldr
	  && inst.operands[0].reg == REG_SP
	  && inst.operands[1].writeback == 1
	  && !inst.operands[1].immisreg)
	{
	  if (no_cpu_selected ()
	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
	    as_warn (_("This instruction may be unpredictable "
		       "if executed on M-profile cores "
		       "with interrupts enabled."));
	}

      /* Do some validations regarding addressing modes.  */
      if (inst.operands[1].immisreg)
	reject_bad_reg (inst.operands[1].imm);

      constraint (inst.operands[1].writeback == 1
		  && inst.operands[0].reg == inst.operands[1].reg,
		  BAD_OVERLAP);

      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      check_ldr_r15_aligned ();
      return;
    }

  /* Non-unified syntax from here on: only 16-bit encodings.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      /* PC-/SP-relative forms only exist for word loads/stores.  */
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

  /* Shared tail: convert immediate-offset templates to the
     register-offset encodings.  */
 op16:
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
11843
/* Thumb LDRD/STRD.  The second transfer register defaults to Rt + 1
   when omitted; warns about base-register overlap with writeback.  */
static void
do_t_ldstd (void)
{
  if (!inst.operands[1].present)
    {
      inst.operands[1].reg = inst.operands[0].reg + 1;
      /* Rt + 1 must still be an encodable register.  */
      constraint (inst.operands[0].reg == REG_LR,
		  _("r14 not allowed here"));
      constraint (inst.operands[0].reg == REG_R12,
		  _("r12 not allowed here"));
    }

  if (inst.operands[2].writeback
      && (inst.operands[0].reg == inst.operands[2].reg
	  || inst.operands[1].reg == inst.operands[2].reg))
    as_warn (_("base register written back, and overlaps "
	       "one of transfer registers"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
}
11866
/* Thumb LDRT/STRT family (the "T" unprivileged forms); delegates
   address encoding with is_t set.  */
static void
do_t_ldstt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
}
11873
11874 static void
11875 do_t_mla (void)
11876 {
11877 unsigned Rd, Rn, Rm, Ra;
11878
11879 Rd = inst.operands[0].reg;
11880 Rn = inst.operands[1].reg;
11881 Rm = inst.operands[2].reg;
11882 Ra = inst.operands[3].reg;
11883
11884 reject_bad_reg (Rd);
11885 reject_bad_reg (Rn);
11886 reject_bad_reg (Rm);
11887 reject_bad_reg (Ra);
11888
11889 inst.instruction |= Rd << 8;
11890 inst.instruction |= Rn << 16;
11891 inst.instruction |= Rm;
11892 inst.instruction |= Ra << 12;
11893 }
11894
11895 static void
11896 do_t_mlal (void)
11897 {
11898 unsigned RdLo, RdHi, Rn, Rm;
11899
11900 RdLo = inst.operands[0].reg;
11901 RdHi = inst.operands[1].reg;
11902 Rn = inst.operands[2].reg;
11903 Rm = inst.operands[3].reg;
11904
11905 reject_bad_reg (RdLo);
11906 reject_bad_reg (RdHi);
11907 reject_bad_reg (Rn);
11908 reject_bad_reg (Rm);
11909
11910 inst.instruction |= RdLo << 12;
11911 inst.instruction |= RdHi << 8;
11912 inst.instruction |= Rn << 16;
11913 inst.instruction |= Rm;
11914 }
11915
/* Thumb MOV/MOVS/CMP (argument parse).  Handles immediate, plain
   register, and shifted-register second operands, choosing between the
   narrow and wide encodings and their special cases (SUBS PC,LR,#0;
   register shifts as separate shift instructions; high-register MOV).  */
static void
do_t_mov_cmp (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* MOV into PC acts as a branch.  */
  if (Rn == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      /* MOV/MOVS put their destination in bits 11..8; CMP puts its
	 first operand in bits 19..16.  */
      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean narrow;
      bfd_boolean low_regs;

      low_regs = (Rn <= 7 && Rm <= 7);
      opcode = inst.instruction;
      if (in_it_block ())
	narrow = opcode != T_MNEM_movs;
      else
	narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
	  || inst.operands[1].shifted)
	narrow = FALSE;

      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
	  && !inst.operands[1].shifted
	  && Rn == REG_PC
	  && Rm == REG_LR)
	{
	  inst.instruction = T2_SUBS_PC_LR;
	  return;
	}

      if (opcode == T_MNEM_cmp)
	{
	  constraint (Rn == REG_PC, BAD_PC);
	  if (narrow)
	    {
	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
		 but valid.  */
	      warn_deprecated_sp (Rm);
	      /* R15 was documented as a valid choice for Rm in ARMv6,
		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
		 tools reject R15, so we do too.  */
	      constraint (Rm == REG_PC, BAD_PC);
	    }
	  else
	    reject_bad_reg (Rm);
	}
      else if (opcode == T_MNEM_mov
	       || opcode == T_MNEM_movs)
	{
	  if (inst.operands[1].isreg)
	    {
	      if (opcode == T_MNEM_movs)
		{
		  reject_bad_reg (Rn);
		  reject_bad_reg (Rm);
		}
	      else if (narrow)
		{
		  /* This is mov.n.  */
		  if ((Rn == REG_SP || Rn == REG_PC)
		      && (Rm == REG_SP || Rm == REG_PC))
		    {
		      as_tsktsk (_("Use of r%u as a source register is "
				 "deprecated when r%u is the destination "
				 "register."), Rm, Rn);
		    }
		}
	      else
		{
		  /* This is mov.w.  */
		  constraint (Rn == REG_PC, BAD_PC);
		  constraint (Rm == REG_PC, BAD_PC);
		  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
		    constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
		}
	    }
	  else
	    reject_bad_reg (Rn);
	}

      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand.  */
	  if (!in_it_block () && opcode == T_MNEM_mov)
	    narrow = 0;
	  if (low_regs && narrow)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= Rn << 8;
	      if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		  || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		{
		  if (inst.size_req == 2)
		    inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
		  else
		    inst.relax = opcode;
		}
	    }
	  else
	    {
	      /* The group-relocation types only fit the narrow form.  */
	      constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
			  && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
			  THUMB1_RELOC_ONLY);

	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= Rn << r0off;
	      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
	       && (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs))
	{
	  /* Register shifts are encoded as separate shift instructions.  */
	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);

	  if (in_it_block ())
	    narrow = !flags;
	  else
	    narrow = flags;

	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (!low_regs || inst.operands[1].imm > 7)
	    narrow = FALSE;

	  /* The narrow shift encodings have only two register fields.  */
	  if (Rn != Rm)
	    narrow = FALSE;

	  switch (inst.operands[1].shift_kind)
	    {
	    case SHIFT_LSL:
	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
	      break;
	    case SHIFT_ASR:
	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
	      break;
	    case SHIFT_LSR:
	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
	      break;
	    case SHIFT_ROR:
	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
	      break;
	    default:
	      abort ();
	    }

	  inst.instruction = opcode;
	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= inst.operands[1].imm << 3;
	    }
	  else
	    {
	      if (flags)
		inst.instruction |= CONDS_BIT;

	      inst.instruction |= Rn << 8;
	      inst.instruction |= Rm << 16;
	      inst.instruction |= inst.operands[1].imm;
	    }
	}
      else if (!narrow)
	{
	  /* Some mov with immediate shift have narrow variants.
	     Register shifts are handled above.  */
	  if (low_regs && inst.operands[1].shifted
	      && (inst.instruction == T_MNEM_mov
		  || inst.instruction == T_MNEM_movs))
	    {
	      if (in_it_block ())
		narrow = (inst.instruction == T_MNEM_mov);
	      else
		narrow = (inst.instruction == T_MNEM_movs);
	    }

	  if (narrow)
	    {
	      switch (inst.operands[1].shift_kind)
		{
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		default: narrow = FALSE; break;
		}
	    }

	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
      else
	switch (inst.instruction)
	  {
	  case T_MNEM_mov:
	    /* In v4t or v5t a move of two lowregs produces unpredictable
	       results.  Don't allow this.  */
	    if (low_regs)
	      {
		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
			    "MOV Rd, Rs with two low registers is not "
			    "permitted on this architecture");
		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
					arm_ext_v6);
	      }

	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (Rn & 0x8) << 4;
	    inst.instruction |= (Rn & 0x7);
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_movs:
	    /* We know we have low registers at this point.
	       Generate LSLS Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_LSL_I;
	    inst.instruction |= Rn;
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_cmp:
	    if (low_regs)
	      {
		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= Rn;
		inst.instruction |= Rm << 3;
	      }
	    else
	      {
		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (Rn & 0x8) << 4;
		inst.instruction |= (Rn & 0x7);
		inst.instruction |= Rm << 3;
	      }
	    break;
	  }
      return;
    }

  /* Non-unified syntax: only the 16-bit encodings are available.  */
  inst.instruction = THUMB_OP16 (inst.instruction);

  /* PR 10443: Do not silently ignore shifted operands.  */
  constraint (inst.operands[1].shifted,
	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));

  if (inst.operands[1].isreg)
    {
      if (Rn < 8 && Rm < 8)
	{
	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
	     since a MOV instruction produces unpredictable results.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_ADD_I3;
	  else
	    inst.instruction = T_OPCODE_CMP_LR;

	  inst.instruction |= Rn;
	  inst.instruction |= Rm << 3;
	}
      else
	{
	  /* High registers: use the hi-reg MOV/CMP encodings.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_MOV_HR;
	  else
	    inst.instruction = T_OPCODE_CMP_HR;
	  do_t_cpy ();
	}
    }
  else
    {
      constraint (Rn > 7,
		  _("only lo regs allowed with immediate"));
      inst.instruction |= Rn << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
12213
/* Thumb MOVW/MOVT.  Bit 23 of the template distinguishes MOVT (top)
   from MOVW; :lower16:/:upper16: relocs are translated to their Thumb
   counterparts, otherwise a constant is split into the imm4:i:imm3:imm8
   fields of the encoding.  */
static void
do_t_mov16 (void)
{
  unsigned Rd;
  bfd_vma imm;
  bfd_boolean top;

  top = (inst.instruction & 0x00800000) != 0;
  if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
    {
      constraint (top, _(":lower16: not allowed in this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
    }
  else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
    {
      constraint (!top, _(":upper16: not allowed in this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
    }

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  inst.instruction |= Rd << 8;
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      /* Constant known now: scatter the 16-bit value over the
	 imm4 (19..16), i (26), imm3 (14..12) and imm8 (7..0) fields.  */
      imm = inst.reloc.exp.X_add_number;
      inst.instruction |= (imm & 0xf000) << 4;
      inst.instruction |= (imm & 0x0800) << 15;
      inst.instruction |= (imm & 0x0700) << 4;
      inst.instruction |= (imm & 0x00ff);
    }
}
12246
/* Thumb MVN/MVNS/TST/CMN (argument parse).  Chooses between the narrow
   and wide encodings for register operands; immediates always take the
   32-bit form.  */
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      /* MVN/MVNS put their destination in bits 11..8; TST/CMN put
	 their first operand in bits 19..16.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn
	       || inst.instruction == T_MNEM_tst)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Non-unified syntax: only the narrow forms are available.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
12326
/* Thumb-2 MRS: move a special-purpose register into Rd.  Operand 1 is
   either a banked register (isreg set) or a PSR value with flag bits.  */
static void
do_t_mrs (void)
{
  unsigned Rd;

  /* Give the VFP "mrs" aliases first refusal.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      /* Banked register: the register number packs several encoding
	 fields, which are unpacked into the instruction below.  */
      unsigned br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698: The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bfd_boolean m_profile =
	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
	  constraint ((flags != 0) && m_profile, _("selected processor does "
			"not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      inst.instruction |= 0xf0000;
    }
}
12374
/* Thumb-2 MSR: write register Rn into a special-purpose register.
   Operand 0 is a banked register (isreg set) or a PSR-with-flags
   immediate; operand 1 must be a register.  */
static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  /* Give the VFP "msr" aliases first refusal.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698: The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bfd_boolean m_profile =
	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
	   && (bits & ~(PSR_s | PSR_f)) != 0)
	  || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
	      && bits != PSR_f)) && m_profile,
	  _("selected processor does not support requested special "
	    "purpose register"));
    }
  else
    constraint ((flags & 0xff) != 0, _("selected processor does not support "
		"requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  /* Unpack the flag bits into the various instruction fields.  */
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
12421
/* Thumb MUL/MULS.  If only two operands were given, the destination
   doubles as the second source.  The 16-bit encoding requires the
   destination to overlap one of the sources.  */
static void
do_t_mul (void)
{
  bfd_boolean narrow;
  unsigned Rd, Rn, Rm;

  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  if (unified_syntax)
    {
      /* 16-bit form needs low registers, Rd overlapping a source, and
	 the right flag-setting behaviour for the IT context.  */
      if (inst.size_req == 4
	  || (Rd != Rn
	      && Rd != Rm)
	  || Rn > 7
	  || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_muls)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
    }
  else
    {
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);
      narrow = TRUE;
    }

  if (narrow)
    {
      /* 16-bit MULS/Conditional MUL.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rn)
	inst.instruction |= Rm << 3;
      else if (Rd == Rm)
	inst.instruction |= Rn << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
  else
    {
      constraint (inst.instruction != T_MNEM_mul,
		  _("Thumb-2 MUL must not set flags"));
      /* 32-bit MUL.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rn << 16;
      inst.instruction |= Rm << 0;

      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
      reject_bad_reg (Rm);
    }
}
12484
12485 static void
12486 do_t_mull (void)
12487 {
12488 unsigned RdLo, RdHi, Rn, Rm;
12489
12490 RdLo = inst.operands[0].reg;
12491 RdHi = inst.operands[1].reg;
12492 Rn = inst.operands[2].reg;
12493 Rm = inst.operands[3].reg;
12494
12495 reject_bad_reg (RdLo);
12496 reject_bad_reg (RdHi);
12497 reject_bad_reg (Rn);
12498 reject_bad_reg (Rm);
12499
12500 inst.instruction |= RdLo << 12;
12501 inst.instruction |= RdHi << 8;
12502 inst.instruction |= Rn << 16;
12503 inst.instruction |= Rm;
12504
12505 if (RdLo == RdHi)
12506 as_tsktsk (_("rdhi and rdlo must be different"));
12507 }
12508
/* Thumb NOP and hint instructions.  Operand 0, if present, is the
   hint number.  */
static void
do_t_nop (void)
{
  set_it_insn_type (NEUTRAL_IT_INSN);

  if (unified_syntax)
    {
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
	{
	  /* Hint numbers above 15 only fit the 32-bit encoding.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].imm;
	}
      else
	{
	  /* PR9722: Check for Thumb2 availability before
	     generating a thumb2 nop instruction.  */
	  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= inst.operands[0].imm << 4;
	    }
	  else
	    /* Fall back to "mov r8, r8".  */
	    inst.instruction = 0x46c0;
	}
    }
  else
    {
      constraint (inst.operands[0].present,
		  _("Thumb does not support NOP with hints"));
      inst.instruction = 0x46c0;
    }
}
12541
/* Thumb NEG/NEGS: choose between the 16-bit and 32-bit encodings
   based on register numbers, flag-setting and the IT context.  */
static void
do_t_neg (void)
{
  if (unified_syntax)
    {
      bfd_boolean narrow;

      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      /* High registers and explicit .w force the wide encoding.  */
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      if (!narrow)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg << 8;
	  inst.instruction |= inst.operands[1].reg << 16;
	}
      else
	{
	  inst.instruction = THUMB_OP16 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
  else
    {
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
		  BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
}
12582
12583 static void
12584 do_t_orn (void)
12585 {
12586 unsigned Rd, Rn;
12587
12588 Rd = inst.operands[0].reg;
12589 Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
12590
12591 reject_bad_reg (Rd);
12592 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
12593 reject_bad_reg (Rn);
12594
12595 inst.instruction |= Rd << 8;
12596 inst.instruction |= Rn << 16;
12597
12598 if (!inst.operands[2].isreg)
12599 {
12600 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
12601 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
12602 }
12603 else
12604 {
12605 unsigned Rm;
12606
12607 Rm = inst.operands[2].reg;
12608 reject_bad_reg (Rm);
12609
12610 constraint (inst.operands[2].shifted
12611 && inst.operands[2].immisreg,
12612 _("shift must be constant"));
12613 encode_thumb32_shifted_operand (2);
12614 }
12615 }
12616
12617 static void
12618 do_t_pkhbt (void)
12619 {
12620 unsigned Rd, Rn, Rm;
12621
12622 Rd = inst.operands[0].reg;
12623 Rn = inst.operands[1].reg;
12624 Rm = inst.operands[2].reg;
12625
12626 reject_bad_reg (Rd);
12627 reject_bad_reg (Rn);
12628 reject_bad_reg (Rm);
12629
12630 inst.instruction |= Rd << 8;
12631 inst.instruction |= Rn << 16;
12632 inst.instruction |= Rm;
12633 if (inst.operands[3].present)
12634 {
12635 unsigned int val = inst.reloc.exp.X_add_number;
12636 constraint (inst.reloc.exp.X_op != O_constant,
12637 _("expression too complex"));
12638 inst.instruction |= (val & 0x1c) << 10;
12639 inst.instruction |= (val & 0x03) << 6;
12640 }
12641 }
12642
/* Thumb-2 PKHTB.  Without a shift operand it is encoded via PKHBT
   with bit 5 cleared and the source registers swapped.  */
static void
do_t_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      unsigned Rtmp;

      inst.instruction &= ~0x00000020;

      /* PR 10168.  Swap the Rm and Rn registers.  */
      Rtmp = inst.operands[1].reg;
      inst.operands[1].reg = inst.operands[2].reg;
      inst.operands[2].reg = Rtmp;
    }
  do_t_pkhbt ();
}
12659
/* Thumb-2 preload (PLD-family): a single address operand, encoded by
   the common Thumb-2 address-mode helper.  */
static void
do_t_pld (void)
{
  if (inst.operands[0].immisreg)
    reject_bad_reg (inst.operands[0].imm);

  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
12668
/* Thumb PUSH/POP.  Uses the 16-bit encoding when the register mask
   allows it (low registers, optionally plus LR for push / PC for pop),
   otherwise falls back to a 32-bit LDM/STM form.  */
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  if (inst.size_req != 4 && (mask & ~0xff) == 0)
    /* Low registers only: plain 16-bit encoding.  */
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if (inst.size_req != 4
	   && (mask & ~0xff) == (1U << (inst.instruction == T_MNEM_push
				       ? REG_LR : REG_PC)))
    {
      /* Low registers plus LR (push) or PC (pop): 16-bit encoding
	 with the extra-register bit set.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      /* Anything else needs the 32-bit LDM/STM encoding with SP as
	 the base register.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_ldmstm (13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
12701
12702 static void
12703 do_t_rbit (void)
12704 {
12705 unsigned Rd, Rm;
12706
12707 Rd = inst.operands[0].reg;
12708 Rm = inst.operands[1].reg;
12709
12710 reject_bad_reg (Rd);
12711 reject_bad_reg (Rm);
12712
12713 inst.instruction |= Rd << 8;
12714 inst.instruction |= Rm << 16;
12715 inst.instruction |= Rm;
12716 }
12717
12718 static void
12719 do_t_rev (void)
12720 {
12721 unsigned Rd, Rm;
12722
12723 Rd = inst.operands[0].reg;
12724 Rm = inst.operands[1].reg;
12725
12726 reject_bad_reg (Rd);
12727 reject_bad_reg (Rm);
12728
12729 if (Rd <= 7 && Rm <= 7
12730 && inst.size_req != 4)
12731 {
12732 inst.instruction = THUMB_OP16 (inst.instruction);
12733 inst.instruction |= Rd;
12734 inst.instruction |= Rm << 3;
12735 }
12736 else if (unified_syntax)
12737 {
12738 inst.instruction = THUMB_OP32 (inst.instruction);
12739 inst.instruction |= Rd << 8;
12740 inst.instruction |= Rm << 16;
12741 inst.instruction |= Rm;
12742 }
12743 else
12744 inst.error = BAD_HIREG;
12745 }
12746
12747 static void
12748 do_t_rrx (void)
12749 {
12750 unsigned Rd, Rm;
12751
12752 Rd = inst.operands[0].reg;
12753 Rm = inst.operands[1].reg;
12754
12755 reject_bad_reg (Rd);
12756 reject_bad_reg (Rm);
12757
12758 inst.instruction |= Rd << 8;
12759 inst.instruction |= Rm;
12760 }
12761
/* Thumb RSB/RSBS.  With only two operands, Rd doubles as the first
   source.  "rsb{s} Rd, Rs, #0" may be narrowed to the 16-bit NEG.  */
static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Bit 20 is the S (flag-setting) bit of the 32-bit encoding.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      /* Only an immediate of exactly zero can be narrowed.  */
      if (inst.reloc.exp.X_op != O_constant
	  || inst.reloc.exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.reloc.type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
12816
/* Thumb SETEND.  Operand 0 selects big-endian (bit 3 of the opcode).
   The instruction is deprecated from ARMv8 onwards.  */
static void
do_t_setend (void)
{
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    as_tsktsk (_("setend use is deprecated for ARMv8"));

  set_it_insn_type (OUTSIDE_IT_INSN);
  if (inst.operands[0].imm)
    inst.instruction |= 0x8;
}
12828
/* Thumb shift instructions: ASR, LSL, LSR, ROR and their flag-setting
   variants, in both register-shift and immediate-shift forms.  With
   only two operands, the destination doubles as the first source.  */
static void
do_t_shift (void)
{
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* Decide whether the 16-bit encoding is usable.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      /* There is no 16-bit immediate ROR.  */
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      /* 16-bit register-shift form requires Rd == Rn and a low shift
	 register.  */
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* Immediate shift: encoded as a 32-bit MOV (or MOVS)
		 with a shifted-register operand.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.reloc.type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Non-unified syntax: only the 16-bit low-register forms.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;

	  /* PR 12854: Error on extraneous shifts.  */
	  constraint (inst.operands[2].shifted,
		      _("extraneous shift as part of operand to shift insn"));
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
12976
12977 static void
12978 do_t_simd (void)
12979 {
12980 unsigned Rd, Rn, Rm;
12981
12982 Rd = inst.operands[0].reg;
12983 Rn = inst.operands[1].reg;
12984 Rm = inst.operands[2].reg;
12985
12986 reject_bad_reg (Rd);
12987 reject_bad_reg (Rn);
12988 reject_bad_reg (Rm);
12989
12990 inst.instruction |= Rd << 8;
12991 inst.instruction |= Rn << 16;
12992 inst.instruction |= Rm;
12993 }
12994
12995 static void
12996 do_t_simd2 (void)
12997 {
12998 unsigned Rd, Rn, Rm;
12999
13000 Rd = inst.operands[0].reg;
13001 Rm = inst.operands[1].reg;
13002 Rn = inst.operands[2].reg;
13003
13004 reject_bad_reg (Rd);
13005 reject_bad_reg (Rn);
13006 reject_bad_reg (Rm);
13007
13008 inst.instruction |= Rd << 8;
13009 inst.instruction |= Rn << 16;
13010 inst.instruction |= Rm;
13011 }
13012
/* Thumb-2 SMC: secure monitor call with a 16-bit immediate, split
   across three instruction fields.  */
static void
do_t_smc (void)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
	      _("SMC is not permitted on this architecture"));
  constraint (inst.reloc.exp.X_op != O_constant,
	      _("expression too complex"));
  inst.reloc.type = BFD_RELOC_UNUSED;
  inst.instruction |= (value & 0xf000) >> 12;
  inst.instruction |= (value & 0x0ff0);
  inst.instruction |= (value & 0x000f) << 16;
  /* PR gas/15623: SMC instructions must be last in an IT block.  */
  set_it_insn_type_last ();
}
13028
13029 static void
13030 do_t_hvc (void)
13031 {
13032 unsigned int value = inst.reloc.exp.X_add_number;
13033
13034 inst.reloc.type = BFD_RELOC_UNUSED;
13035 inst.instruction |= (value & 0x0fff);
13036 inst.instruction |= (value & 0xf000) << 4;
13037 }
13038
/* Common encoder for Thumb-2 SSAT and USAT.  BIAS is subtracted from
   the saturation-position operand (1 for ssat, 0 for usat).  */
static void
do_t_ssat_usat (int bias)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  if (inst.operands[3].present)
    {
      offsetT shift_amount = inst.reloc.exp.X_add_number;

      inst.reloc.type = BFD_RELOC_UNUSED;

      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit.  */

	  /* Shift amount is split across imm3 and imm2 fields.  */
	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
13076
/* Thumb-2 SSAT: the saturation position is encoded with a bias of 1.  */
static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
13082
13083 static void
13084 do_t_ssat16 (void)
13085 {
13086 unsigned Rd, Rn;
13087
13088 Rd = inst.operands[0].reg;
13089 Rn = inst.operands[2].reg;
13090
13091 reject_bad_reg (Rd);
13092 reject_bad_reg (Rn);
13093
13094 inst.instruction |= Rd << 8;
13095 inst.instruction |= inst.operands[1].imm - 1;
13096 inst.instruction |= Rn << 16;
13097 }
13098
/* Thumb-2 STREX: store-exclusive.  The address operand must be a
   plain register with at most an immediate offset (handled by the
   fixup).  */
static void
do_t_strex (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
13115
13116 static void
13117 do_t_strexd (void)
13118 {
13119 if (!inst.operands[2].present)
13120 inst.operands[2].reg = inst.operands[1].reg + 1;
13121
13122 constraint (inst.operands[0].reg == inst.operands[1].reg
13123 || inst.operands[0].reg == inst.operands[2].reg
13124 || inst.operands[0].reg == inst.operands[3].reg,
13125 BAD_OVERLAP);
13126
13127 inst.instruction |= inst.operands[0].reg;
13128 inst.instruction |= inst.operands[1].reg << 12;
13129 inst.instruction |= inst.operands[2].reg << 8;
13130 inst.instruction |= inst.operands[3].reg << 16;
13131 }
13132
13133 static void
13134 do_t_sxtah (void)
13135 {
13136 unsigned Rd, Rn, Rm;
13137
13138 Rd = inst.operands[0].reg;
13139 Rn = inst.operands[1].reg;
13140 Rm = inst.operands[2].reg;
13141
13142 reject_bad_reg (Rd);
13143 reject_bad_reg (Rn);
13144 reject_bad_reg (Rm);
13145
13146 inst.instruction |= Rd << 8;
13147 inst.instruction |= Rn << 16;
13148 inst.instruction |= Rm;
13149 inst.instruction |= inst.operands[3].imm << 4;
13150 }
13151
/* Thumb extend instructions (sxth and friends), with an optional
   rotation operand.  The 16-bit encoding requires low registers and
   no rotation.  */
static void
do_t_sxth (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      /* Rotation goes in bits 5:4.  */
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
13187
/* Thumb SWI/SVC: the immediate is handled entirely by the fixup.  */
static void
do_t_swi (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SWI;
}
13193
/* Thumb-2 TBB/TBH: table branch.  Bit 4 of the opcode distinguishes
   the halfword form.  */
static void
do_t_tb (void)
{
  unsigned Rn, Rm;
  int half;

  half = (inst.instruction & 0x10) != 0;
  set_it_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  /* From ARMv8 onwards SP is permitted as the base register.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
13216
/* Thumb UDF: permanently undefined instruction, with an optional
   immediate (defaulting to 0).  Immediates above 255, or an explicit
   .w, force the 32-bit encoding.  */
static void
do_t_udf (void)
{
  if (!inst.operands[0].present)
    inst.operands[0].imm = 0;

  if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
    {
      constraint (inst.size_req == 2,
                  _("immediate value out of range"));
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
      inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
    }
  else
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].imm;
    }

  set_it_insn_type (NEUTRAL_IT_INSN);
}
13239
13240
/* Thumb-2 USAT: the saturation position is encoded with no bias.  */
static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
13246
13247 static void
13248 do_t_usat16 (void)
13249 {
13250 unsigned Rd, Rn;
13251
13252 Rd = inst.operands[0].reg;
13253 Rn = inst.operands[2].reg;
13254
13255 reject_bad_reg (Rd);
13256 reject_bad_reg (Rn);
13257
13258 inst.instruction |= Rd << 8;
13259 inst.instruction |= inst.operands[1].imm;
13260 inst.instruction |= Rn << 16;
13261 }
13262
13263 /* Neon instruction encoder helpers. */
13264
13265 /* Encodings for the different types for various Neon opcodes. */
13266
/* An "invalid" code for the following tables.  */
#define N_INV -1u

/* One row of the Neon encoding table: alternative base encodings for
   a single overloaded mnemonic, selected by operand type.  */
struct neon_tab_entry
{
  unsigned integer;		/* Integer (or "first") variant.  */
  unsigned float_or_poly;	/* Float or polynomial variant.  */
  unsigned scalar_or_imm;	/* Scalar or immediate variant.  */
};
13276
/* Map overloaded Neon opcodes to their respective encodings.  Each X()
   entry gives the integer, float-or-polynomial and scalar-or-immediate
   base encodings for one mnemonic (N_INV where no such variant
   exists); the same list expands into both enum neon_opc and
   neon_enc_tab[] below.  */
#define NEON_ENC_TAB							\
  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */	\
  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
  X(vfma,	N_INV, 0x0000c10, N_INV),		\
  X(vfms,	N_INV, 0x0200c10, N_INV),		\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
  X(vqrdmlah,   0x3000b10, N_INV,     0x0800e40),	\
  X(vqrdmlsh,   0x3000c10, N_INV,     0x0800f40),	\
  X(vshl,	0x0000400, N_INV,     0x0800510),	\
  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
  X(vand,	0x0000110, N_INV,     0x0800030),	\
  X(vbic,	0x0100110, N_INV,     0x0800030),	\
  X(veor,	0x1000110, N_INV,     N_INV),		\
  X(vorn,	0x0300110, N_INV,     0x0800010),	\
  X(vorr,	0x0200110, N_INV,     0x0800010),	\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),		\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
  X(vst2,	0x0000100, 0x0800100, N_INV),		\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
  X(vst3,	0x0000200, 0x0800200, N_INV),		\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
  X(vst4,	0x0000300, 0x0800300, N_INV),		\
  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
  X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
  X(vnmla,      0xe100a40, 0xe100b40, N_INV),		\
  X(vnmls,      0xe100a00, 0xe100b00, N_INV),		\
  X(vfnma,      0xe900a40, 0xe900b40, N_INV),		\
  X(vfnms,      0xe900a00, 0xe900b00, N_INV),		\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
  X(vcmpez,	0xeb50ac0, 0xeb50bc0, N_INV),		\
  X(vseleq,	0xe000a00, N_INV,     N_INV),		\
  X(vselvs,	0xe100a00, N_INV,     N_INV),		\
  X(vselge,	0xe200a00, N_INV,     N_INV),		\
  X(vselgt,	0xe300a00, N_INV,     N_INV),		\
  X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),		\
  X(vminnm,	0xe800a40, 0x3200f10, N_INV),		\
  X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),		\
  X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),		\
  X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),		\
  X(aes,	0x3b00300, N_INV,     N_INV),		\
  X(sha3op,	0x2000c00, N_INV,     N_INV),		\
  X(sha1h,	0x3b902c0, N_INV,     N_INV),		\
  X(sha2op,     0x3ba0380, N_INV,     N_INV)
13354
/* One N_MNEM_* enumerator per row of NEON_ENC_TAB; used to index
   neon_enc_tab[] below.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};
13361
/* The encoding table itself, indexed by enum neon_opc.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};
13368
/* Do not use these macros; instead, use NEON_ENCODE defined below.
   Each looks up one column of neon_enc_tab[] for the N_MNEM_* value in
   the low 28 bits of X; the SINGLE/DOUBLE/FPV8 variants also preserve
   (part of) the top condition bits of X.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
#define NEON_ENC_FPV8_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))

/* Replace inst.instruction with the encoding variant named by TYPE
   (INTEGER, FLOAT, SCALAR, ...) and mark the instruction as Neon.  */
#define NEON_ENCODE(type, inst)					\
  do								\
    {								\
      inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
      inst.is_neon = 1;						\
    }								\
  while (0)
13393
/* Complain if a Neon type suffix (e.g. ".s32") was supplied for an
   instruction that is not a Neon instruction.  NOTE: this macro expands
   in the caller's scope and returns from the calling function on
   error.  */
#define check_neon_suffixes						\
  do									\
    {									\
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
	{								\
	  as_bad (_("invalid neon suffix for non neon instruction"));	\
	  return;							\
	}								\
    }									\
  while (0)
13404
/* Define shapes for instruction operands.  The following mnemonic characters
   are used in this table:

     H - VFP S<n> register holding a half-precision value
     F - VFP S<n> register
     D - Neon D<n> register
     Q - Neon Q<n> register
     I - Immediate
     S - Scalar
     R - ARM register
     L - D<n> register list

   This table is used to generate various data:
     - enumerations of the form NS_DDR to be used as arguments to
       neon_select_shape.
     - a table classifying shapes into single, double, quad, mixed.
     - a table used to drive neon_select_shape.  */

#define NEON_SHAPE_DEF			\
  X(3, (D, D, D), DOUBLE),		\
  X(3, (Q, Q, Q), QUAD),		\
  X(3, (D, D, I), DOUBLE),		\
  X(3, (Q, Q, I), QUAD),		\
  X(3, (D, D, S), DOUBLE),		\
  X(3, (Q, Q, S), QUAD),		\
  X(2, (D, D), DOUBLE),			\
  X(2, (Q, Q), QUAD),			\
  X(2, (D, S), DOUBLE),			\
  X(2, (Q, S), QUAD),			\
  X(2, (D, R), DOUBLE),			\
  X(2, (Q, R), QUAD),			\
  X(2, (D, I), DOUBLE),			\
  X(2, (Q, I), QUAD),			\
  X(3, (D, L, D), DOUBLE),		\
  X(2, (D, Q), MIXED),			\
  X(2, (Q, D), MIXED),			\
  X(3, (D, Q, I), MIXED),		\
  X(3, (Q, D, I), MIXED),		\
  X(3, (Q, D, D), MIXED),		\
  X(3, (D, Q, Q), MIXED),		\
  X(3, (Q, Q, D), MIXED),		\
  X(3, (Q, D, S), MIXED),		\
  X(3, (D, Q, S), MIXED),		\
  X(4, (D, D, D, I), DOUBLE),		\
  X(4, (Q, Q, Q, I), QUAD),		\
  X(4, (D, D, S, I), DOUBLE),		\
  X(4, (Q, Q, S, I), QUAD),		\
  X(2, (F, F), SINGLE),			\
  X(3, (F, F, F), SINGLE),		\
  X(2, (F, I), SINGLE),			\
  X(2, (F, D), MIXED),			\
  X(2, (D, F), MIXED),			\
  X(3, (F, F, I), MIXED),		\
  X(4, (R, R, F, F), SINGLE),		\
  X(4, (F, F, R, R), SINGLE),		\
  X(3, (D, R, R), DOUBLE),		\
  X(3, (R, R, D), DOUBLE),		\
  X(2, (S, R), SINGLE),			\
  X(2, (R, S), SINGLE),			\
  X(2, (F, R), SINGLE),			\
  X(2, (R, F), SINGLE),			\
/* Half float shape supported so far. */\
  X (2, (H, D), MIXED),			\
  X (2, (D, H), MIXED),			\
  X (2, (H, F), MIXED),			\
  X (2, (F, H), MIXED),			\
  X (2, (H, H), HALF),			\
  X (2, (H, R), HALF),			\
  X (2, (R, H), HALF),			\
  X (2, (H, I), HALF),			\
  X (3, (H, H, H), HALF),		\
  X (3, (H, F, I), MIXED),		\
  X (3, (F, H, I), MIXED),		\
  X (3, (D, H, H), MIXED),		\
  X (3, (D, H, S), MIXED)
13479
/* Paste the shape letters together into an NS_* enumerator name.  */
#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D

#define X(N, L, C) S##N L

/* One enumerator per entry of NEON_SHAPE_DEF (e.g. NS_DDD), plus
   NS_NULL as the terminator/no-match value.  */
enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4
13496
/* Broad classification of a shape: which register bank it predominantly
   uses.  Parallel to enum neon_shape.  */
enum neon_shape_class
{
  SC_HALF,
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};

#define X(N, L, C) SC_##C

/* neon_shape_class[shape] is the classification of that shape.  */
static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X
13514
/* One element kind of a shape — the mnemonic letters used in
   NEON_SHAPE_DEF, in the same order.  */
enum neon_shape_el
{
  SE_H,
  SE_F,
  SE_D,
  SE_Q,
  SE_I,
  SE_S,
  SE_R,
  SE_L
};

/* Register widths of above.  */
static unsigned neon_shape_el_size[] =
{
  16,	/* SE_H.  */
  32,	/* SE_F.  */
  64,	/* SE_D.  */
  128,	/* SE_Q.  */
  0,	/* SE_I: immediate — no register width.  */
  32,	/* SE_S.  */
  32,	/* SE_R.  */
  0	/* SE_L: register list — no single width.  */
};
13539
/* Operand count and per-operand element kinds for one shape.  */
struct neon_shape_info
{
  unsigned els;
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
};

/* Expand the shape letters into SE_* initializer lists.  */
#define S2(A,B)		{ SE_##A, SE_##B }
#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

/* neon_shape_tab[shape] describes the operands of that shape; it drives
   neon_select_shape below.  */
static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
13561
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.  Note that the modifier bits (N_DBL .. N_SIZ) reuse
   the low bit positions of the plain type bits; they are only interpreted
   when N_EQK is present.  */

enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_P64	 = 0x0200000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_P64
};

/* All of the N_EQK modifier bits together.  */
#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

/* Convenience unions of related type bits.  */
#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_S_32     (N_S8 | N_S16 | N_S32)
#define N_F_16_32  (N_F16 | N_F32)
#define N_SUF_32   (N_SU_32 | N_F_16_32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
#define N_F_ALL    (N_F16 | N_F32 | N_F64)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
13622
13623 /* Select a "shape" for the current instruction (describing register types or
13624 sizes) from a list of alternatives. Return NS_NULL if the current instruction
13625 doesn't fit. For non-polymorphic shapes, checking is usually done as a
13626 function of operand parsing, so this function doesn't need to be called.
13627 Shapes should be listed in order of decreasing length. */
13628
static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Try each candidate shape in turn until one matches all the parsed
     operands; the vararg list must be terminated by NS_NULL.  */
  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  switch (neon_shape_tab[shape].el[j])
	    {
	      /* If a  .f16,  .16,  .u16,  .s16 type specifier is given over
		 a VFP single precision register operand, it's essentially
		 means only half of the register is used.

		 If the type specifier is given after the mnemonics, the
		 information is stored in inst.vectype.  If the type specifier
		 is given after register operand, the information is stored
		 in inst.operands[].vectype.

		 When there is only one type specifier, and all the register
		 operands are the same type of hardware register, the type
		 specifier applies to all register operands.

		 If no type specifier is given, the shape is inferred from
		 operand information.

		 for example:
		 vadd.f16 s0, s1, s2:		NS_HHH
		 vabs.f16 s0, s1:		NS_HH
		 vmov.f16 s0, r1:		NS_HR
		 vmov.f16 r0, s1:		NS_RH
		 vcvt.f16 r0, s1:		NS_RH
		 vcvt.f16.s32	s2, s2, #29:	NS_HFI
		 vcvt.f16.s32	s2, s2:		NS_HF
	      */
	    case SE_H:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1
			 && inst.vectype.el[0].size == 16)
			|| (inst.vectype.elems > 1
			    && inst.vectype.el[j].size == 16)
			|| (inst.vectype.elems == 0
			    && inst.operands[j].vectype.type != NT_invtype
			    && inst.operands[j].vectype.size == 16))))
		matches = 0;
	      break;

	    case SE_F:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1 && inst.vectype.el[0].size == 32)
			|| (inst.vectype.elems > 1 && inst.vectype.el[j].size == 32)
			|| (inst.vectype.elems == 0
			    && (inst.operands[j].vectype.size == 32
				|| inst.operands[j].vectype.type == NT_invtype)))))
		matches = 0;
	      break;

	    case SE_D:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_L:
	      /* Register lists are not checked further here.  */
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
	/* We've matched all the entries in the shape table, and we don't
	   have any left over operands which have not been matched.  */
	break;
    }

  va_end (ap);

  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
13765
13766 /* True if SHAPE is predominantly a quadword operation (most of the time, this
13767 means the Q bit should be set). */
13768
13769 static int
13770 neon_quad (enum neon_shape shape)
13771 {
13772 return neon_shape_class[shape] == SC_QUAD;
13773 }
13774
13775 static void
13776 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
13777 unsigned *g_size)
13778 {
13779 /* Allow modification to be made to types which are constrained to be
13780 based on the key element, based on bits set alongside N_EQK. */
13781 if ((typebits & N_EQK) != 0)
13782 {
13783 if ((typebits & N_HLF) != 0)
13784 *g_size /= 2;
13785 else if ((typebits & N_DBL) != 0)
13786 *g_size *= 2;
13787 if ((typebits & N_SGN) != 0)
13788 *g_type = NT_signed;
13789 else if ((typebits & N_UNS) != 0)
13790 *g_type = NT_unsigned;
13791 else if ((typebits & N_INT) != 0)
13792 *g_type = NT_integer;
13793 else if ((typebits & N_FLT) != 0)
13794 *g_type = NT_float;
13795 else if ((typebits & N_SIZ) != 0)
13796 *g_type = NT_untyped;
13797 }
13798 }
13799
13800 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
13801 operand type, i.e. the single type specified in a Neon instruction when it
13802 is the only one given. */
13803
13804 static struct neon_type_el
13805 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
13806 {
13807 struct neon_type_el dest = *key;
13808
13809 gas_assert ((thisarg & N_EQK) != 0);
13810
13811 neon_modify_type_size (thisarg, &dest.type, &dest.size);
13812
13813 return dest;
13814 }
13815
13816 /* Convert Neon type and size into compact bitmask representation. */
13817
13818 static enum neon_type_mask
13819 type_chk_of_el_type (enum neon_el_type type, unsigned size)
13820 {
13821 switch (type)
13822 {
13823 case NT_untyped:
13824 switch (size)
13825 {
13826 case 8: return N_8;
13827 case 16: return N_16;
13828 case 32: return N_32;
13829 case 64: return N_64;
13830 default: ;
13831 }
13832 break;
13833
13834 case NT_integer:
13835 switch (size)
13836 {
13837 case 8: return N_I8;
13838 case 16: return N_I16;
13839 case 32: return N_I32;
13840 case 64: return N_I64;
13841 default: ;
13842 }
13843 break;
13844
13845 case NT_float:
13846 switch (size)
13847 {
13848 case 16: return N_F16;
13849 case 32: return N_F32;
13850 case 64: return N_F64;
13851 default: ;
13852 }
13853 break;
13854
13855 case NT_poly:
13856 switch (size)
13857 {
13858 case 8: return N_P8;
13859 case 16: return N_P16;
13860 case 64: return N_P64;
13861 default: ;
13862 }
13863 break;
13864
13865 case NT_signed:
13866 switch (size)
13867 {
13868 case 8: return N_S8;
13869 case 16: return N_S16;
13870 case 32: return N_S32;
13871 case 64: return N_S64;
13872 default: ;
13873 }
13874 break;
13875
13876 case NT_unsigned:
13877 switch (size)
13878 {
13879 case 8: return N_U8;
13880 case 16: return N_U16;
13881 case 32: return N_U32;
13882 case 64: return N_U64;
13883 default: ;
13884 }
13885 break;
13886
13887 default: ;
13888 }
13889
13890 return N_UTYP;
13891 }
13892
13893 /* Convert compact Neon bitmask type representation to a type and size. Only
13894 handles the case where a single bit is set in the mask. */
13895
13896 static int
13897 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
13898 enum neon_type_mask mask)
13899 {
13900 if ((mask & N_EQK) != 0)
13901 return FAIL;
13902
13903 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
13904 *size = 8;
13905 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
13906 *size = 16;
13907 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
13908 *size = 32;
13909 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
13910 *size = 64;
13911 else
13912 return FAIL;
13913
13914 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
13915 *type = NT_signed;
13916 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
13917 *type = NT_unsigned;
13918 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
13919 *type = NT_integer;
13920 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
13921 *type = NT_untyped;
13922 else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
13923 *type = NT_poly;
13924 else if ((mask & (N_F_ALL)) != 0)
13925 *type = NT_float;
13926 else
13927 return FAIL;
13928
13929 return SUCCESS;
13930 }
13931
13932 /* Modify a bitmask of allowed types. This is only needed for type
13933 relaxation. */
13934
13935 static unsigned
13936 modify_types_allowed (unsigned allowed, unsigned mods)
13937 {
13938 unsigned size;
13939 enum neon_el_type type;
13940 unsigned destmask;
13941 int i;
13942
13943 destmask = 0;
13944
13945 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
13946 {
13947 if (el_type_of_type_chk (&type, &size,
13948 (enum neon_type_mask) (allowed & i)) == SUCCESS)
13949 {
13950 neon_modify_type_size (mods, &type, &size);
13951 destmask |= type_chk_of_el_type (type, size);
13952 }
13953 }
13954
13955 return destmask;
13956 }
13957
13958 /* Check type and return type classification.
13959 The manual states (paraphrase): If one datatype is given, it indicates the
13960 type given in:
13961 - the second operand, if there is one
13962 - the operand, if there is no second operand
13963 - the result, if there are no operands.
13964 This isn't quite good enough though, so we use a concept of a "key" datatype
13965 which is set on a per-instruction basis, which is the one which matters when
13966 only one data type is written.
13967 Note: this function has side-effects (e.g. filling in missing operands). All
13968 Neon instructions should call it before performing bit encoding. */
13969
static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  One N_* constraint mask per operand; the
     one carrying N_KEY identifies the key element.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Two passes: pass 0 records the key element's type/size; pass 1
     validates every operand against the (possibly key-derived)
     constraints.  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if (((types_allowed & N_UNT) == 0)
	      && ((g_size == 8 && (types_allowed & N_8) != 0)
		  || (g_size == 16 && (types_allowed & N_16) != 0)
		  || (g_size == 32 && (types_allowed & N_32) != 0)
		  || (g_size == 64 && (types_allowed & N_64) != 0)))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;

		  /* Check architecture constraint on FP16 extension.  */
		  if (k_size == 16
		      && k_type == NT_float
		      && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
		    {
		      inst.error = _(BAD_FP16);
		      return badtype;
		    }
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape;
		  unsigned regwidth, match;

		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
		  if (ns == NS_NULL)
		    {
		      first_error (_("invalid instruction shape"));
		      return badtype;
		    }
		  regshape = neon_shape_tab[ns].el[i];
		  regwidth = neon_shape_el_size[regshape];

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  /* FP16 will use a single precision register.  */
		  if (regwidth == 32 && match == 16)
		    {
		      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
			match = regwidth;
		      else
			{
			  inst.error = _(BAD_FP16);
			  return badtype;
			}
		    }

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (_("bad type in Neon instruction"));
		      return badtype;
		    }
		}
	      else
		{
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  /* On success the key element's resolved type is the result.  */
  return inst.vectype.el[key_el];
}
14167
14168 /* Neon-style VFP instruction forwarding. */
14169
14170 /* Thumb VFP instructions have 0xE in the condition field. */
14171
14172 static void
14173 do_vfp_cond_or_thumb (void)
14174 {
14175 inst.is_neon = 1;
14176
14177 if (thumb_mode)
14178 inst.instruction |= 0xe0000000;
14179 else
14180 inst.instruction |= inst.cond << 28;
14181 }
14182
14183 /* Look up and encode a simple mnemonic, for use as a helper function for the
14184 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
14185 etc. It is assumed that operand parsing has already been done, and that the
14186 operands are in the form expected by the given opcode (this isn't necessarily
14187 the same as the form in which they were parsed, hence some massaging must
14188 take place before this function is called).
14189 Checks current arch version against that in the looked-up opcode. */
14190
14191 static void
14192 do_vfp_nsyn_opcode (const char *opname)
14193 {
14194 const struct asm_opcode *opcode;
14195
14196 opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
14197
14198 if (!opcode)
14199 abort ();
14200
14201 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
14202 thumb_mode ? *opcode->tvariant : *opcode->avariant),
14203 _(BAD_FPU));
14204
14205 inst.is_neon = 1;
14206
14207 if (thumb_mode)
14208 {
14209 inst.instruction = opcode->tvalue;
14210 opcode->tencode ();
14211 }
14212 else
14213 {
14214 inst.instruction = (inst.cond << 28) | opcode->avalue;
14215 opcode->aencode ();
14216 }
14217 }
14218
14219 static void
14220 do_vfp_nsyn_add_sub (enum neon_shape rs)
14221 {
14222 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
14223
14224 if (rs == NS_FFF || rs == NS_HHH)
14225 {
14226 if (is_add)
14227 do_vfp_nsyn_opcode ("fadds");
14228 else
14229 do_vfp_nsyn_opcode ("fsubs");
14230
14231 /* ARMv8.2 fp16 instruction. */
14232 if (rs == NS_HHH)
14233 do_scalar_fp16_v82_encode ();
14234 }
14235 else
14236 {
14237 if (is_add)
14238 do_vfp_nsyn_opcode ("faddd");
14239 else
14240 do_vfp_nsyn_opcode ("fsubd");
14241 }
14242 }
14243
14244 /* Check operand types to see if this is a VFP instruction, and if so call
14245 PFN (). */
14246
14247 static int
14248 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
14249 {
14250 enum neon_shape rs;
14251 struct neon_type_el et;
14252
14253 switch (args)
14254 {
14255 case 2:
14256 rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14257 et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14258 break;
14259
14260 case 3:
14261 rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14262 et = neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14263 N_F_ALL | N_KEY | N_VFP);
14264 break;
14265
14266 default:
14267 abort ();
14268 }
14269
14270 if (et.type != NT_invtype)
14271 {
14272 pfn (rs);
14273 return SUCCESS;
14274 }
14275
14276 inst.error = NULL;
14277 return FAIL;
14278 }
14279
14280 static void
14281 do_vfp_nsyn_mla_mls (enum neon_shape rs)
14282 {
14283 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
14284
14285 if (rs == NS_FFF || rs == NS_HHH)
14286 {
14287 if (is_mla)
14288 do_vfp_nsyn_opcode ("fmacs");
14289 else
14290 do_vfp_nsyn_opcode ("fnmacs");
14291
14292 /* ARMv8.2 fp16 instruction. */
14293 if (rs == NS_HHH)
14294 do_scalar_fp16_v82_encode ();
14295 }
14296 else
14297 {
14298 if (is_mla)
14299 do_vfp_nsyn_opcode ("fmacd");
14300 else
14301 do_vfp_nsyn_opcode ("fnmacd");
14302 }
14303 }
14304
14305 static void
14306 do_vfp_nsyn_fma_fms (enum neon_shape rs)
14307 {
14308 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
14309
14310 if (rs == NS_FFF || rs == NS_HHH)
14311 {
14312 if (is_fma)
14313 do_vfp_nsyn_opcode ("ffmas");
14314 else
14315 do_vfp_nsyn_opcode ("ffnmas");
14316
14317 /* ARMv8.2 fp16 instruction. */
14318 if (rs == NS_HHH)
14319 do_scalar_fp16_v82_encode ();
14320 }
14321 else
14322 {
14323 if (is_fma)
14324 do_vfp_nsyn_opcode ("ffmad");
14325 else
14326 do_vfp_nsyn_opcode ("ffnmad");
14327 }
14328 }
14329
14330 static void
14331 do_vfp_nsyn_mul (enum neon_shape rs)
14332 {
14333 if (rs == NS_FFF || rs == NS_HHH)
14334 {
14335 do_vfp_nsyn_opcode ("fmuls");
14336
14337 /* ARMv8.2 fp16 instruction. */
14338 if (rs == NS_HHH)
14339 do_scalar_fp16_v82_encode ();
14340 }
14341 else
14342 do_vfp_nsyn_opcode ("fmuld");
14343 }
14344
14345 static void
14346 do_vfp_nsyn_abs_neg (enum neon_shape rs)
14347 {
14348 int is_neg = (inst.instruction & 0x80) != 0;
14349 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_VFP | N_KEY);
14350
14351 if (rs == NS_FF || rs == NS_HH)
14352 {
14353 if (is_neg)
14354 do_vfp_nsyn_opcode ("fnegs");
14355 else
14356 do_vfp_nsyn_opcode ("fabss");
14357
14358 /* ARMv8.2 fp16 instruction. */
14359 if (rs == NS_HH)
14360 do_scalar_fp16_v82_encode ();
14361 }
14362 else
14363 {
14364 if (is_neg)
14365 do_vfp_nsyn_opcode ("fnegd");
14366 else
14367 do_vfp_nsyn_opcode ("fabsd");
14368 }
14369 }
14370
14371 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14372 insns belong to Neon, and are handled elsewhere. */
14373
14374 static void
14375 do_vfp_nsyn_ldm_stm (int is_dbmode)
14376 {
14377 int is_ldm = (inst.instruction & (1 << 20)) != 0;
14378 if (is_ldm)
14379 {
14380 if (is_dbmode)
14381 do_vfp_nsyn_opcode ("fldmdbs");
14382 else
14383 do_vfp_nsyn_opcode ("fldmias");
14384 }
14385 else
14386 {
14387 if (is_dbmode)
14388 do_vfp_nsyn_opcode ("fstmdbs");
14389 else
14390 do_vfp_nsyn_opcode ("fstmias");
14391 }
14392 }
14393
14394 static void
14395 do_vfp_nsyn_sqrt (void)
14396 {
14397 enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14398 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14399
14400 if (rs == NS_FF || rs == NS_HH)
14401 {
14402 do_vfp_nsyn_opcode ("fsqrts");
14403
14404 /* ARMv8.2 fp16 instruction. */
14405 if (rs == NS_HH)
14406 do_scalar_fp16_v82_encode ();
14407 }
14408 else
14409 do_vfp_nsyn_opcode ("fsqrtd");
14410 }
14411
14412 static void
14413 do_vfp_nsyn_div (void)
14414 {
14415 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14416 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14417 N_F_ALL | N_KEY | N_VFP);
14418
14419 if (rs == NS_FFF || rs == NS_HHH)
14420 {
14421 do_vfp_nsyn_opcode ("fdivs");
14422
14423 /* ARMv8.2 fp16 instruction. */
14424 if (rs == NS_HHH)
14425 do_scalar_fp16_v82_encode ();
14426 }
14427 else
14428 do_vfp_nsyn_opcode ("fdivd");
14429 }
14430
14431 static void
14432 do_vfp_nsyn_nmul (void)
14433 {
14434 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14435 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14436 N_F_ALL | N_KEY | N_VFP);
14437
14438 if (rs == NS_FFF || rs == NS_HHH)
14439 {
14440 NEON_ENCODE (SINGLE, inst);
14441 do_vfp_sp_dyadic ();
14442
14443 /* ARMv8.2 fp16 instruction. */
14444 if (rs == NS_HHH)
14445 do_scalar_fp16_v82_encode ();
14446 }
14447 else
14448 {
14449 NEON_ENCODE (DOUBLE, inst);
14450 do_vfp_dp_rd_rn_rm ();
14451 }
14452 do_vfp_cond_or_thumb ();
14453
14454 }
14455
14456 static void
14457 do_vfp_nsyn_cmp (void)
14458 {
14459 enum neon_shape rs;
14460 if (inst.operands[1].isreg)
14461 {
14462 rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14463 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14464
14465 if (rs == NS_FF || rs == NS_HH)
14466 {
14467 NEON_ENCODE (SINGLE, inst);
14468 do_vfp_sp_monadic ();
14469 }
14470 else
14471 {
14472 NEON_ENCODE (DOUBLE, inst);
14473 do_vfp_dp_rd_rm ();
14474 }
14475 }
14476 else
14477 {
14478 rs = neon_select_shape (NS_HI, NS_FI, NS_DI, NS_NULL);
14479 neon_check_type (2, rs, N_F_ALL | N_KEY | N_VFP, N_EQK);
14480
14481 switch (inst.instruction & 0x0fffffff)
14482 {
14483 case N_MNEM_vcmp:
14484 inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
14485 break;
14486 case N_MNEM_vcmpe:
14487 inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
14488 break;
14489 default:
14490 abort ();
14491 }
14492
14493 if (rs == NS_FI || rs == NS_HI)
14494 {
14495 NEON_ENCODE (SINGLE, inst);
14496 do_vfp_sp_compare_z ();
14497 }
14498 else
14499 {
14500 NEON_ENCODE (DOUBLE, inst);
14501 do_vfp_dp_rd ();
14502 }
14503 }
14504 do_vfp_cond_or_thumb ();
14505
14506 /* ARMv8.2 fp16 instruction. */
14507 if (rs == NS_HI || rs == NS_HH)
14508 do_scalar_fp16_v82_encode ();
14509 }
14510
14511 static void
14512 nsyn_insert_sp (void)
14513 {
14514 inst.operands[1] = inst.operands[0];
14515 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
14516 inst.operands[0].reg = REG_SP;
14517 inst.operands[0].isreg = 1;
14518 inst.operands[0].writeback = 1;
14519 inst.operands[0].present = 1;
14520 }
14521
14522 static void
14523 do_vfp_nsyn_push (void)
14524 {
14525 nsyn_insert_sp ();
14526
14527 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
14528 _("register list must contain at least 1 and at most 16 "
14529 "registers"));
14530
14531 if (inst.operands[1].issingle)
14532 do_vfp_nsyn_opcode ("fstmdbs");
14533 else
14534 do_vfp_nsyn_opcode ("fstmdbd");
14535 }
14536
14537 static void
14538 do_vfp_nsyn_pop (void)
14539 {
14540 nsyn_insert_sp ();
14541
14542 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
14543 _("register list must contain at least 1 and at most 16 "
14544 "registers"));
14545
14546 if (inst.operands[1].issingle)
14547 do_vfp_nsyn_opcode ("fldmias");
14548 else
14549 do_vfp_nsyn_opcode ("fldmiad");
14550 }
14551
14552 /* Fix up Neon data-processing instructions, ORing in the correct bits for
14553 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
14554
14555 static void
14556 neon_dp_fixup (struct arm_it* insn)
14557 {
14558 unsigned int i = insn->instruction;
14559 insn->is_neon = 1;
14560
14561 if (thumb_mode)
14562 {
14563 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
14564 if (i & (1 << 24))
14565 i |= 1 << 28;
14566
14567 i &= ~(1 << 24);
14568
14569 i |= 0xef000000;
14570 }
14571 else
14572 i |= 0xf2000000;
14573
14574 insn->instruction = i;
14575 }
14576
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  */

static unsigned
neon_logbits (unsigned x)
{
  /* ffs gives the 1-based index of the lowest set bit, so a power of two
     8 << n maps to n + 4; subtracting 4 yields 0..3 for 8/16/32/64.  */
  unsigned lowbit = ffs (x);
  return lowbit - 4;
}
14585
/* Split a Neon register number into its low 4 bits (the Rx field) and
   its high bit (the D/N/M extension bit).  */
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
14588
/* Encode insns with bit pattern:

  |28/24|23|22 |21 20|19 16|15 12|11    8|7|6|5|4|3  0|
  |  U  |x |D  |size | Rn  | Rd  |x x x x|N|Q|M|x| Rm |

  SIZE is passed in bits. -1 means size field isn't changed, in case it has a
  different meaning for some instruction.

  Operand 0 is Rd (destination), operand 1 is Rn, operand 2 is Rm; each
  register number is split into a 4-bit field plus a high bit via
  LOW4/HI1.  ISQUAD sets the Q bit, UBIT the U bit.  */

static void
neon_three_same (int isquad, int ubit, int size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;
  /* -1 leaves whatever the opcode bitmask put in the size field.  */
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
14613
/* Encode instructions of the form:

  |28/24|23|22|21 20|19 18|17 16|15 12|11      7|6|5|4|3  0|
  |  U  |x |D |x  x |size |x  x | Rd  |x x x x x|Q|M|x| Rm |

  Don't write size if SIZE == -1.

  Two-register form: operand 0 is Rd, operand 1 is Rm.  QBIT sets the
  Q bit, UBIT the U bit.  */

static void
neon_two_same (int qbit, int ubit, int size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (qbit != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;

  /* Note: the size field here is at bits 19-18, not 21-20.  */
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 18;

  neon_dp_fixup (&inst);
}
14636
14637 /* Neon instruction encoders, in approximate order of appearance. */
14638
14639 static void
14640 do_neon_dyadic_i_su (void)
14641 {
14642 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14643 struct neon_type_el et = neon_check_type (3, rs,
14644 N_EQK, N_EQK, N_SU_32 | N_KEY);
14645 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14646 }
14647
14648 static void
14649 do_neon_dyadic_i64_su (void)
14650 {
14651 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14652 struct neon_type_el et = neon_check_type (3, rs,
14653 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14654 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14655 }
14656
/* Encode a Neon immediate-shift instruction.  IMMBITS is the raw value
   for the imm6 field (callers pre-adjust it for right shifts).  ET gives
   the element type, whose size (in bytes, after the >>3 below) is spread
   across bit 7 (the "L" bit, set for 64-bit elements) and bits 21-19.
   The U bit is written only when WRITE_UBIT is set, from UVAL.  */
static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
		unsigned immbits)
{
  unsigned size = et.size >> 3;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= immbits << 16;
  /* size is 1/2/4/8; bit 3 of it becomes the L bit, the rest lands in
     the high bits of the immediate field.  */
  inst.instruction |= (size >> 3) << 7;
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  neon_dp_fixup (&inst);
}
14675
/* Encode VSHL: either an immediate shift (third operand is an immediate)
   or the three-register form, which needs its last two operands swapped
   to fit the common three-same encoding.  */
static void
do_neon_shl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
      int imm = inst.operands[2].imm;

      /* A left shift must be strictly less than the element size.  */
      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* VSHL/VQSHL 3-register variants have syntax such as:
	   vshl.xx Dd, Dm, Dn
	 whereas other 3-register operations encoded by neon_three_same have
	 syntax like:
	   vadd.xx Dd, Dn, Dm
	 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
	 here.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
14711
/* Encode VQSHL (saturating shift left): immediate or three-register form.
   Mirrors do_neon_shl_imm, but the immediate form also writes the U bit
   from the element signedness.  */
static void
do_neon_qshl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
      int imm = inst.operands[2].imm;

      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* See note in do_neon_shl_imm.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
14741
14742 static void
14743 do_neon_rshl (void)
14744 {
14745 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14746 struct neon_type_el et = neon_check_type (3, rs,
14747 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14748 unsigned int tmp;
14749
14750 tmp = inst.operands[2].reg;
14751 inst.operands[2].reg = inst.operands[1].reg;
14752 inst.operands[1].reg = tmp;
14753 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14754 }
14755
/* Compute the "cmode" field for a Neon logic-immediate instruction from
   IMMEDIATE and the element SIZE in bits, storing the (possibly shifted)
   8-bit payload through IMMBITS.  Returns the cmode value, or FAIL (via
   first_error) when the immediate cannot be encoded.  */
static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Handle .I8 pseudo-instructions.  */
  if (size == 8)
    {
      /* Unfortunately, this will make everything apart from zero out-of-range.
	 FIXME is this the intended semantics? There doesn't seem much point in
	 accepting .I8 if so.  */
      immediate |= immediate << 8;
      size = 16;
    }

  if (size >= 32)
    {
      /* A 32-bit immediate must have a single non-zero byte; the cmode
	 value selects which byte position it occupies.  */
      if (immediate == (immediate & 0x000000ff))
	{
	  *immbits = immediate;
	  return 0x1;
	}
      else if (immediate == (immediate & 0x0000ff00))
	{
	  *immbits = immediate >> 8;
	  return 0x3;
	}
      else if (immediate == (immediate & 0x00ff0000))
	{
	  *immbits = immediate >> 16;
	  return 0x5;
	}
      else if (immediate == (immediate & 0xff000000))
	{
	  *immbits = immediate >> 24;
	  return 0x7;
	}
      /* Fall back to a 16-bit encoding if both halves are identical.  */
      if ((immediate & 0xffff) != (immediate >> 16))
	goto bad_immediate;
      immediate &= 0xffff;
    }

  /* 16-bit element: the non-zero byte may be in either half.  */
  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return 0x9;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return 0xb;
    }

  bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}
14811
/* Encode the Neon bitwise-logic instructions.  The three-register form
   goes through the common three-same encoder with types ignored; the
   immediate forms compute a cmode encoding, rewriting VAND/VORN as
   VBIC/VORR with the immediate inverted.  */
static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      /* Immediate form; a two-operand spelling reuses the destination as
	 the first source.  */
      const int three_ops_form = (inst.operands[2].present
				  && !inst.operands[2].isreg);
      const int immoperand = (three_ops_form ? 2 : 1);
      enum neon_shape rs = (three_ops_form
			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
      struct neon_type_el et = neon_check_type (2, rs,
	N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
	return;

      if (three_ops_form)
	constraint (inst.operands[0].reg != inst.operands[1].reg,
		    _("first and second operands shall be the same register"));

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[immoperand].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern.  */
	  if (immbits != (inst.operands[immoperand].regisimm ?
			  inst.operands[immoperand].reg : 0))
	    {
	      /* Set immbits to an invalid constant.  */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
14897
14898 static void
14899 do_neon_bitfield (void)
14900 {
14901 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14902 neon_check_type (3, rs, N_IGNORE_TYPE);
14903 neon_three_same (neon_quad (rs), 0, -1);
14904 }
14905
/* Common encoder for dyadic operations that may be integer or float.
   TYPES restricts the accepted element types, DESTBITS is extra type
   bits for the destination.  For float element types the U bit is left
   clear; otherwise it is set when the element type equals
   UBIT_MEANING.  */
static void
neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
		  unsigned destbits)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
					    types | N_KEY);
  if (et.type == NT_float)
    {
      NEON_ENCODE (FLOAT, inst);
      /* fp16 writes its size; f32 size comes from the opcode bitmask.  */
      neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
    }
  else
    {
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
    }
}
14924
/* Dyadic op on signed/unsigned integer or float 8/16/32-bit elements.  */
static void
do_neon_dyadic_if_su (void)
{
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
14930
/* D-register-only variant of do_neon_dyadic_if_su.  */
static void
do_neon_dyadic_if_su_d (void)
{
  /* This version only allow D registers, but that constraint is enforced during
     operand parsing so we don't need to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
14938
/* Dyadic op on integer or float 32-bit elements, D registers only.  */
static void
do_neon_dyadic_if_i_d (void)
{
  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
14946
/* Flags for vfp_or_neon_is_neon, selecting which checks to apply.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,	/* Check/fix up the condition code.  */
  NEON_CHECK_ARCH = 2,	/* Require base Neon support.  */
  NEON_CHECK_ARCH8 = 4	/* Require ARMv8 Neon support.  */
};
14953
/* Call this function if an instruction which may have belonged to the VFP or
   Neon instruction sets, but turned out to be a Neon instruction (due to the
   operand types involved, etc.). We have to check and/or fix-up a couple of
   things:

     - Make sure the user hasn't attempted to make a Neon instruction
       conditional.
     - Alter the value in the condition code field if necessary.
     - Make sure that the arch supports Neon instructions.

   Which of these operations take place depends on bits from enum
   vfp_or_neon_is_neon_bits.

   WARNING: This function has side effects! If NEON_CHECK_CC is used and the
   current instruction's condition is COND_ALWAYS, the condition field is
   changed to inst.uncond_value. This is necessary because instructions shared
   between VFP and Neon may be conditional for the VFP variants only, and the
   unconditional Neon version must have, e.g., 0xF in the condition field.

   Returns SUCCESS, or FAIL with first_error set.  */

static int
vfp_or_neon_is_neon (unsigned check)
{
  /* Conditions are always legal in Thumb mode (IT blocks).  */
  if (!thumb_mode && (check & NEON_CHECK_CC))
    {
      if (inst.cond != COND_ALWAYS)
	{
	  first_error (_(BAD_COND));
	  return FAIL;
	}
      if (inst.uncond_value != -1)
	inst.instruction |= inst.uncond_value << 28;
    }

  if ((check & NEON_CHECK_ARCH)
      && !mark_feature_used (&fpu_neon_ext_v1))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  if ((check & NEON_CHECK_ARCH8)
      && !mark_feature_used (&fpu_neon_ext_armv8))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  return SUCCESS;
}
15004
/* Encode VADD/VSUB, trying the VFP form first and falling back to the
   Neon integer/float encoding.  */
static void
do_neon_addsub_if_i (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
}
15018
15019 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
15020 result to be:
15021 V<op> A,B (A is operand 0, B is operand 2)
15022 to mean:
15023 V<op> A,B,A
15024 not:
15025 V<op> A,B,B
15026 so handle that case specially. */
15027
15028 static void
15029 neon_exchange_operands (void)
15030 {
15031 if (inst.operands[1].present)
15032 {
15033 void *scratch = xmalloc (sizeof (inst.operands[0]));
15034
15035 /* Swap operands[1] and operands[2]. */
15036 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
15037 inst.operands[1] = inst.operands[2];
15038 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
15039 free (scratch);
15040 }
15041 else
15042 {
15043 inst.operands[1] = inst.operands[2];
15044 inst.operands[2] = inst.operands[0];
15045 }
15046 }
15047
/* Encode a Neon compare.  Register forms use the dyadic encoder (with
   operands exchanged when INVERT, e.g. for vclt expressed via vcgt);
   immediate forms are the compare-with-zero encodings, restricted to the
   types in IMMTYPES.  */
static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
	neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK | N_SIZ, immtypes | N_KEY);

      NEON_ENCODE (IMMED, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Bit 10 distinguishes the float from the integer compare.  */
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
}
15075
/* Non-inverted Neon compare (e.g. vcgt/vcge).  */
static void
do_neon_cmp (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, FALSE);
}
15081
/* Inverted Neon compare: operands are exchanged so the instruction can
   be encoded as its non-inverted counterpart.  */
static void
do_neon_cmp_inv (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, TRUE);
}
15087
/* Neon compare-equal: integer or float 8/16/32-bit element types.  */
static void
do_neon_ceq (void)
{
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
15093
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
   scalars, which are encoded in 5 bits, M : Rm.
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
   index in M.

   Dot Product instructions are similar to multiply instructions except elsize
   should always be 32.

   This function translates SCALAR, which is GAS's internal encoding of indexed
   scalar register, to raw encoding. There is also register and index range
   check based on ELSIZE.

   On a range violation, first_error is called and 0 is returned.  */

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  switch (elsize)
    {
    case 16:
      if (regno > 7 || elno > 3)
	goto bad_scalar;
      return regno | (elno << 3);

    case 32:
      if (regno > 15 || elno > 1)
	goto bad_scalar;
      return regno | (elno << 4);

    default:
    bad_scalar:
      first_error (_("scalar out of range for multiply instruction"));
    }

  return 0;
}
15132
/* Encode multiply / multiply-accumulate scalar instructions.
   Operand 2 holds the scalar (register+index); ET is the element type,
   UBIT the U bit value.  */

static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  /* Bit 8 marks the floating-point variant.  */
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  neon_dp_fixup (&inst);
}
15157
/* Encode VMLA/VMLS: VFP form if applicable, else scalar or three-register
   Neon form depending on the third operand.  */
static void
do_neon_mac_maybe_scalar (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_I16 | N_I32 | N_F_16_32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      /* The "untyped" case can't happen. Do this to stop the "U" bit being
	 affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}
15182
/* Encode VFMA/VFMS: VFP form if applicable, else the Neon dyadic form.  */
static void
do_neon_fmac (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
15194
15195 static void
15196 do_neon_tst (void)
15197 {
15198 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15199 struct neon_type_el et = neon_check_type (3, rs,
15200 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
15201 neon_three_same (neon_quad (rs), 0, et.size);
15202 }
15203
/* VMUL with 3 registers allows the P8 type. The scalar version supports the
   same types as the MAC equivalents. The polynomial type for this instruction
   is encoded the same as the integer type.  */

static void
do_neon_mul (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* The scalar form shares its encoding path with VMLA/VMLS.  */
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar ();
  else
    neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F16 | N_F32 | N_P8, 0);
}
15222
/* Encode VQDMULH/VQRDMULH: scalar or three-register form on signed
   16/32-bit elements.  */
static void
do_neon_qdmulh (void)
{
  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
15244
/* Encode VQRDMLAH/VQRDMLSH (ARMv8.1 AdvSIMD).  Errors out when the base
   architecture lacks ARMv8 Neon; warns and records the feature when the
   v8.1 extension was not explicitly enabled.  */
static void
do_neon_qrdmlah (void)
{
  /* Check we're on the correct architecture.  */
  if (!mark_feature_used (&fpu_neon_ext_armv8))
    inst.error =
      _("instruction form not available on this architecture.");
  else if (!mark_feature_used (&fpu_neon_ext_v8_1))
    {
      as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
      record_feature_use (&fpu_neon_ext_v8_1);
    }

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
15276
/* Encode VACGE/VACGT (absolute compare), float 16/32-bit elements.  */
static void
do_neon_fcmp_absolute (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
					    N_F_16_32 | N_KEY);
  /* Size field comes from bit mask.  */
  neon_three_same (neon_quad (rs), 1, et.size == 16 ? (int) et.size : -1);
}
15286
/* Inverted absolute compare (e.g. vacle via vacge): exchange operands
   and reuse the non-inverted encoder.  */
static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
15293
15294 static void
15295 do_neon_step (void)
15296 {
15297 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15298 struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
15299 N_F_16_32 | N_KEY);
15300 neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
15301 }
15302
/* Encode VABS/VNEG: VFP form if applicable, else the two-register Neon
   form on signed 32-bit or float 16/32-bit elements.  */
static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S_32 | N_F_16_32 | N_KEY);

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  /* Bit 10 selects the float variant.  */
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  neon_dp_fixup (&inst);
}
15328
15329 static void
15330 do_neon_sli (void)
15331 {
15332 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15333 struct neon_type_el et = neon_check_type (2, rs,
15334 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
15335 int imm = inst.operands[2].imm;
15336 constraint (imm < 0 || (unsigned)imm >= et.size,
15337 _("immediate out of range for insert"));
15338 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
15339 }
15340
15341 static void
15342 do_neon_sri (void)
15343 {
15344 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15345 struct neon_type_el et = neon_check_type (2, rs,
15346 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
15347 int imm = inst.operands[2].imm;
15348 constraint (imm < 1 || (unsigned)imm > et.size,
15349 _("immediate out of range for insert"));
15350 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
15351 }
15352
/* Encode VQSHLU (saturating shift left, unsigned result from signed
   operand) with an immediate shift count.  */
static void
do_neon_qshlu_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
15369
15370 static void
15371 do_neon_qmovn (void)
15372 {
15373 struct neon_type_el et = neon_check_type (2, NS_DQ,
15374 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
15375 /* Saturating move where operands can be signed or unsigned, and the
15376 destination has the same signedness. */
15377 NEON_ENCODE (INTEGER, inst);
15378 if (et.type == NT_unsigned)
15379 inst.instruction |= 0xc0;
15380 else
15381 inst.instruction |= 0x80;
15382 neon_two_same (0, 1, et.size / 2);
15383 }
15384
/* Encode VQMOVUN: saturating narrowing move producing an unsigned
   result from a signed operand.  */
static void
do_neon_qmovun (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  /* Saturating move with unsigned results. Operands must be signed.  */
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
15394
/* Encode VQSHRN/VQRSHRN (saturating shift right and narrow).  A zero
   shift is rewritten as the equivalent VQMOVN.  */
static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* Right shifts are encoded as size minus the shift amount.  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
15421
/* Encode VQSHRUN/VQRSHRUN (saturating shift right and narrow, unsigned
   result).  A zero shift is rewritten as the equivalent VQMOVUN.  */
static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
15451
/* Encode VMOVN: narrowing move, Q source to D destination.  */
static void
do_neon_movn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
15460
/* Encode VSHRN/VRSHRN (shift right and narrow).  A zero shift is
   rewritten as the equivalent VMOVN.  */
static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm>  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for narrowing operation"));
  /* Right shifts are encoded as size minus the shift amount.  */
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
15485
/* Encode VSHLL (shift left long, D source to Q destination).  A shift
   equal to the element size uses the special maximum-shift encoding;
   otherwise the ordinary immediate-shift encoding is used.  */
static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.  */
      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
	N_EQK | N_DBL, N_SU_32 | N_KEY);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
15515
/* Check the various types for the VCVT instruction, and return which version
   the current instruction is.  Each CVT_VAR line describes one conversion
   flavour: enum-name suffix, destination type bits, source type bits, a
   register/coprocessor qualifier, then up to three VFP opcode-table names
   (bitshift, plain and round-to-zero forms; NULL where no such VFP form
   exists).  The table is expanded multiple times with different CVT_VAR
   definitions.  */

#define CVT_FLAVOUR_VAR \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg,   "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg,   "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg,   "fsltos", "fsitos", NULL)      \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg,   "fultos", "fuitos", NULL)      \
  /* Half-precision conversions.  */					      \
  CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg,   NULL,     NULL,     NULL)      \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg,   NULL,     NULL,     NULL)      \
  /* New VCVT instructions introduced by ARMv8.2 fp16 extension.	      \
     Compared with single/double precision variants, only the co-processor \
     field is different, so the encoding flow is reused here.  */	      \
  CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL)    \
  CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL)    \
  CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
  CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
  /* VFP instructions.  */						      \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP,       NULL,     "fcvtsd", NULL)      \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP,       NULL,     "fcvtds", NULL)      \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL)      \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL)      \
  /* VFP instructions with bitshift.  */				      \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL,     NULL)      \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL,     NULL)      \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL,     NULL)      \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL,     NULL)      \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL,     NULL)      \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL,     NULL)      \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL,     NULL)      \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL,     NULL)
15554
/* Expand each flavour-table entry to an enumerator name.  */
#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  /* First flavour handled by the VFP (rather than Neon) encodings;
     everything from f32_f64 onwards in the table is VFP-only.  */
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
15567
/* Return the conversion flavour matching the operand types of the
   current instruction for shape RS, or neon_cvt_flavour_invalid if none
   matches.  Expanding CVT_FLAVOUR_VAR tries each flavour's type pair in
   table order; the locals WHOLE_REG and KEY are referenced from inside
   the expanded CVT_VAR entries.  A successful match clears any error a
   previous failed type check left in inst.error.  */

static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN) \
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y)); \
  if (et.type != NT_invtype) \
    { \
      inst.error = NULL; \
      return (neon_cvt_flavour_##C); \
    }

  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register.  Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}
15593
/* Rounding behaviours for the conversion (and related) instructions.
   A/N/P/M match the ARMv8 directed-rounding suffixes (ties-away,
   ties-even, towards +Inf, towards -Inf); Z is round towards zero.
   X and R presumably select the "use current rounding mode" forms
   (e.g. VCVTR) -- NOTE(review): confirm against the callers that map
   mnemonics to these modes.  */
enum neon_cvt_mode
{
  neon_cvt_mode_a,
  neon_cvt_mode_n,
  neon_cvt_mode_p,
  neon_cvt_mode_m,
  neon_cvt_mode_z,
  neon_cvt_mode_x,
  neon_cvt_mode_r
};
15604
/* Neon-syntax VFP conversions.  */

/* Emit a legacy-VFP conversion for shape RS and conversion FLAVOUR,
   selecting the opcode name from the flavour table: the bitshift (BSN)
   column for immediate-shift shapes, otherwise the plain (CN) column.
   Flavours with a NULL entry emit nothing.  */

static void
do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
{
  const char *opname = 0;

  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI
      || rs == NS_FHI || rs == NS_HFI)
    {
      /* Conversions with immediate bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	{
	  opname = enc[flavour];
	  /* The bitshift form is two-operand-plus-immediate; fold
	     operand 2 (the immediate) down into slot 1.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("operands 0 and 1 must be the same register"));
	  inst.operands[1] = inst.operands[2];
	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
	}
    }
  else
    {
      /* Conversions without bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	opname = enc[flavour];
    }

  if (opname)
    do_vfp_nsyn_opcode (opname);

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16
      || flavour == neon_cvt_flavour_f16_u32
      || flavour == neon_cvt_flavour_f16_s32)
    do_scalar_fp16_v82_encode ();
}
15658
/* Emit the round-towards-zero form of a VFP conversion, using the ZN
   (round-to-zero mnemonic) column of the flavour table.  Flavours with
   no round-to-zero form have a NULL entry and emit nothing.  */

static void
do_vfp_nsyn_cvtz (void)
{
  enum neon_shape rs = neon_select_shape (NS_FH, NS_FF, NS_FD, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
  const char *enc[] =
    {
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
      CVT_FLAVOUR_VAR
      NULL
#undef CVT_VAR
    };

  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
15675
15676 static void
15677 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
15678 enum neon_cvt_mode mode)
15679 {
15680 int sz, op;
15681 int rm;
15682
15683 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
15684 D register operands. */
15685 if (flavour == neon_cvt_flavour_s32_f64
15686 || flavour == neon_cvt_flavour_u32_f64)
15687 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
15688 _(BAD_FPU));
15689
15690 if (flavour == neon_cvt_flavour_s32_f16
15691 || flavour == neon_cvt_flavour_u32_f16)
15692 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
15693 _(BAD_FP16));
15694
15695 set_it_insn_type (OUTSIDE_IT_INSN);
15696
15697 switch (flavour)
15698 {
15699 case neon_cvt_flavour_s32_f64:
15700 sz = 1;
15701 op = 1;
15702 break;
15703 case neon_cvt_flavour_s32_f32:
15704 sz = 0;
15705 op = 1;
15706 break;
15707 case neon_cvt_flavour_s32_f16:
15708 sz = 0;
15709 op = 1;
15710 break;
15711 case neon_cvt_flavour_u32_f64:
15712 sz = 1;
15713 op = 0;
15714 break;
15715 case neon_cvt_flavour_u32_f32:
15716 sz = 0;
15717 op = 0;
15718 break;
15719 case neon_cvt_flavour_u32_f16:
15720 sz = 0;
15721 op = 0;
15722 break;
15723 default:
15724 first_error (_("invalid instruction shape"));
15725 return;
15726 }
15727
15728 switch (mode)
15729 {
15730 case neon_cvt_mode_a: rm = 0; break;
15731 case neon_cvt_mode_n: rm = 1; break;
15732 case neon_cvt_mode_p: rm = 2; break;
15733 case neon_cvt_mode_m: rm = 3; break;
15734 default: first_error (_("invalid rounding mode")); return;
15735 }
15736
15737 NEON_ENCODE (FPV8, inst);
15738 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
15739 encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
15740 inst.instruction |= sz << 8;
15741
15742 /* ARMv8.2 fp16 VCVT instruction. */
15743 if (flavour == neon_cvt_flavour_s32_f16
15744 ||flavour == neon_cvt_flavour_u32_f16)
15745 do_scalar_fp16_v82_encode ();
15746 inst.instruction |= op << 7;
15747 inst.instruction |= rm << 16;
15748 inst.instruction |= 0xf0000000;
15749 inst.is_neon = TRUE;
15750 }
15751
/* Common worker for the VCVT family.  MODE selects the rounding
   behaviour.  Dispatches to the legacy-VFP or FPv8 encoders where the
   operand shape/flavour demands it; otherwise emits the Advanced SIMD
   encoding directly.  */

static void
do_neon_cvt_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
					  NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ,
					  NS_FH, NS_HF, NS_FHI, NS_HFI,
					  NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

  if (flavour == neon_cvt_flavour_invalid)
    return;

  /* PR11109: Handle round-to-zero for VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
      && (flavour == neon_cvt_flavour_s16_f16
	  || flavour == neon_cvt_flavour_u16_f16
	  || flavour == neon_cvt_flavour_s32_f32
	  || flavour == neon_cvt_flavour_u32_f32
	  || flavour == neon_cvt_flavour_s32_f64
	  || flavour == neon_cvt_flavour_u32_f64)
      && (rs == NS_FD || rs == NS_FF))
    {
      do_vfp_nsyn_cvtz ();
      return;
    }

  /* ARMv8.2 fp16 VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16)
      && (flavour == neon_cvt_flavour_s32_f16
	  || flavour == neon_cvt_flavour_u32_f16)
      && (rs == NS_FH))
    {
      do_vfp_nsyn_cvtz ();
      do_scalar_fp16_v82_encode ();
      return;
    }

  /* VFP rather than Neon conversions.  */
  if (flavour >= neon_cvt_flavour_first_fp)
    {
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);

      return;
    }

  switch (rs)
    {
    case NS_DDI:
    case NS_QQI:
      /* Fixed-point conversion with an immediate fraction-bits count.  */
      {
	unsigned immbits;
	/* Opcode bits indexed by flavour; only the low-numbered flavours
	   (the DDI/QQI-capable ones) are expected to reach this arm --
	   TODO confirm no later flavour can index past these 8 entries.  */
	unsigned enctab[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
			     0x0000100, 0x1000100, 0x0, 0x1000000};

	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion.  */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	NEON_ENCODE (IMMED, inst);
	if (flavour != neon_cvt_flavour_invalid)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	if (flavour < neon_cvt_flavour_s16_f16)
	  {
	    /* 32-bit element variant.  NOTE(review): bit 21 is OR'd again
	       here although it was already set just above; harmless since
	       OR is idempotent, but possibly a leftover -- confirm.  */
	    inst.instruction |= 1 << 21;
	    immbits = 32 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	  }
	else
	  {
	    /* 16-bit element (fp16) variant.  */
	    inst.instruction |= 3 << 20;
	    immbits = 16 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	    inst.instruction &= ~(1 << 9);
	  }

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DD:
    case NS_QQ:
      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
	{
	  /* ARMv8 directed-rounding VCVT{A,N,P,M}: unconditional
	     encoding, forbidden inside an IT block.  */
	  NEON_ENCODE (FLOAT, inst);
	  set_it_insn_type (OUTSIDE_IT_INSN);

	  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	    return;

	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg);
	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	  inst.instruction |= neon_quad (rs) << 6;
	  /* Bit 7 distinguishes unsigned results.  */
	  inst.instruction |= (flavour == neon_cvt_flavour_u16_f16
			       || flavour == neon_cvt_flavour_u32_f32) << 7;
	  inst.instruction |= mode << 8;
	  if (flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_s16_f16)
	    /* Mask off the original size bits and reencode them.  */
	    inst.instruction = ((inst.instruction & 0xfff3ffff) | (1 << 18));

	  if (thumb_mode)
	    inst.instruction |= 0xfc000000;
	  else
	    inst.instruction |= 0xf0000000;
	}
      else
	{
	int_encode:
	  /* Plain integer <-> float conversion (also reached from the
	     fixed-point arm when the immediate is #0).  */
	  {
	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080,
				  0x100, 0x180, 0x0, 0x080};

	    NEON_ENCODE (INTEGER, inst);

	    if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	      return;

	    if (flavour != neon_cvt_flavour_invalid)
	      inst.instruction |= enctab[flavour];

	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	    inst.instruction |= LOW4 (inst.operands[1].reg);
	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	    inst.instruction |= neon_quad (rs) << 6;
	    if (flavour >= neon_cvt_flavour_s16_f16
		&& flavour <= neon_cvt_flavour_f16_u16)
	      /* Half precision.  */
	      inst.instruction |= 1 << 18;
	    else
	      inst.instruction |= 2 << 18;

	    neon_dp_fixup (&inst);
	  }
	}
      break;

    /* Half-precision conversions for Advanced SIMD -- neon.  */
    case NS_QD:
    case NS_DQ:

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      /* VCVT.F16.F32 narrows (DQ); VCVT.F32.F16 widens (QD).  */
      if (rs == NS_DQ)
	inst.instruction = 0x3b60600;
      else
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
    }
}
15943
/* VCVTR: convert using the current rounding mode (mode x).  */

static void
do_neon_cvtr (void)
{
  do_neon_cvt_1 (neon_cvt_mode_x);
}
15949
/* VCVT: convert with round-towards-zero.  */

static void
do_neon_cvt (void)
{
  do_neon_cvt_1 (neon_cvt_mode_z);
}
15955
/* VCVTA: convert rounding to nearest, ties away from zero.  */

static void
do_neon_cvta (void)
{
  do_neon_cvt_1 (neon_cvt_mode_a);
}
15961
/* VCVTN: convert rounding to nearest, ties to even.  */

static void
do_neon_cvtn (void)
{
  do_neon_cvt_1 (neon_cvt_mode_n);
}
15967
/* VCVTP: convert rounding towards +Inf.  */

static void
do_neon_cvtp (void)
{
  do_neon_cvt_1 (neon_cvt_mode_p);
}
15973
/* VCVTM: convert rounding towards -Inf.  */

static void
do_neon_cvtm (void)
{
  do_neon_cvt_1 (neon_cvt_mode_m);
}
15979
/* Encode one VCVTB/VCVTT (half-precision <-> single/double) instruction.
   T is TRUE for the top-half form (VCVTT), FALSE for VCVTB; TO is TRUE
   when converting towards half precision; IS_DOUBLE selects the F64
   variant, which requires the ARMv8 VFP extension.  */

static void
do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
{
  if (is_double)
    mark_feature_used (&fpu_vfp_ext_armv8);

  /* In the F64 form, the D register sits on the full-precision side:
     the destination when converting from f16, the source otherwise.  */
  encode_arm_vfp_reg (inst.operands[0].reg,
		      (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg,
		      (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= to ? 0x10000 : 0;
  inst.instruction |= t ? 0x80 : 0;
  inst.instruction |= is_double ? 0x100 : 0;
  do_vfp_cond_or_thumb ();
}
15995
/* Shared worker for VCVTB (T == FALSE) and VCVTT (T == TRUE).  Probes
   the operand types in turn to determine the conversion direction and
   width, clearing the speculative type-check error on each successful
   match, then defers to do_neon_cvttb_2 for the encoding.  */

static void
do_neon_cvttb_1 (bfd_boolean t)
{
  enum neon_shape rs = neon_select_shape (NS_HF, NS_HD, NS_FH, NS_FF, NS_FD,
					  NS_DF, NS_DH, NS_NULL);

  if (rs == NS_NULL)
    return;
  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
    {
      /* f32 -> f16.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
    {
      /* f16 -> f32.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
    {
      /* f64 -> f16.  The VCVTB and VCVTT instructions with D-register
	 operands don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
    {
      /* f16 -> f64.  The VCVTB and VCVTT instructions with D-register
	 operands don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
    }
  else
    return;
}
16037
/* VCVTB: half-precision conversion using the bottom half.  */

static void
do_neon_cvtb (void)
{
  do_neon_cvttb_1 (FALSE);
}
16043
16044
/* VCVTT: half-precision conversion using the top half.  */

static void
do_neon_cvtt (void)
{
  do_neon_cvttb_1 (TRUE);
}
16050
/* Encode the immediate form of VMOV/VMVN.  Determines the cmode/op
   encoding for the (possibly 64-bit) immediate, trying the inverted
   immediate with the opposite of VMOV/VMVN if the direct encoding
   fails, and writes the shuffled immediate bits into the opcode.  */

static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  /* A 64-bit immediate arrives split across imm (low word) and reg
     (high word) when regisimm is set.  */
  immlo = inst.operands[1].imm;
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV.  Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Rewrite the op bit, which may have been flipped above.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
16102
/* Encode VMVN: bitwise NOT of a register, or the immediate form handled
   by neon_move_immediate when operand 1 is not a register.  */

static void
do_neon_mvn (void)
{
  if (inst.operands[1].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);

      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
    }
  else
    {
      NEON_ENCODE (IMMED, inst);
      neon_move_immediate ();
    }

  neon_dp_fixup (&inst);
}
16125
/* Encode instructions of form:

   |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
   | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm |

   Shared encoder for the long/wide/narrow three-register classes.  ET
   supplies the signedness (U bit) and SIZE the two-bit size field; the
   callers adjust SIZE for narrowing/lengthening as needed.  */

static void
neon_mixed_length (struct neon_type_el et, unsigned size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (et.type == NT_unsigned) << 24;
  inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
16145
/* Encode a lengthening three-register operation (Q = D op D).  */

static void
do_neon_dyadic_long (void)
{
  /* FIXME: Type checking for lengthening op.  */
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
16154
/* Encode VABAL (absolute difference and accumulate, long).  */

static void
do_neon_abal (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
16162
/* Encode a long multiply(-accumulate) whose third operand is either a
   scalar or a full D register.  NOTE(review): REGTYPES is applied to
   the scalar variant and SCALARTYPES to the register variant -- the
   parameter names appear swapped relative to their use; confirm against
   the callers before renaming.  */

static void
neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
{
  if (inst.operands[2].isscalar)
    {
      struct neon_type_el et = neon_check_type (3, NS_QDS,
	N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, et.type == NT_unsigned);
    }
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      neon_mixed_length (et, et.size);
    }
}
16181
/* Long multiply-accumulate, register or scalar form (VMLAL etc.).  */

static void
do_neon_mac_maybe_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
16187
/* Like neon_scalar_for_mul, this function generates the Rm encoding for
   an FP16 scalar operand from GAS's internal SCALAR representation.
   QUAD_P is 1 for the Q form (register 0-7, element 0-3), 0 for the D
   form (register 0-15, element 0-1).  Reports an error and returns 0
   when the scalar is out of range for the chosen form.  */

static unsigned
neon_scalar_for_fmac_fp16_long (unsigned scalar, unsigned quad_p)
{
  unsigned reg = NEON_SCALAR_REG (scalar);
  unsigned elt = NEON_SCALAR_INDEX (scalar);

  if (quad_p)
    {
      if (reg <= 7 && elt <= 3)
	/* Vm[3:0] = reg, M = elt bit 0, bit 5 = elt bit 1.  */
	return ((reg & 0x7)
		| ((elt & 0x1) << 3)
		| (((elt >> 1) & 0x1) << 5));
    }
  else if (reg <= 15 && elt <= 1)
    /* D form splits the register number across bit 5 and Vm[2:0].  */
    return (((reg & 0x1) << 5)
	    | ((reg >> 1) & 0x7)
	    | ((elt & 0x1) << 3));

  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
16220
/* Encode VFMAL (SUBTYPE == 0) or VFMSL (SUBTYPE == 1): half-precision
   fused multiply-accumulate long, register or scalar-indexed form.
   Requires the ARMv8.2 FP16-FML extension and an ARMv8 Neon FPU.  */

static void
do_neon_fmac_maybe_scalar_long (int subtype)
{
  enum neon_shape rs;
  int high8;
  /* NOTE: vfmal/vfmsl use a slightly different NEON three-same encoding:
     the "size" field (bits [21:20]) has a different meaning.  For the
     scalar-index variant it differentiates add and subtract, otherwise
     it has the fixed value 0x2.  */
  int size = -1;

  if (inst.cond != COND_ALWAYS)
    as_warn (_("vfmal/vfmsl with FP16 type cannot be conditional, the "
	       "behaviour is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16_fml),
	      _(BAD_FP16));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* vfmal/vfmsl are in three-same D/Q register format or the third operand can
     be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      high8 = 0xfe000000;
      if (subtype)
	size = 16;
      rs = neon_select_shape (NS_DHS, NS_QDS, NS_NULL);
    }
  else
    {
      high8 = 0xfc000000;
      size = 32;
      if (subtype)
	/* Register form encodes subtract in bit 23 instead.  */
	inst.instruction |= (0x1 << 23);
      rs = neon_select_shape (NS_DHH, NS_QDD, NS_NULL);
    }

  neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_F16);

  /* "opcode" from template has included "ubit", so simply pass 0 here.  Also,
     the "S" bit in size field has been reused to differentiate vfmal and
     vfmsl, so we simply pass -1 as size.  */
  unsigned quad_p = (rs == NS_QDD || rs == NS_QDS);
  neon_three_same (quad_p, 0, size);

  /* Undo neon_dp_fixup.  Redo the high eight bits.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;

#define LOW1(R) ((R) & 0x1)
#define HI4(R) (((R) >> 1) & 0xf)
  /* Unlike usual NEON three-same, encoding for Vn and Vm will depend on
     whether the instruction is in Q form and whether Vm is a scalar indexed
     operand.  */
  if (inst.operands[2].isscalar)
    {
      unsigned rm
	= neon_scalar_for_fmac_fp16_long (inst.operands[2].reg, quad_p);
      inst.instruction &= 0xffffffd0;
      inst.instruction |= rm;

      if (!quad_p)
	{
	  /* Redo Rn as well.  */
	  inst.instruction &= 0xfff0ff7f;
	  inst.instruction |= HI4 (inst.operands[1].reg) << 16;
	  inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
	}
    }
  else if (!quad_p)
    {
      /* Redo Rn and Rm.  */
      inst.instruction &= 0xfff0ff50;
      inst.instruction |= HI4 (inst.operands[1].reg) << 16;
      inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
      inst.instruction |= HI4 (inst.operands[2].reg);
      inst.instruction |= LOW1 (inst.operands[2].reg) << 5;
    }
}
16302
/* VFMAL: half-precision fused multiply-accumulate long (add form).
   Plain call rather than `return expr;' -- a return with an expression
   is not valid C in a void function (C11 6.8.6.4).  */

static void
do_neon_vfmal (void)
{
  do_neon_fmac_maybe_scalar_long (0);
}
16308
/* VFMSL: half-precision fused multiply-subtract long.  Plain call
   rather than `return expr;' -- a return with an expression is not
   valid C in a void function (C11 6.8.6.4).  */

static void
do_neon_vfmsl (void)
{
  do_neon_fmac_maybe_scalar_long (1);
}
16314
/* Encode a widening three-register operation (Q = Q op D).  */

static void
do_neon_dyadic_wide (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QQD,
    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
16322
/* Encode a narrowing three-register operation (D = Q op Q); the size
   field encodes the narrowed (halved) element size.  */

static void
do_neon_dyadic_narrow (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
  /* Operand sign is unimportant, and the U bit is part of the opcode,
     so force the operand type to integer.  */
  et.type = NT_integer;
  neon_mixed_length (et, et.size / 2);
}
16333
/* Long saturating doubling multiply (VQDMULL family): signed 16/32-bit
   element types only, register or scalar form.  */

static void
do_neon_mul_sat_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
16339
/* Encode VMULL (long multiply), including the polynomial forms.  The
   scalar-indexed variant is handled by the generic long-MAC path.  */

static void
do_neon_vmull (void)
{
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);

      if (et.type == NT_poly)
	NEON_ENCODE (POLY, inst);
      else
	NEON_ENCODE (INTEGER, inst);

      /* For polynomial encoding the U bit must be zero, and the size must
	 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
	 obviously, as 0b10).  */
      if (et.size == 64)
	{
	  /* Check we're on the correct architecture.  */
	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
	    inst.error =
	      _("Instruction form not available on this architecture.");

	  /* Make the size field encode as 0b10 (32) for P64.  */
	  et.size = 32;
	}

      neon_mixed_length (et, et.size);
    }
}
16371
/* Encode VEXT (vector extract).  The element-count immediate is scaled
   to a byte index and must fit the register width (8 bytes for D,
   16 for Q).  */

static void
do_neon_ext (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  /* Convert the element-count immediate to a byte offset.  */
  unsigned imm = (inst.operands[3].imm * et.size) / 8;

  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
	      _("shift out of range"));
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= imm << 8;

  neon_dp_fixup (&inst);
}
16393
/* Encode VREV16/VREV32/VREV64 (vector reverse within regions).  */

static void
do_neon_rev (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask.  We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction.  */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
  gas_assert (elsize != 0);
  constraint (et.size >= elsize,
	      _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
16410
/* Encode VDUP: replicate either a vector scalar (operand 1 is Dn[x]) or
   an ARM core register across all lanes of a D/Q register.  */

static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      /* Scalar index shifted so that imm4 = {x, size-marker bits}.  */
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector.  */
      NEON_ENCODE (ARMREG, inst);
      switch (et.size)
	{
	case 8: inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
16461
16462 /* VMOV has particularly many variations. It can be one of:
16463 0. VMOV<c><q> <Qd>, <Qm>
16464 1. VMOV<c><q> <Dd>, <Dm>
16465 (Register operations, which are VORR with Rm = Rn.)
16466 2. VMOV<c><q>.<dt> <Qd>, #<imm>
16467 3. VMOV<c><q>.<dt> <Dd>, #<imm>
16468 (Immediate loads.)
16469 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
16470 (ARM register to scalar.)
16471 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
16472 (Two ARM registers to vector.)
16473 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
16474 (Scalar to ARM register.)
16475 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
16476 (Vector to two ARM registers.)
16477 8. VMOV.F32 <Sd>, <Sm>
16478 9. VMOV.F64 <Dd>, <Dm>
16479 (VFP register moves.)
16480 10. VMOV.F32 <Sd>, #imm
16481 11. VMOV.F64 <Dd>, #imm
16482 (VFP float immediate load.)
16483 12. VMOV <Rd>, <Sm>
16484 (VFP single to ARM reg.)
16485 13. VMOV <Sd>, <Rm>
16486 (ARM reg to VFP single.)
16487 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
16488 (Two ARM regs to two VFP singles.)
16489 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
16490 (Two VFP singles to two ARM regs.)
16491
16492 These cases can be disambiguated using neon_select_shape, except cases 1/9
16493 and 3/11 which depend on the operand type too.
16494
16495 All the encoded bits are hardcoded by this function.
16496
16497 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
16498 Cases 5, 7 may be used with VFPv2 and above.
16499
16500 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
16501 can specify a type where it doesn't make sense to, and is ignored). */
16502
static void
do_neon_mov (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
					  NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR,
					  NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
					  NS_HR, NS_RH, NS_HI, NS_NULL);
  struct neon_type_el et;
  /* Mnemonic of the VFP load-constant form; set for cases 10/11 before
     jumping to encode_fconstd.  */
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  do_vfp_nsyn_opcode ("fcpyd");
	  break;
	}
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;
	/* The architecture manual I have doesn't explicitly state which
	   value the U bit should have for register->register moves, but
	   the equivalent VORR instruction has U = 0, so do that.  */
	inst.instruction = 0x0200110;
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	/* Source register is duplicated into both Vn and Vm (VORR Dd,Dm,Dm).  */
	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
	inst.instruction |= neon_quad (rs) << 6;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* As for case 1/9, an absent type is not an error here.  */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 11 (fconstd).  */
	  ldconst = "fconstd";
	  goto encode_fconstd;
	}
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      neon_dp_fixup (&inst);
      break;

    case NS_SR:  /* case 4.  */
      {
	unsigned bcdebits = 0;
	int logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

	/* .<size> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Base opcode bits per element size; the scalar index is then
	   merged in above the size-dependent low bits.  */
	switch (et.size)
	  {
	  case 8:  bcdebits = 0x8; break;
	  case 16: bcdebits = 0x1; break;
	  case 32: bcdebits = 0x0; break;
	  default: ;
	  }

	bcdebits |= x << logsize;

	inst.instruction = 0xe000b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[1].reg << 12;
	/* Low two bits land in [6:5], the rest in [23:21].  */
	inst.instruction |= (bcdebits & 3) << 5;
	inst.instruction |= (bcdebits >> 2) << 21;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6.  */
      {
	unsigned logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
	unsigned abcdebits = 0;

	/* .<dt> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL,
			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Base opcode bits per element size and signedness; scalar index
	   is merged in above the size-dependent low bits.  */
	switch (et.size)
	  {
	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
	  case 32: abcdebits = 0x00; break;
	  default: ;
	  }

	abcdebits |= x << logsize;
	inst.instruction = 0xe100b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[0].reg << 12;
	/* Low two bits land in [6:5], the rest in [23:21].  */
	inst.instruction |= (abcdebits & 3) << 5;
	inst.instruction |= (abcdebits >> 2) << 21;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_HI:
    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
    encode_fconstd:
      /* Shared tail for cases 10 and 11: only 8-bit-encodable ("quarter
	 precision") float immediates are representable.  */
      if (is_quarter_float (inst.operands[1].imm))
	{
	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
	  do_vfp_nsyn_opcode (ldconst);

	  /* ARMv8.2 fp16 vmov.f16 instruction.  */
	  if (rs == NS_HI)
	    do_scalar_fp16_v82_encode ();
	}
      else
	first_error (_("immediate out of range"));
      break;

    case NS_RH:
    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_RH)
	do_scalar_fp16_v82_encode ();
      break;

    case NS_HR:
    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_HR)
	do_scalar_fp16_v82_encode ();
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    case NS_NULL:
      /* neon_select_shape has determined that the instruction
	 shape is wrong and has already set the error message.  */
      break;

    default:
      abort ();
    }
}
16751
16752 static void
16753 do_neon_rshift_round_imm (void)
16754 {
16755 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
16756 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
16757 int imm = inst.operands[2].imm;
16758
16759 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
16760 if (imm == 0)
16761 {
16762 inst.operands[2].present = 0;
16763 do_neon_mov ();
16764 return;
16765 }
16766
16767 constraint (imm < 1 || (unsigned)imm > et.size,
16768 _("immediate out of range for shift"));
16769 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
16770 et.size - imm);
16771 }
16772
/* Encode the ARMv8.2 half-precision register move (shape must be Sd, Sm
   with .f16 — any other suffix is rejected).  */

static void
do_neon_movhf (void)
{
  enum neon_shape rs = neon_select_shape (NS_HH, NS_NULL);
  constraint (rs != NS_HH, _("invalid suffix"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
	      _(BAD_FPU));

  if (inst.cond != COND_ALWAYS)
    {
      /* In Thumb mode an IT block may still apply, so only warn there; in
	 ARM mode a condition cannot be encoded at all, so it is an error.  */
      if (thumb_mode)
	{
	  as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
		     " the behaviour is UNPREDICTABLE"));
	}
      else
	{
	  inst.error = BAD_COND;
	  return;
	}
    }

  do_vfp_sp_monadic ();

  /* Emit the unconditional (0xF-prefixed) encoding.  */
  inst.is_neon = 1;
  inst.instruction |= 0xf0000000;
}
16801
16802 static void
16803 do_neon_movl (void)
16804 {
16805 struct neon_type_el et = neon_check_type (2, NS_QD,
16806 N_EQK | N_DBL, N_SU_32 | N_KEY);
16807 unsigned sizebits = et.size >> 3;
16808 inst.instruction |= sizebits << 19;
16809 neon_two_same (0, et.type == NT_unsigned, -1);
16810 }
16811
16812 static void
16813 do_neon_trn (void)
16814 {
16815 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16816 struct neon_type_el et = neon_check_type (2, rs,
16817 N_EQK, N_8 | N_16 | N_32 | N_KEY);
16818 NEON_ENCODE (INTEGER, inst);
16819 neon_two_same (neon_quad (rs), 1, et.size);
16820 }
16821
16822 static void
16823 do_neon_zip_uzp (void)
16824 {
16825 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16826 struct neon_type_el et = neon_check_type (2, rs,
16827 N_EQK, N_8 | N_16 | N_32 | N_KEY);
16828 if (rs == NS_DD && et.size == 32)
16829 {
16830 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
16831 inst.instruction = N_MNEM_vtrn;
16832 do_neon_trn ();
16833 return;
16834 }
16835 neon_two_same (neon_quad (rs), 1, et.size);
16836 }
16837
16838 static void
16839 do_neon_sat_abs_neg (void)
16840 {
16841 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16842 struct neon_type_el et = neon_check_type (2, rs,
16843 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16844 neon_two_same (neon_quad (rs), 1, et.size);
16845 }
16846
16847 static void
16848 do_neon_pair_long (void)
16849 {
16850 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16851 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
16852 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
16853 inst.instruction |= (et.type == NT_unsigned) << 7;
16854 neon_two_same (neon_quad (rs), 1, et.size);
16855 }
16856
16857 static void
16858 do_neon_recip_est (void)
16859 {
16860 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16861 struct neon_type_el et = neon_check_type (2, rs,
16862 N_EQK | N_FLT, N_F_16_32 | N_U32 | N_KEY);
16863 inst.instruction |= (et.type == NT_float) << 8;
16864 neon_two_same (neon_quad (rs), 1, et.size);
16865 }
16866
16867 static void
16868 do_neon_cls (void)
16869 {
16870 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16871 struct neon_type_el et = neon_check_type (2, rs,
16872 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16873 neon_two_same (neon_quad (rs), 1, et.size);
16874 }
16875
16876 static void
16877 do_neon_clz (void)
16878 {
16879 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16880 struct neon_type_el et = neon_check_type (2, rs,
16881 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
16882 neon_two_same (neon_quad (rs), 1, et.size);
16883 }
16884
16885 static void
16886 do_neon_cnt (void)
16887 {
16888 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16889 struct neon_type_el et = neon_check_type (2, rs,
16890 N_EQK | N_INT, N_8 | N_KEY);
16891 neon_two_same (neon_quad (rs), 1, et.size);
16892 }
16893
16894 static void
16895 do_neon_swp (void)
16896 {
16897 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16898 neon_two_same (neon_quad (rs), 1, -1);
16899 }
16900
16901 static void
16902 do_neon_tbl_tbx (void)
16903 {
16904 unsigned listlenbits;
16905 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
16906
16907 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
16908 {
16909 first_error (_("bad list length for table lookup"));
16910 return;
16911 }
16912
16913 listlenbits = inst.operands[1].imm - 1;
16914 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16915 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16916 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16917 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16918 inst.instruction |= LOW4 (inst.operands[2].reg);
16919 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16920 inst.instruction |= listlenbits << 8;
16921
16922 neon_dp_fixup (&inst);
16923 }
16924
/* Encode VLDM/VSTM (and the decrement-before VLDMDB/VSTMDB forms) for
   double-precision register lists; single-precision lists are handed off
   to the VFP encoder.  */

static void
do_neon_ldm_stm (void)
{
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* Each D register takes two words in the offset field.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
16954
16955 static void
16956 do_neon_ldr_str (void)
16957 {
16958 int is_ldr = (inst.instruction & (1 << 20)) != 0;
16959
16960 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
16961 And is UNPREDICTABLE in thumb mode. */
16962 if (!is_ldr
16963 && inst.operands[1].reg == REG_PC
16964 && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
16965 {
16966 if (thumb_mode)
16967 inst.error = _("Use of PC here is UNPREDICTABLE");
16968 else if (warn_on_deprecated)
16969 as_tsktsk (_("Use of PC here is deprecated"));
16970 }
16971
16972 if (inst.operands[0].issingle)
16973 {
16974 if (is_ldr)
16975 do_vfp_nsyn_opcode ("flds");
16976 else
16977 do_vfp_nsyn_opcode ("fsts");
16978
16979 /* ARMv8.2 vldr.16/vstr.16 instruction. */
16980 if (inst.vectype.el[0].size == 16)
16981 do_scalar_fp16_v82_encode ();
16982 }
16983 else
16984 {
16985 if (is_ldr)
16986 do_vfp_nsyn_opcode ("fldd");
16987 else
16988 do_vfp_nsyn_opcode ("fstd");
16989 }
16990 }
16991
16992 /* "interleave" version also handles non-interleaving register VLD1/VST1
16993 instructions. */
16994
static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL,
					    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1.  */
       -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2.  */
       -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3.  */
       -1, -1, -1, -1, -1, -1, 0x0, 0x1  /* VLD4 / VST4.  */
    };
  int typebits;

  /* Type check failed; presumably the error was already recorded by
     neon_check_type, so return silently.  */
  if (et.type == NT_invtype)
    return;

  /* Optional alignment specifier: only 64/128/256-bit alignments are
     representable, and 128/256 further restrict the list length.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 2;
	break;
      case 256:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 3;
	break;
      default:
      bad_alignment:
	first_error (_("bad alignment"));
	return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6].  We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask.  Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
	| (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));
  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
	      _("bad element type for instruction"));

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
17060
17061 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
17062 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
17063 otherwise. The variable arguments are a list of pairs of legal (size, align)
17064 values, terminated with -1. */
17065
17066 static int
17067 neon_alignment_bit (int size, int align, int *do_alignment, ...)
17068 {
17069 va_list ap;
17070 int result = FAIL, thissize, thisalign;
17071
17072 if (!inst.operands[1].immisalign)
17073 {
17074 *do_alignment = 0;
17075 return SUCCESS;
17076 }
17077
17078 va_start (ap, do_alignment);
17079
17080 do
17081 {
17082 thissize = va_arg (ap, int);
17083 if (thissize == -1)
17084 break;
17085 thisalign = va_arg (ap, int);
17086
17087 if (size == thissize && align == thisalign)
17088 result = SUCCESS;
17089 }
17090 while (result != SUCCESS);
17091
17092 va_end (ap);
17093
17094 if (result == SUCCESS)
17095 *do_alignment = 1;
17096 else
17097 first_error (_("unsupported alignment for instruction"));
17098
17099 return result;
17100 }
17101
/* Encode the single-lane VLD<n>/VST<n> forms (one element per listed
   register, selected by a lane index).  */

static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;
  int logsize = neon_logbits (et.size);
  int align = inst.operands[1].imm >> 8;
  /* <n> minus one, taken from bits [9:8] of the initial bitmask.  */
  int n = (inst.instruction >> 8) & 3;
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  /* Legal alignments and their encodings differ per <n>.  */
  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 16,
				       16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
17186
17187 /* Encode single n-element structure to all lanes VLD<n> instructions. */
17188
static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;

  if (et.type == NT_invtype)
    return;

  /* Bits [9:8] of the initial bitmask hold <n> minus one.  */
  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      /* A two-register list sets bit 5; longer lists are invalid.  */
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 8, 16, 16, 32, 32, 64,
				       -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* 32-bit elements with 128-bit alignment have a dedicated size
	   encoding.  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  inst.instruction |= do_alignment << 4;
}
17261
17262 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
17263 apart from bits [11:4]. */
17264
17265 static void
17266 do_neon_ldx_stx (void)
17267 {
17268 if (inst.operands[1].isreg)
17269 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
17270
17271 switch (NEON_LANE (inst.operands[0].imm))
17272 {
17273 case NEON_INTERLEAVE_LANES:
17274 NEON_ENCODE (INTERLV, inst);
17275 do_neon_ld_st_interleave ();
17276 break;
17277
17278 case NEON_ALL_LANES:
17279 NEON_ENCODE (DUP, inst);
17280 if (inst.instruction == N_INV)
17281 {
17282 first_error ("only loads support such operands");
17283 break;
17284 }
17285 do_neon_ld_dup ();
17286 break;
17287
17288 default:
17289 NEON_ENCODE (LANE, inst);
17290 do_neon_ld_st_lane ();
17291 }
17292
17293 /* L bit comes from bit mask. */
17294 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17295 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17296 inst.instruction |= inst.operands[1].reg << 16;
17297
17298 if (inst.operands[1].postind)
17299 {
17300 int postreg = inst.operands[1].imm & 0xf;
17301 constraint (!inst.operands[1].immisreg,
17302 _("post-index must be a register"));
17303 constraint (postreg == 0xd || postreg == 0xf,
17304 _("bad register for post-index"));
17305 inst.instruction |= postreg;
17306 }
17307 else
17308 {
17309 constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
17310 constraint (inst.reloc.exp.X_op != O_constant
17311 || inst.reloc.exp.X_add_number != 0,
17312 BAD_ADDR_MODE);
17313
17314 if (inst.operands[1].writeback)
17315 {
17316 inst.instruction |= 0xd;
17317 }
17318 else
17319 inst.instruction |= 0xf;
17320 }
17321
17322 if (thumb_mode)
17323 inst.instruction |= 0xf9000000;
17324 else
17325 inst.instruction |= 0xf4000000;
17326 }
17327
17328 /* FP v8. */
/* Common encoder for the three-operand FP v8 (ARMv8 VFP) instructions.
   RS is the already-selected instruction shape.  */
static void
do_vfp_nsyn_fpv8 (enum neon_shape rs)
{
  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  NEON_ENCODE (FPV8, inst);

  if (rs == NS_FFF || rs == NS_HHH)
    {
      do_vfp_sp_dyadic ();

      /* ARMv8.2 fp16 instruction.  */
      if (rs == NS_HHH)
	do_scalar_fp16_v82_encode ();
    }
  else
    do_vfp_dp_rd_rn_rm ();

  /* Bit 8 marks the double-precision variant.  */
  if (rs == NS_DDD)
    inst.instruction |= 0x100;

  /* Force the unconditional (0xF-prefixed) encoding.  */
  inst.instruction |= 0xf0000000;
}
17356
17357 static void
17358 do_vsel (void)
17359 {
17360 set_it_insn_type (OUTSIDE_IT_INSN);
17361
17362 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
17363 first_error (_("invalid instruction shape"));
17364 }
17365
/* Encode the VMAXNM/VMINNM family: try the VFP scalar form first, then
   fall back to the Neon vector form.  */
static void
do_vmaxnm (void)
{
  /* No conditional encoding exists; forbid use inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
    return;

  neon_dyadic_misc (NT_untyped, N_F_16_32, 0);
}
17379
/* Common encoder for the VRINT family; MODE selects the rounding mode.
   The VFP (scalar) encoding is preferred when the type check succeeds,
   otherwise the Neon (vector) encoding is attempted.  */

static void
do_vrint_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et;

  if (rs == NS_NULL)
    return;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY
			| N_VFP);
  if (et.type != NT_invtype)
    {
      /* VFP encodings.  */
      /* The a/n/p/m modes have no conditional encoding.  */
      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
	set_it_insn_type (OUTSIDE_IT_INSN);

      NEON_ENCODE (FPV8, inst);
      if (rs == NS_FF || rs == NS_HH)
	do_vfp_sp_monadic ();
      else
	do_vfp_dp_rd_rm ();

      switch (mode)
	{
	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
	default: abort ();
	}

      inst.instruction |= (rs == NS_DD) << 8;
      do_vfp_cond_or_thumb ();

      /* ARMv8.2 fp16 vrint instruction.  */
      if (rs == NS_HH)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      /* Neon encodings (or something broken...).  */
      inst.error = NULL;
      et = neon_check_type (2, rs, N_EQK, N_F_16_32 | N_KEY);

      if (et.type == NT_invtype)
	return;

      set_it_insn_type (OUTSIDE_IT_INSN);
      NEON_ENCODE (FLOAT, inst);

      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	return;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Mask off the original size bits and reencode them.  */
      inst.instruction = ((inst.instruction & 0xfff3ffff)
			  | neon_logbits (et.size) << 18);

      switch (mode)
	{
	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
	default: abort ();
	}

      if (thumb_mode)
	inst.instruction |= 0xfc000000;
      else
	inst.instruction |= 0xf0000000;
    }
}
17471
/* Encode VRINTX (rounding mode 'x').  */
static void
do_vrintx (void)
{
  do_vrint_1 (neon_cvt_mode_x);
}
17477
/* Encode VRINTZ (rounding mode 'z').  */
static void
do_vrintz (void)
{
  do_vrint_1 (neon_cvt_mode_z);
}
17483
/* Encode VRINTR (rounding mode 'r').  */
static void
do_vrintr (void)
{
  do_vrint_1 (neon_cvt_mode_r);
}
17489
/* Encode VRINTA (rounding mode 'a').  */
static void
do_vrinta (void)
{
  do_vrint_1 (neon_cvt_mode_a);
}
17495
/* Encode VRINTN (rounding mode 'n').  */
static void
do_vrintn (void)
{
  do_vrint_1 (neon_cvt_mode_n);
}
17501
/* Encode VRINTP (rounding mode 'p').  */
static void
do_vrintp (void)
{
  do_vrint_1 (neon_cvt_mode_p);
}
17507
/* Encode VRINTM (rounding mode 'm').  */
static void
do_vrintm (void)
{
  do_vrint_1 (neon_cvt_mode_m);
}
17513
/* Validate a scalar operand for VCMLA and pack its register number and
   element index into the encodable form.  Records an error and returns 0
   if the scalar is out of range for the given element size.  */
static unsigned
neon_scalar_for_vcmla (unsigned opnd, unsigned elsize)
{
  unsigned reg = NEON_SCALAR_REG (opnd);
  unsigned idx = NEON_SCALAR_INDEX (opnd);

  if (elsize == 16)
    {
      /* fp16: two indexable elements, register restricted to D0-D15.  */
      if (idx < 2 && reg < 16)
	return reg | (idx << 4);
    }
  else if (elsize == 32 && idx == 0)
    return reg;

  first_error (_("scalar out of range"));
  return 0;
}
17528
/* Encode VCMLA; the rotation immediate must be 0, 90, 180 or 270 degrees.
   The third operand may be a scalar (by-element form) or a full register.  */
static void
do_vcmla (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));
  constraint (inst.reloc.exp.X_op != O_constant, _("expression too complex"));
  unsigned rot = inst.reloc.exp.X_add_number;
  constraint (rot != 0 && rot != 90 && rot != 180 && rot != 270,
	      _("immediate out of range"));
  /* Reduce the rotation to its two-bit encoding (0..3).  */
  rot /= 90;
  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDSI, NS_QQSI, NS_NULL);
      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      unsigned m = neon_scalar_for_vcmla (inst.operands[2].reg, size);
      inst.is_neon = 1;
      inst.instruction = 0xfe000800;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
      inst.instruction |= HI1 (inst.operands[1].reg) << 7;
      inst.instruction |= LOW4 (m);
      inst.instruction |= HI1 (m) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= rot << 20;
      inst.instruction |= (size == 32) << 23;
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      neon_three_same (neon_quad (rs), 0, -1);
      inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
      inst.instruction |= 0xfc200800;
      inst.instruction |= rot << 23;
      inst.instruction |= (size == 32) << 20;
    }
}
17569
/* Encode VCADD; only rotations of 90 or 270 degrees are valid, carried in
   a single bit (bit 24).  */
static void
do_vcadd (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));
  constraint (inst.reloc.exp.X_op != O_constant, _("expression too complex"));
  unsigned rot = inst.reloc.exp.X_add_number;
  constraint (rot != 90 && rot != 270, _("immediate out of range"));
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				   N_KEY | N_F16 | N_F32).size;
  neon_three_same (neon_quad (rs), 0, -1);
  inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
  inst.instruction |= 0xfc800800;
  inst.instruction |= (rot == 270) << 24;
  inst.instruction |= (size == 32) << 20;
}
17587
17588 /* Dot Product instructions encoding support. */
17589
/* Worker for the Dot Product instructions (VSDOT/VUDOT).  UNSIGNED_P
   selects the unsigned variant (VUDOT) over the signed one (VSDOT).  */

static void
do_neon_dotproduct (int unsigned_p)
{
  enum neon_shape rs;
  unsigned scalar_oprd2 = 0;
  int high8;

  if (inst.cond != COND_ALWAYS)
    as_warn (_("Dot Product instructions cannot be conditional, the behaviour "
	       "is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* Dot Product instructions are in three-same D/Q register format or the third
     operand can be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      scalar_oprd2 = neon_scalar_for_mul (inst.operands[2].reg, 32);
      high8 = 0xfe000000;
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
    }
  else
    {
      high8 = 0xfc000000;
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
    }

  if (unsigned_p)
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_U8);
  else
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_S8);

  /* The "U" bit in traditional Three Same encoding is fixed to 0 for Dot
     Product instruction, so we pass 0 as the "ubit" parameter.  And the
     "Size" field are fixed to 0x2, so we pass 32 as the "size" parameter.  */
  neon_three_same (neon_quad (rs), 0, 32);

  /* Undo neon_dp_fixup.  Dot Product instructions are using a slightly
     different NEON three-same encoding.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;
  /* Encode 'U' bit which indicates signedness.  */
  inst.instruction |= (unsigned_p ? 1 : 0) << 4;
  /* Re-encode operand2 if it's indexed scalar operand.  What has been encoded
     from inst.operand[2].reg in neon_three_same is GAS's internal encoding, not
     the instruction encoding.  */
  if (inst.operands[2].isscalar)
    {
      inst.instruction &= 0xffffffd0;
      inst.instruction |= LOW4 (scalar_oprd2);
      inst.instruction |= HI1 (scalar_oprd2) << 5;
    }
}
17644
17645 /* Dot Product instructions for signed integer. */
17646
/* Encode VSDOT, the signed Dot Product instruction.  */

static void
do_neon_dotproduct_s (void)
{
  /* ISO C forbids 'return <expression>' in a function returning void
     (C11 6.8.6.4p1), so simply call the worker and fall off the end.  */
  do_neon_dotproduct (0);
}
17652
17653 /* Dot Product instructions for unsigned integer. */
17654
/* Encode VUDOT, the unsigned Dot Product instruction.  */

static void
do_neon_dotproduct_u (void)
{
  /* ISO C forbids 'return <expression>' in a function returning void
     (C11 6.8.6.4p1), so simply call the worker and fall off the end.  */
  do_neon_dotproduct (1);
}
17660
17661 /* Crypto v1 instructions. */
17662 static void
17663 do_crypto_2op_1 (unsigned elttype, int op)
17664 {
17665 set_it_insn_type (OUTSIDE_IT_INSN);
17666
17667 if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
17668 == NT_invtype)
17669 return;
17670
17671 inst.error = NULL;
17672
17673 NEON_ENCODE (INTEGER, inst);
17674 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17675 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17676 inst.instruction |= LOW4 (inst.operands[1].reg);
17677 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
17678 if (op != -1)
17679 inst.instruction |= op << 6;
17680
17681 if (thumb_mode)
17682 inst.instruction |= 0xfc000000;
17683 else
17684 inst.instruction |= 0xf0000000;
17685 }
17686
17687 static void
17688 do_crypto_3op_1 (int u, int op)
17689 {
17690 set_it_insn_type (OUTSIDE_IT_INSN);
17691
17692 if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
17693 N_32 | N_UNT | N_KEY).type == NT_invtype)
17694 return;
17695
17696 inst.error = NULL;
17697
17698 NEON_ENCODE (INTEGER, inst);
17699 neon_three_same (1, u, 8 << op);
17700 }
17701
/* AESE: AES single-round encryption.  */
static void
do_aese (void)
{
  do_crypto_2op_1 (N_8, 0);
}
17707
/* AESD: AES single-round decryption.  */
static void
do_aesd (void)
{
  do_crypto_2op_1 (N_8, 1);
}
17713
/* AESMC: AES mix columns.  */
static void
do_aesmc (void)
{
  do_crypto_2op_1 (N_8, 2);
}
17719
/* AESIMC: AES inverse mix columns.  */
static void
do_aesimc (void)
{
  do_crypto_2op_1 (N_8, 3);
}
17725
/* SHA1C: SHA1 hash update, choose part.  */
static void
do_sha1c (void)
{
  do_crypto_3op_1 (0, 0);
}
17731
/* SHA1P: SHA1 hash update, parity part.  */
static void
do_sha1p (void)
{
  do_crypto_3op_1 (0, 1);
}
17737
/* SHA1M: SHA1 hash update, majority part.  */
static void
do_sha1m (void)
{
  do_crypto_3op_1 (0, 2);
}
17743
/* SHA1SU0: SHA1 schedule update 0.  */
static void
do_sha1su0 (void)
{
  do_crypto_3op_1 (0, 3);
}
17749
/* SHA256H: SHA256 hash update, part 1.  */
static void
do_sha256h (void)
{
  do_crypto_3op_1 (1, 0);
}
17755
/* SHA256H2: SHA256 hash update, part 2.  */
static void
do_sha256h2 (void)
{
  do_crypto_3op_1 (1, 1);
}
17761
/* SHA256SU1: SHA256 schedule update 1.  */
static void
do_sha256su1 (void)
{
  do_crypto_3op_1 (1, 2);
}
17767
/* SHA1H: SHA1 fixed rotate.  No op field is encoded (-1).  */
static void
do_sha1h (void)
{
  do_crypto_2op_1 (N_32, -1);
}
17773
/* SHA1SU1: SHA1 schedule update 1.  */
static void
do_sha1su1 (void)
{
  do_crypto_2op_1 (N_32, 0);
}
17779
/* SHA256SU0: SHA256 schedule update 0.  */
static void
do_sha256su0 (void)
{
  do_crypto_2op_1 (N_32, 1);
}
17785
17786 static void
17787 do_crc32_1 (unsigned int poly, unsigned int sz)
17788 {
17789 unsigned int Rd = inst.operands[0].reg;
17790 unsigned int Rn = inst.operands[1].reg;
17791 unsigned int Rm = inst.operands[2].reg;
17792
17793 set_it_insn_type (OUTSIDE_IT_INSN);
17794 inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
17795 inst.instruction |= LOW4 (Rn) << 16;
17796 inst.instruction |= LOW4 (Rm);
17797 inst.instruction |= sz << (thumb_mode ? 4 : 21);
17798 inst.instruction |= poly << (thumb_mode ? 20 : 9);
17799
17800 if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
17801 as_warn (UNPRED_REG ("r15"));
17802 }
17803
/* CRC32B: CRC-32 of a byte.  */
static void
do_crc32b (void)
{
  do_crc32_1 (0, 0);
}
17809
/* CRC32H: CRC-32 of a halfword.  */
static void
do_crc32h (void)
{
  do_crc32_1 (0, 1);
}
17815
/* CRC32W: CRC-32 of a word.  */
static void
do_crc32w (void)
{
  do_crc32_1 (0, 2);
}
17821
/* CRC32CB: CRC-32C of a byte.  */
static void
do_crc32cb (void)
{
  do_crc32_1 (1, 0);
}
17827
/* CRC32CH: CRC-32C of a halfword.  */
static void
do_crc32ch (void)
{
  do_crc32_1 (1, 1);
}
17833
/* CRC32CW: CRC-32C of a word.  */
static void
do_crc32cw (void)
{
  do_crc32_1 (1, 2);
}
17839
/* VJCVT (Armv8.3-A): convert double-precision to a 32-bit signed
   integer with Javascript (round-towards-zero) semantics.  */

static void
do_vjcvt (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
	      _(BAD_FPU));
  neon_check_type (2, NS_FD, N_S32, N_F64);
  do_vfp_sp_dp_cvt ();
  do_vfp_cond_or_thumb ();
}
17849
17850 \f
17851 /* Overall per-instruction processing. */
17852
17853 /* We need to be able to fix up arbitrary expressions in some statements.
17854 This is so that we can handle symbols that are an arbitrary distance from
17855 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
17856 which returns part of an address in a form which will be valid for
17857 a data instruction. We do this by pushing the expression into a symbol
17858 in the expr_section, and creating a fix for that. */
17859
17860 static void
17861 fix_new_arm (fragS * frag,
17862 int where,
17863 short int size,
17864 expressionS * exp,
17865 int pc_rel,
17866 int reloc)
17867 {
17868 fixS * new_fix;
17869
17870 switch (exp->X_op)
17871 {
17872 case O_constant:
17873 if (pc_rel)
17874 {
17875 /* Create an absolute valued symbol, so we have something to
17876 refer to in the object file. Unfortunately for us, gas's
17877 generic expression parsing will already have folded out
17878 any use of .set foo/.type foo %function that may have
17879 been used to set type information of the target location,
17880 that's being specified symbolically. We have to presume
17881 the user knows what they are doing. */
17882 char name[16 + 8];
17883 symbolS *symbol;
17884
17885 sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);
17886
17887 symbol = symbol_find_or_make (name);
17888 S_SET_SEGMENT (symbol, absolute_section);
17889 symbol_set_frag (symbol, &zero_address_frag);
17890 S_SET_VALUE (symbol, exp->X_add_number);
17891 exp->X_op = O_symbol;
17892 exp->X_add_symbol = symbol;
17893 exp->X_add_number = 0;
17894 }
17895 /* FALLTHROUGH */
17896 case O_symbol:
17897 case O_add:
17898 case O_subtract:
17899 new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
17900 (enum bfd_reloc_code_real) reloc);
17901 break;
17902
17903 default:
17904 new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
17905 pc_rel, (enum bfd_reloc_code_real) reloc);
17906 break;
17907 }
17908
17909 /* Mark whether the fix is to a THUMB instruction, or an ARM
17910 instruction. */
17911 new_fix->tc_fix_data = thumb_mode;
17912 }
17913
/* Create a frag for an instruction requiring relaxation.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  /* Split the relocation expression into the symbol/offset pair that
     frag_var needs.  */
  switch (inst.reloc.exp.X_op)
    {
    case O_symbol:
      sym = inst.reloc.exp.X_add_symbol;
      offset = inst.reloc.exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.reloc.exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.reloc.exp);
      offset = 0;
      break;
    }
  /* Emit the narrow (Thumb-size) form now; relaxation may later grow
     the variant frag to a full INSN_SIZE encoding.  */
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
17945
17946 /* Write a 32-bit thumb instruction to buf. */
17947 static void
17948 put_thumb32_insn (char * buf, unsigned long insn)
17949 {
17950 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
17951 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
17952 }
17953
/* Emit the assembled instruction held in inst into the output frag,
   together with any fixup and DWARF line information.  STR is the
   original source line, used only for error reporting.  */

static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  if (inst.relax)
    {
      /* Relaxable instructions go into a variant frag instead.  */
      output_relax_insn ();
      return;
    }
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      /* 32-bit Thumb instruction: emitted high halfword first.  */
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* 8-byte ARM case: the same instruction word is written twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    fix_new_arm (frag_now, to - frag_now->fr_literal,
		 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
		 inst.reloc.type);

  dwarf2_emit_insn (inst.size);
}
18000
18001 static char *
18002 output_it_inst (int cond, int mask, char * to)
18003 {
18004 unsigned long instruction = 0xbf00;
18005
18006 mask &= 0xf;
18007 instruction |= mask;
18008 instruction |= cond << 4;
18009
18010 if (to == NULL)
18011 {
18012 to = frag_more (2);
18013 #ifdef OBJ_ELF
18014 dwarf2_emit_insn (2);
18015 #endif
18016 }
18017
18018 md_number_to_chars (to, instruction, 2);
18019
18020 return to;
18021 }
18022
/* Tag values used in struct asm_opcode's tag field: they describe how
   the conditional affix (if any) attaches to a mnemonic.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs. */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
18056
18057 /* Subroutine of md_assemble, responsible for looking up the primary
18058 opcode from the mnemonic the user wrote. STR points to the
18059 beginning of the mnemonic.
18060
18061 This is not simply a hash table lookup, because of conditional
18062 variants. Most instructions have conditional variants, which are
18063 expressed with a _conditional affix_ to the mnemonic. If we were
18064 to encode each conditional variant as a literal string in the opcode
18065 table, it would have approximately 20,000 entries.
18066
18067 Most mnemonics take this affix as a suffix, and in unified syntax,
18068 'most' is upgraded to 'all'. However, in the divided syntax, some
18069 instructions take the affix as an infix, notably the s-variants of
18070 the arithmetic instructions. Of those instructions, all but six
18071 have the infix appear after the third character of the mnemonic.
18072
18073 Accordingly, the algorithm for looking up primary opcodes given
18074 an identifier is:
18075
18076 1. Look up the identifier in the opcode table.
18077 If we find a match, go to step U.
18078
18079 2. Look up the last two characters of the identifier in the
18080 conditions table. If we find a match, look up the first N-2
18081 characters of the identifier in the opcode table. If we
18082 find a match, go to step CE.
18083
18084 3. Look up the fourth and fifth characters of the identifier in
18085 the conditions table. If we find a match, extract those
18086 characters from the identifier, and look up the remaining
18087 characters in the opcode table. If we find a match, go
18088 to step CM.
18089
18090 4. Fail.
18091
18092 U. Examine the tag field of the opcode structure, in case this is
18093 one of the six instructions with its conditional infix in an
18094 unusual place. If it is, the tag tells us where to find the
18095 infix; look it up in the conditions table and set inst.cond
18096 accordingly. Otherwise, this is an unconditional instruction.
18097 Again set inst.cond accordingly. Return the opcode structure.
18098
18099 CE. Examine the tag field to make sure this is an instruction that
18100 should receive a conditional suffix. If it is not, fail.
18101 Otherwise, set inst.cond from the suffix we already looked up,
18102 and return the opcode structure.
18103
18104 CM. Examine the tag field to make sure this is an instruction that
18105 should receive a conditional infix after the third character.
18106 If it is not, fail. Otherwise, undo the edits to the current
18107 line of input and proceed as for case CE. */
18108
static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || *end == '.')
      break;

  if (end == base)
    return NULL;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
	 use.  */
      if (unified_syntax && end[1] == 'w')
	inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;

      inst.vectype.elems = 0;

      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix (possible in either unified or
	     non-unified ARM syntax mode).  */
	  if (parse_neon_type (&inst.vectype, str) == FAIL)
	    return NULL;
	}
      else if (end[offset] != '\0' && end[offset] != ' ')
	return NULL;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    end - base);
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
	{
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      if (warn_on_deprecated && unified_syntax)
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      gas_assert (cond);

      inst.cond = cond->value;
      return opcode;
    }

  /* Cannot have a conditional suffix on a mnemonic of less than three
     characters: one for the base mnemonic plus two for the suffix.  */
  if (end - base < 3)
    return NULL;

  /* Look for suffixed mnemonic.  */
  affix = end - 2;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    affix - base);
  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
	  break;

	case OT_cinfix3:
	case OT_cinfix3_deprecated:
	case OT_odd_infix_unc:
	  if (!unified_syntax)
	    return NULL;
	  /* Fall through.  */

	case OT_csuffix:
	case OT_csuffixF:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    inst.cond = cond->value;
	  else
	    {
	      /* Delayed diagnostic.  */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return NULL;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return NULL;

  /* Look for infixed mnemonic in the usual position.  */
  affix = base + 3;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return NULL;

  /* Temporarily excise the two infix characters from the input line,
     look up the remaining mnemonic, then restore the line.  */
  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
	  || opcode->tag == OT_cinfix3_deprecated
	  || opcode->tag == OT_csuf_or_in3
	  || opcode->tag == OT_cinfix3_legacy))
    {
      /* Step CM.  */
      if (warn_on_deprecated && unified_syntax
	  && (opcode->tag == OT_cinfix3
	      || opcode->tag == OT_cinfix3_deprecated))
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return NULL;
}
18265
18266 /* This function generates an initial IT instruction, leaving its block
18267 virtually open for the new instructions. Eventually,
18268 the mask will be updated by now_it_add_mask () each time
18269 a new instruction needs to be included in the IT block.
18270 Finally, the block is closed with close_automatic_it_block ().
18271 The block closure can be requested either from md_assemble (),
18272 a tencode (), or due to a label hook. */
18273
18274 static void
18275 new_automatic_it_block (int cond)
18276 {
18277 now_it.state = AUTOMATIC_IT_BLOCK;
18278 now_it.mask = 0x18;
18279 now_it.cc = cond;
18280 now_it.block_length = 1;
18281 mapping_state (MAP_THUMB);
18282 now_it.insn = output_it_inst (cond, now_it.mask, NULL);
18283 now_it.warn_deprecated = FALSE;
18284 now_it.insn_cond = TRUE;
18285 }
18286
/* Close an automatic IT block: reset the mask to "no slots left" and
   the block length to zero.  See new_automatic_it_block ().  */

static void
close_automatic_it_block (void)
{
  now_it.mask = 0x10;
  now_it.block_length = 0;
}
18296
18297 /* Update the mask of the current automatically-generated IT
18298 instruction. See comments in new_automatic_it_block (). */
18299
18300 static void
18301 now_it_add_mask (int cond)
18302 {
18303 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
18304 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
18305 | ((bitvalue) << (nbit)))
18306 const int resulting_bit = (cond & 1);
18307
18308 now_it.mask &= 0xf;
18309 now_it.mask = SET_BIT_VALUE (now_it.mask,
18310 resulting_bit,
18311 (5 - now_it.block_length));
18312 now_it.mask = SET_BIT_VALUE (now_it.mask,
18313 1,
18314 ((5 - now_it.block_length) - 1) );
18315 output_it_inst (now_it.cc, now_it.mask, now_it.insn);
18316
18317 #undef CLEAR_BIT
18318 #undef SET_BIT_VALUE
18319 }
18320
/* The IT blocks handling machinery is accessed through these functions:
18322 it_fsm_pre_encode () from md_assemble ()
18323 set_it_insn_type () optional, from the tencode functions
18324 set_it_insn_type_last () ditto
18325 in_it_block () ditto
18326 it_fsm_post_encode () from md_assemble ()
18327 force_automatic_it_block_close () from label handling functions
18328
18329 Rationale:
18330 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
18331 initializing the IT insn type with a generic initial value depending
18332 on the inst.condition.
18333 2) During the tencode function, two things may happen:
18334 a) The tencode function overrides the IT insn type by
18335 calling either set_it_insn_type (type) or set_it_insn_type_last ().
18336 b) The tencode function queries the IT block state by
18337 calling in_it_block () (i.e. to determine narrow/not narrow mode).
18338
18339 Both set_it_insn_type and in_it_block run the internal FSM state
18340 handling function (handle_it_state), because: a) setting the IT insn
18341 type may incur in an invalid state (exiting the function),
18342 and b) querying the state requires the FSM to be updated.
18343 Specifically we want to avoid creating an IT block for conditional
18344 branches, so it_fsm_pre_encode is actually a guess and we can't
18345 determine whether an IT block is required until the tencode () routine
   has decided what type of instruction this actually is.
18347 Because of this, if set_it_insn_type and in_it_block have to be used,
18348 set_it_insn_type has to be called first.
18349
18350 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
18351 determines the insn IT type depending on the inst.cond code.
18352 When a tencode () routine encodes an instruction that can be
18353 either outside an IT block, or, in the case of being inside, has to be
18354 the last one, set_it_insn_type_last () will determine the proper
18355 IT instruction type based on the inst.cond code. Otherwise,
18356 set_it_insn_type can be called for overriding that logic or
18357 for covering other cases.
18358
18359 Calling handle_it_state () may not transition the IT block state to
18360 OUTSIDE_IT_BLOCK immediately, since the (current) state could be
18361 still queried. Instead, if the FSM determines that the state should
18362 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
18363 after the tencode () function: that's what it_fsm_post_encode () does.
18364
18365 Since in_it_block () calls the state handling function to get an
18366 updated state, an error may occur (due to invalid insns combination).
18367 In that case, inst.error is set.
18368 Therefore, inst.error has to be checked after the execution of
18369 the tencode () routine.
18370
18371 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
18372 any pending state change (if any) that didn't take place in
18373 handle_it_state () as explained above. */
18374
18375 static void
18376 it_fsm_pre_encode (void)
18377 {
18378 if (inst.cond != COND_ALWAYS)
18379 inst.it_insn_type = INSIDE_IT_INSN;
18380 else
18381 inst.it_insn_type = OUTSIDE_IT_INSN;
18382
18383 now_it.state_handled = 0;
18384 }
18385
/* IT state FSM handling function.  Run at most once per instruction
   (via set_it_insn_type (), in_it_block () or it_fsm_post_encode ());
   advances now_it according to the current state and the insn type
   chosen by the encoding routine.  Returns SUCCESS or FAIL, setting
   inst.error on failure.  */

static int
handle_it_state (void)
{
  now_it.state_handled = 1;
  now_it.insn_cond = FALSE;

  switch (now_it.state)
    {
    case OUTSIDE_IT_BLOCK:
      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (thumb_mode == 0)
	    {
	      if (unified_syntax
		  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		as_tsktsk (_("Warning: conditional outside an IT block"\
			     " for Thumb."));
	    }
	  else
	    {
	      if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		{
		  /* Automatically generate the IT instruction.  */
		  new_automatic_it_block (inst.cond);
		  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
		    close_automatic_it_block ();
		}
	      else
		{
		  /* Conditional instruction outside an IT block and
		     implicit IT generation is disabled.  */
		  inst.error = BAD_OUT_IT;
		  return FAIL;
		}
	    }
	  break;

	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case IT_INSN:
	  /* An explicit IT instruction opens a manual block.  */
	  now_it.state = MANUAL_IT_BLOCK;
	  now_it.block_length = 0;
	  break;
	}
      break;

    case AUTOMATIC_IT_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	 to incompatible conditions or
	 4 insns-length block reached).  */

      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_it_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_it.block_length++;

	  if (now_it.block_length > 4
	      || !now_it_compatible (inst.cond))
	    {
	      force_automatic_it_block_close ();
	      if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    {
	      now_it.insn_cond = TRUE;
	      now_it_add_mask (inst.cond);
	    }

	  if (now_it.state == AUTOMATIC_IT_BLOCK
	      && (inst.it_insn_type == INSIDE_IT_LAST_INSN
		  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	case NEUTRAL_IT_INSN:
	  now_it.block_length++;
	  now_it.insn_cond = TRUE;

	  if (now_it.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    now_it_add_mask (now_it.cc & 1);
	  break;

	case IT_INSN:
	  close_automatic_it_block ();
	  now_it.state = MANUAL_IT_BLOCK;
	  break;
	}
      break;

    case MANUAL_IT_BLOCK:
      {
	/* Check conditional suffixes.  */
	const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
	int is_last;
	now_it.mask <<= 1;
	now_it.mask &= 0x1f;
	is_last = (now_it.mask == 0x10);
	now_it.insn_cond = TRUE;

	switch (inst.it_insn_type)
	  {
	  case OUTSIDE_IT_INSN:
	    inst.error = BAD_NOT_IT;
	    return FAIL;

	  case INSIDE_IT_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    break;

	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in an IT block.  */
	    break;

	  case IT_INSN:
	    inst.error = BAD_IT_IT;
	    return FAIL;
	  }
      }
      break;
    }

  return SUCCESS;
}
18549
/* A 16-bit Thumb opcode pattern/mask pair plus a human-readable
   description; used to diagnose deprecated instruction classes inside
   IT blocks.  */
struct depr_insn_mask
{
  unsigned long pattern;	/* Opcode bits to match.  */
  unsigned long mask;		/* Which bits of PATTERN are significant.  */
  const char* description;	/* Instruction class, for the warning.  */
};
18556
/* List of 16-bit instruction patterns deprecated in an IT block in
   ARMv8.  The list is terminated by an all-zero entry.  */
static const struct depr_insn_mask depr_it_insns[] = {
  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
  { 0xa000, 0xb800, N_("ADR") },
  { 0x4800, 0xf800, N_("Literal loads") },
  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
     field in asm_opcode.  'tvalue' is used at the stage this check happen.  */
  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
  { 0, 0, NULL }
};
18571
/* Called from md_assemble () after the encoding routines have run:
   commits any pending IT FSM state change and emits the ARMv8
   performance-deprecation warnings for instructions inside IT
   blocks.  */

static void
it_fsm_post_encode (void)
{
  int is_last;

  if (!now_it.state_handled)
    handle_it_state ();

  if (now_it.insn_cond
      && !now_it.warn_deprecated
      && warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)
      && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m))
    {
      if (inst.instruction >= 0x10000)
	{
	  as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
		       "performance deprecated in ARMv8-A and ARMv8-R"));
	  now_it.warn_deprecated = TRUE;
	}
      else
	{
	  /* 16-bit encoding: match against the deprecated pattern
	     table.  */
	  const struct depr_insn_mask *p = depr_it_insns;

	  while (p->mask != 0)
	    {
	      if ((inst.instruction & p->mask) == p->pattern)
		{
		  as_tsktsk (_("IT blocks containing 16-bit Thumb "
			       "instructions of the following class are "
			       "performance deprecated in ARMv8-A and "
			       "ARMv8-R: %s"), p->description);
		  now_it.warn_deprecated = TRUE;
		  break;
		}

	      ++p;
	    }
	}

      if (now_it.block_length > 1)
	{
	  as_tsktsk (_("IT blocks containing more than one conditional "
		       "instruction are performance deprecated in ARMv8-A and "
		       "ARMv8-R"));
	  now_it.warn_deprecated = TRUE;
	}
    }

  /* Leave the IT block if this was its last slot.  */
  is_last = (now_it.mask == 0x10);
  if (is_last)
    {
      now_it.state = OUTSIDE_IT_BLOCK;
      now_it.mask = 0;
    }
}
18628
18629 static void
18630 force_automatic_it_block_close (void)
18631 {
18632 if (now_it.state == AUTOMATIC_IT_BLOCK)
18633 {
18634 close_automatic_it_block ();
18635 now_it.state = OUTSIDE_IT_BLOCK;
18636 now_it.mask = 0;
18637 }
18638 }
18639
18640 static int
18641 in_it_block (void)
18642 {
18643 if (!now_it.state_handled)
18644 handle_it_state ();
18645
18646 return now_it.state != OUTSIDE_IT_BLOCK;
18647 }
18648
18649 /* Whether OPCODE only has T32 encoding. Since this function is only used by
18650 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
18651 here, hence the "known" in the function name. */
18652
18653 static bfd_boolean
18654 known_t32_only_insn (const struct asm_opcode *opcode)
18655 {
18656 /* Original Thumb-1 wide instruction. */
18657 if (opcode->tencode == do_t_blx
18658 || opcode->tencode == do_t_branch23
18659 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
18660 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
18661 return TRUE;
18662
18663 /* Wide-only instruction added to ARMv8-M Baseline. */
18664 if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m_m_only)
18665 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
18666 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
18667 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
18668 return TRUE;
18669
18670 return FALSE;
18671 }
18672
18673 /* Whether wide instruction variant can be used if available for a valid OPCODE
18674 in ARCH. */
18675
18676 static bfd_boolean
18677 t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
18678 {
18679 if (known_t32_only_insn (opcode))
18680 return TRUE;
18681
18682 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
18683 of variant T3 of B.W is checked in do_t_branch. */
18684 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
18685 && opcode->tencode == do_t_branch)
18686 return TRUE;
18687
18688 /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit. */
18689 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
18690 && opcode->tencode == do_t_mov_cmp
18691 /* Make sure CMP instruction is not affected. */
18692 && opcode->aencode == do_mov)
18693 return TRUE;
18694
18695 /* Wide instruction variants of all instructions with narrow *and* wide
18696 variants become available with ARMv6t2. Other opcodes are either
18697 narrow-only or wide-only and are thus available if OPCODE is valid. */
18698 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
18699 return TRUE;
18700
18701 /* OPCODE with narrow only instruction variant or wide variant not
18702 available. */
18703 return FALSE;
18704 }
18705
/* Main entry point for assembling one source line.  STR holds the
   mnemonic (with optional condition/width suffixes) and its operands.
   Looks the mnemonic up, validates it against the selected CPU for the
   current mode (Thumb or ARM), parses the operands, encodes the
   instruction and emits it via output_inst.  Errors are reported with
   as_bad and suppress emission.  */
void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Reset the per-instruction scratch state.  */
  memset (&inst, '\0', sizeof (inst));
  inst.reloc.type = BFD_RELOC_UNUSED;

  /* Advances P past the mnemonic on success.  */
  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_tsktsk (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  /* SVC gets its own, clearer diagnostic.  */
	  if (opcode->tencode == do_t_swi)
	    as_bad (_("SVC is not permitted on this architecture"));
	  else
	    as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      /* Two things are addressed here:
	 1) Implicit require narrow instructions on Thumb-1.
	    This avoids relaxation accidentally introducing Thumb-2
	    instructions.
	 2) Reject wide instructions in non Thumb-2 cores.

	 Only instructions with narrow and wide variants need to be handled
	 but selecting all non wide-only instructions is easier.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
	  && !t32_insn_ok (variant, opcode))
	{
	  if (inst.size_req == 0)
	    inst.size_req = 2;
	  else if (inst.size_req == 4)
	    {
	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
		as_bad (_("selected processor does not support 32bit wide "
			  "variant of instruction `%s'"), str);
	      else
		as_bad (_("selected processor does not support `%s' in "
			  "Thumb-2 mode"), str);
	      return;
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
	{
	  /* Prepare the it_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  /* 0xe800..0xffff would be the first halfword of a 32-bit
	     encoding; a finished narrow instruction must not land there.  */
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  The impact
	 of relaxable instructions will be considered later after we finish all
	 relaxation.  */
      if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
	variant = arm_arch_none;
      else
	variant = cpu_variant;
      if (inst.size == 4 && !t32_insn_ok (variant, opcode))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xFU << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  output_inst (str);
}
18898
/* End-of-assembly sanity check: warn about any explicit (manually
   opened) IT block that was never completed.  For ELF each section has
   its own IT state; otherwise only the single global state is
   checked.  */
static void
check_it_blocks_finished (void)
{
#ifdef OBJ_ELF
  asection *sect;

  for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
    if (seg_info (sect)->tc_segment_info_data.current_it.state
	== MANUAL_IT_BLOCK)
      {
	as_warn (_("section '%s' finished with an open IT block."),
		 sect->name);
      }
#else
  if (now_it.state == MANUAL_IT_BLOCK)
    as_warn (_("file finished with an open IT block."));
#endif
}
18917
/* Various frobbings of labels and their addresses.  */

/* Called at the start of each source line: forget any label seen on a
   previous line so md_assemble only realigns labels from this line.  */
void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
18925
/* Called whenever a label SYM is defined.  Records it for later
   realignment, tags it with the current instruction set (and
   interworking support where the object format records that), closes
   any implicit IT block, and may mark it as a Thumb function entry
   point (see the long comment below for why local labels are
   excluded).  */
void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  /* A label terminates any automatically-opened IT block.  */
  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:	.word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
18984
18985 bfd_boolean
18986 arm_data_in_code (void)
18987 {
18988 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
18989 {
18990 *input_line_pointer = '/';
18991 input_line_pointer += 5;
18992 *input_line_pointer = 0;
18993 return TRUE;
18994 }
18995
18996 return FALSE;
18997 }
18998
18999 char *
19000 arm_canonicalize_symbol_name (char * name)
19001 {
19002 int len;
19003
19004 if (thumb_mode && (len = strlen (name)) > 5
19005 && streq (name + len - 5, "/data"))
19006 *(name + len - 5) = 0;
19007
19008 return name;
19009 }
19010 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.	Some registers
   also have mixed-case names.	*/

/* REGDEF(s,n,t) builds one reg_entry: the stringified name S, encoded
   number N and register type T.  REGNUM forms the name from prefix and
   number; REGNUM2 stores twice the number (used for the Neon QP
   registers below).  REGSET/REGSETH expand to registers 0-15 / 16-31.
   SPLRBANK defines the banked LR/SP/SPSR triplet of one processor
   mode, in both letter cases.	*/
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)

static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c, CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* ARM banked registers.  */
  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),

  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),

  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.	*/
  REGSET(s,VFS),  REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.	*/
  REGSET(d,VFD),  REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.  */
  REGSET2(q,NQ),  REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
  REGDEF(mvfr2,5,VFC), REGDEF(MVFR2,5,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.	 */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid,	0,MMXWC),  REGDEF(wCID,	 0,MMXWC),  REGDEF(WCID,  0,MMXWC),
  REGDEF(wcon,	1,MMXWC),  REGDEF(wCon,	 1,MMXWC),  REGDEF(WCON,  1,MMXWC),
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
#undef REGDEF
#undef REGNUM
#undef REGSET
19160
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.  Every permutation of the f/s/x/c flag
   letters is listed explicitly; the resulting bit mask is independent
   of the letter order.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.	*/
  {"f",	   PSR_f},
  {"c",	   PSR_c},
  {"x",	   PSR_x},
  {"s",	   PSR_s},

  /* Combinations of flags.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
19239
/* Table of V7M psr names.  Each name appears in lower and upper case.
   The *_ns entries (values with 0x80 set) name the non-secure
   counterparts.  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr",	   0x0 }, {"APSR",	   0x0 },
  {"iapsr",	   0x1 }, {"IAPSR",	   0x1 },
  {"eapsr",	   0x2 }, {"EAPSR",	   0x2 },
  {"psr",	   0x3 }, {"PSR",	   0x3 },
  {"xpsr",	   0x3 }, {"XPSR",	   0x3 }, {"xPSR",	  3 },
  {"ipsr",	   0x5 }, {"IPSR",	   0x5 },
  {"epsr",	   0x6 }, {"EPSR",	   0x6 },
  {"iepsr",	   0x7 }, {"IEPSR",	   0x7 },
  {"msp",	   0x8 }, {"MSP",	   0x8 },
  {"psp",	   0x9 }, {"PSP",	   0x9 },
  {"msplim",	   0xa }, {"MSPLIM",	   0xa },
  {"psplim",	   0xb }, {"PSPLIM",	   0xb },
  {"primask",	   0x10}, {"PRIMASK",	   0x10},
  {"basepri",	   0x11}, {"BASEPRI",	   0x11},
  {"basepri_max",  0x12}, {"BASEPRI_MAX",  0x12},
  {"faultmask",	   0x13}, {"FAULTMASK",	   0x13},
  {"control",	   0x14}, {"CONTROL",	   0x14},
  {"msp_ns",	   0x88}, {"MSP_NS",	   0x88},
  {"psp_ns",	   0x89}, {"PSP_NS",	   0x89},
  {"msplim_ns",	   0x8a}, {"MSPLIM_NS",	   0x8a},
  {"psplim_ns",	   0x8b}, {"PSPLIM_NS",	   0x8b},
  {"primask_ns",   0x90}, {"PRIMASK_NS",   0x90},
  {"basepri_ns",   0x91}, {"BASEPRI_NS",   0x91},
  {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
  {"control_ns",   0x94}, {"CONTROL_NS",   0x94},
  {"sp_ns",	   0x98}, {"SP_NS",	   0x98 }
};
19270
/* Table of all shift-in-operand names, in both letter cases.
   "asl" is accepted as a synonym for "lsl".  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
};
19281
/* Table of all explicit relocation names (the "name" in expressions
   such as "sym(GOT)"), each in lower and upper case.  ELF only.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},	 { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},	 { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},	 { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
	{ "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
	{ "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
	{ "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ}
};
#endif
19306
/* Table of all conditional affixes.  0xF is not defined as a condition code.
   hs is a synonym for cs; ul and lo are synonyms for cc.  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
19326
/* UL_BARRIER emits one barrier option under both its lower-case (L) and
   upper-case (U) spellings; CODE is the option field value and FEAT the
   architecture extension that must be present for it to be accepted.  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }

static struct asm_barrier_opt barrier_opt_names[] =
{
  UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
};

#undef UL_BARRIER
19352
19353 /* Table of ARM-format instructions. */
19354
19355 /* Macros for gluing together operand strings. N.B. In all cases
19356 other than OPS0, the trailing OP_stop comes from default
19357 zero-initialization of the unspecified elements of the array. */
19358 #define OPS0() { OP_stop, }
19359 #define OPS1(a) { OP_##a, }
19360 #define OPS2(a,b) { OP_##a,OP_##b, }
19361 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
19362 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
19363 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
19364 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
19365
19366 /* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
19367 This is useful when mixing operands for ARM and THUMB, i.e. using the
19368 MIX_ARM_THUMB_OPERANDS macro.
19369 In order to use these macros, prefix the number of operands with _
19370 e.g. _3. */
19371 #define OPS_1(a) { a, }
19372 #define OPS_2(a,b) { a,b, }
19373 #define OPS_3(a,b,c) { a,b,c, }
19374 #define OPS_4(a,b,c,d) { a,b,c,d, }
19375 #define OPS_5(a,b,c,d,e) { a,b,c,d,e, }
19376 #define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
19377
19378 /* These macros abstract out the exact format of the mnemonic table and
19379 save some repeated characters. */
19380
19381 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
19382 #define TxCE(mnem, op, top, nops, ops, ae, te) \
19383 { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
19384 THUMB_VARIANT, do_##ae, do_##te }
19385
19386 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
19387 a T_MNEM_xyz enumerator. */
19388 #define TCE(mnem, aop, top, nops, ops, ae, te) \
19389 TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
19390 #define tCE(mnem, aop, top, nops, ops, ae, te) \
19391 TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)
19392
19393 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
19394 infix after the third character. */
19395 #define TxC3(mnem, op, top, nops, ops, ae, te) \
19396 { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
19397 THUMB_VARIANT, do_##ae, do_##te }
19398 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
19399 { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
19400 THUMB_VARIANT, do_##ae, do_##te }
19401 #define TC3(mnem, aop, top, nops, ops, ae, te) \
19402 TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
19403 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
19404 TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
19405 #define tC3(mnem, aop, top, nops, ops, ae, te) \
19406 TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
19407 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
19408 TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
19409
19410 /* Mnemonic that cannot be conditionalized. The ARM condition-code
19411 field is still 0xE. Many of the Thumb variants can be executed
19412 conditionally, so this is checked separately. */
19413 #define TUE(mnem, op, top, nops, ops, ae, te) \
19414 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
19415 THUMB_VARIANT, do_##ae, do_##te }
19416
19417 /* Same as TUE but the encoding function for ARM and Thumb modes is the same.
19418 Used by mnemonics that have very minimal differences in the encoding for
19419 ARM and Thumb variants and can be handled in a common function. */
19420 #define TUEc(mnem, op, top, nops, ops, en) \
19421 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
19422 THUMB_VARIANT, do_##en, do_##en }
19423
19424 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
19425 condition code field. */
19426 #define TUF(mnem, op, top, nops, ops, ae, te) \
19427 { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
19428 THUMB_VARIANT, do_##ae, do_##te }
19429
19430 /* ARM-only variants of all the above. */
19431 #define CE(mnem, op, nops, ops, ae) \
19432 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
19433
19434 #define C3(mnem, op, nops, ops, ae) \
19435 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
19436
19437 /* Thumb-only variants of TCE and TUE. */
19438 #define ToC(mnem, top, nops, ops, te) \
19439 { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
19440 do_##te }
19441
19442 #define ToU(mnem, top, nops, ops, te) \
19443 { mnem, OPS##nops ops, OT_unconditional, 0x0, 0x##top, 0, THUMB_VARIANT, \
19444 NULL, do_##te }
19445
19446 /* Legacy mnemonics that always have conditional infix after the third
19447 character. */
19448 #define CL(mnem, op, nops, ops, ae) \
19449 { mnem, OPS##nops ops, OT_cinfix3_legacy, \
19450 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
19451
/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2: the
   Thumb-2 encoding (0xe##op) is the ARM encoding with an 0xE (AL
   condition) nibble prepended, and the same encoder do_<AE> serves both
   ISAs.  */
#define cCE(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
19468
/* Helper for CM below: build one table entry for mnemonic M1<M2>M3, where
   M2 is the (possibly empty) condition infix.  When M2 is empty its
   stringized form is "" and sizeof (#m2) == 1, selecting the unconditional
   tag; otherwise the tag records the infix position as OT_odd_infix_0 plus
   the length of the M1 prefix (sizeof (m1) - 1 == strlen of the string
   literal).  ARM-only: Thumb slots are left empty.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae) \
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Expand to 19 table entries: the bare mnemonic plus one entry per
   condition infix (including the hs/lo aliases for cs/cc, and the legacy
   "ul" spelling).  */
#define CM(m1, m2, op, nops, ops, ae) \
  xCM_ (m1,   , m2, op, nops, ops, ae),	\
  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
  xCM_ (m1, le, m2, op, nops, ops, ae),	\
  xCM_ (m1, al, m2, op, nops, ops, ae)
19494
/* ARM-only unconditional mnemonics (cf. TUE/TUF); the mnemonic is passed
   unquoted and stringized here.  Thumb slots are left empty (0 / NULL).  */
#define UE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* As UE, but the instruction bears 0xF in its ARM condition code field.  */
#define UF(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
19500
/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each, and the same opcode value for
   both ISAs.  */
#define NUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes: the "opcode" fields hold the
   N_MNEM_<op> table index rather than an encoding.  */
#define nUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
19513
/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  TAG selects the operand-tag variant; NCE/NCEF below supply the
   plain and F-suffixed tags respectively.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc) \
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc) \
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
19525
/* Neon insn with conditional suffix for the ARM version, overloaded types:
   like NCE_tag but the opcode fields hold an N_MNEM_<op> index into
   neon_enc_tab instead of an encoding.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc) \
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc) \
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Placeholder encoder name: table entries written with encoder "0"
   (e.g. Thumb-only TUE/TCE entries with no ARM encoder) expand
   do_##0 -> do_0, which this makes a null (0) function pointer.  */
#define do_0 0
19538
19539 static const struct asm_opcode insns[] =
19540 {
19541 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
19542 #define THUMB_VARIANT & arm_ext_v4t
19543 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
19544 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
19545 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
19546 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
19547 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
19548 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
19549 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
19550 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
19551 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
19552 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
19553 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
19554 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
19555 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
19556 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
19557 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
19558 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
19559
19560 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
19561 for setting PSR flag bits. They are obsolete in V6 and do not
19562 have Thumb equivalents. */
19563 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
19564 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
19565 CL("tstp", 110f000, 2, (RR, SH), cmp),
19566 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
19567 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
19568 CL("cmpp", 150f000, 2, (RR, SH), cmp),
19569 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
19570 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
19571 CL("cmnp", 170f000, 2, (RR, SH), cmp),
19572
19573 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
19574 tC3("movs", 1b00000, _movs, 2, (RR, SHG), mov, t_mov_cmp),
19575 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
19576 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
19577
19578 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
19579 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
19580 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
19581 OP_RRnpc),
19582 OP_ADDRGLDR),ldst, t_ldst),
19583 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
19584
19585 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19586 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19587 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19588 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19589 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19590 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19591
19592 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
19593 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
19594
19595 /* Pseudo ops. */
19596 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
19597 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
19598 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
19599 tCE("udf", 7f000f0, _udf, 1, (oIffffb), bkpt, t_udf),
19600
19601 /* Thumb-compatibility pseudo ops. */
19602 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
19603 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
19604 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
19605 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
19606 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
19607 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
19608 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
19609 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
19610 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
19611 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
19612 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
19613 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
19614
19615 /* These may simplify to neg. */
19616 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
19617 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
19618
19619 #undef THUMB_VARIANT
19620 #define THUMB_VARIANT & arm_ext_os
19621
19622 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
19623 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
19624
19625 #undef THUMB_VARIANT
19626 #define THUMB_VARIANT & arm_ext_v6
19627
19628 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
19629
19630 /* V1 instructions with no Thumb analogue prior to V6T2. */
19631 #undef THUMB_VARIANT
19632 #define THUMB_VARIANT & arm_ext_v6t2
19633
19634 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
19635 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
19636 CL("teqp", 130f000, 2, (RR, SH), cmp),
19637
19638 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19639 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19640 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
19641 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19642
19643 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19644 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19645
19646 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19647 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19648
19649 /* V1 instructions with no Thumb analogue at all. */
19650 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
19651 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
19652
19653 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
19654 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
19655 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
19656 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
19657 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
19658 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
19659 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
19660 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
19661
19662 #undef ARM_VARIANT
19663 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
19664 #undef THUMB_VARIANT
19665 #define THUMB_VARIANT & arm_ext_v4t
19666
19667 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
19668 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
19669
19670 #undef THUMB_VARIANT
19671 #define THUMB_VARIANT & arm_ext_v6t2
19672
19673 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
19674 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
19675
19676 /* Generic coprocessor instructions. */
19677 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
19678 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19679 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19680 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19681 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19682 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19683 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
19684
19685 #undef ARM_VARIANT
19686 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
19687
19688 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
19689 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
19690
19691 #undef ARM_VARIANT
19692 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
19693 #undef THUMB_VARIANT
19694 #define THUMB_VARIANT & arm_ext_msr
19695
19696 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
19697 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
19698
19699 #undef ARM_VARIANT
19700 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
19701 #undef THUMB_VARIANT
19702 #define THUMB_VARIANT & arm_ext_v6t2
19703
19704 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19705 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19706 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19707 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19708 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19709 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19710 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19711 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19712
19713 #undef ARM_VARIANT
19714 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
19715 #undef THUMB_VARIANT
19716 #define THUMB_VARIANT & arm_ext_v4t
19717
19718 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19719 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19720 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19721 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19722 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19723 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19724
19725 #undef ARM_VARIANT
19726 #define ARM_VARIANT & arm_ext_v4t_5
19727
19728 /* ARM Architecture 4T. */
19729 /* Note: bx (and blx) are required on V5, even if the processor does
19730 not support Thumb. */
19731 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
19732
19733 #undef ARM_VARIANT
19734 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
19735 #undef THUMB_VARIANT
19736 #define THUMB_VARIANT & arm_ext_v5t
19737
19738 /* Note: blx has 2 variants; the .value coded here is for
19739 BLX(2). Only this variant has conditional execution. */
19740 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
19741 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
19742
19743 #undef THUMB_VARIANT
19744 #define THUMB_VARIANT & arm_ext_v6t2
19745
19746 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
19747 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19748 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19749 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19750 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19751 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
19752 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19753 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19754
19755 #undef ARM_VARIANT
19756 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
19757 #undef THUMB_VARIANT
19758 #define THUMB_VARIANT & arm_ext_v5exp
19759
19760 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19761 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19762 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19763 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19764
19765 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19766 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19767
19768 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19769 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19770 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19771 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19772
19773 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19774 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19775 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19776 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19777
19778 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19779 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19780
19781 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19782 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19783 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19784 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19785
19786 #undef ARM_VARIANT
19787 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
19788 #undef THUMB_VARIANT
19789 #define THUMB_VARIANT & arm_ext_v6t2
19790
19791 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
19792 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
19793 ldrd, t_ldstd),
19794 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
19795 ADDRGLDRS), ldrd, t_ldstd),
19796
19797 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19798 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19799
19800 #undef ARM_VARIANT
19801 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
19802
19803 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
19804
19805 #undef ARM_VARIANT
19806 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
19807 #undef THUMB_VARIANT
19808 #define THUMB_VARIANT & arm_ext_v6
19809
19810 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
19811 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
19812 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19813 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19814 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19815 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19816 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19817 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19818 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19819 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
19820
19821 #undef THUMB_VARIANT
19822 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19823
19824 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
19825 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19826 strex, t_strex),
19827 #undef THUMB_VARIANT
19828 #define THUMB_VARIANT & arm_ext_v6t2
19829
19830 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19831 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19832
19833 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
19834 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
19835
19836 /* ARM V6 not included in V7M. */
19837 #undef THUMB_VARIANT
19838 #define THUMB_VARIANT & arm_ext_v6_notm
19839 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19840 TUF("rfe", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19841 UF(rfeib, 9900a00, 1, (RRw), rfe),
19842 UF(rfeda, 8100a00, 1, (RRw), rfe),
19843 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
19844 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19845 UF(rfefa, 8100a00, 1, (RRw), rfe),
19846 TUF("rfeea", 9100a00, e810c000, 1, (RRw), rfe, rfe),
19847 UF(rfeed, 9900a00, 1, (RRw), rfe),
19848 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19849 TUF("srs", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19850 TUF("srsea", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19851 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
19852 UF(srsfa, 9c00500, 2, (oRRw, I31w), srs),
19853 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
19854 UF(srsed, 8400500, 2, (oRRw, I31w), srs),
19855 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
19856 TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
19857 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
19858
19859 /* ARM V6 not included in V7M (eg. integer SIMD). */
19860 #undef THUMB_VARIANT
19861 #define THUMB_VARIANT & arm_ext_v6_dsp
19862 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
19863 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
19864 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19865 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19866 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19867 /* Old name for QASX. */
19868 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19869 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19870 /* Old name for QSAX. */
19871 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19872 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19873 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19874 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19875 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19876 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19877 /* Old name for SASX. */
19878 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19879 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19880 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19881 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19882 /* Old name for SHASX. */
19883 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19884 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19885 /* Old name for SHSAX. */
19886 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19887 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19888 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19889 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19890 /* Old name for SSAX. */
19891 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19892 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19893 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19894 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19895 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19896 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19897 /* Old name for UASX. */
19898 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19899 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19900 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19901 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19902 /* Old name for UHASX. */
19903 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19904 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19905 /* Old name for UHSAX. */
19906 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19907 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19908 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19909 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19910 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19911 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19912 /* Old name for UQASX. */
19913 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19914 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19915 /* Old name for UQSAX. */
19916 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19917 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19918 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19919 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19920 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19921 /* Old name for USAX. */
19922 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19923 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19924 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19925 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19926 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19927 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19928 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19929 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19930 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19931 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19932 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19933 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19934 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19935 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19936 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19937 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19938 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19939 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19940 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19941 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19942 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19943 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19944 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19945 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19946 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19947 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19948 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19949 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19950 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19951 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
19952 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
19953 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19954 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19955 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
19956
19957 #undef ARM_VARIANT
19958 #define ARM_VARIANT & arm_ext_v6k
19959 #undef THUMB_VARIANT
19960 #define THUMB_VARIANT & arm_ext_v6k
19961
19962 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
19963 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
19964 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
19965 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
19966
19967 #undef THUMB_VARIANT
19968 #define THUMB_VARIANT & arm_ext_v6_notm
19969 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
19970 ldrexd, t_ldrexd),
19971 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
19972 RRnpcb), strexd, t_strexd),
19973
19974 #undef THUMB_VARIANT
19975 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19976 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
19977 rd_rn, rd_rn),
19978 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
19979 rd_rn, rd_rn),
19980 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19981 strex, t_strexbh),
19982 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19983 strex, t_strexbh),
19984 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
19985
19986 #undef ARM_VARIANT
19987 #define ARM_VARIANT & arm_ext_sec
19988 #undef THUMB_VARIANT
19989 #define THUMB_VARIANT & arm_ext_sec
19990
19991 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
19992
19993 #undef ARM_VARIANT
19994 #define ARM_VARIANT & arm_ext_virt
19995 #undef THUMB_VARIANT
19996 #define THUMB_VARIANT & arm_ext_virt
19997
19998 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
19999 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
20000
20001 #undef ARM_VARIANT
20002 #define ARM_VARIANT & arm_ext_pan
20003 #undef THUMB_VARIANT
20004 #define THUMB_VARIANT & arm_ext_pan
20005
20006 TUF("setpan", 1100000, b610, 1, (I7), setpan, t_setpan),
20007
20008 #undef ARM_VARIANT
20009 #define ARM_VARIANT & arm_ext_v6t2
20010 #undef THUMB_VARIANT
20011 #define THUMB_VARIANT & arm_ext_v6t2
20012
20013 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
20014 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
20015 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
20016 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
20017
20018 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
20019 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
20020
20021 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
20022 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
20023 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
20024 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
20025
20026 #undef ARM_VARIANT
20027 #define ARM_VARIANT & arm_ext_v3
20028 #undef THUMB_VARIANT
20029 #define THUMB_VARIANT & arm_ext_v6t2
20030
20031 TUE("csdb", 320f014, f3af8014, 0, (), noargs, t_csdb),
20032
20033 #undef ARM_VARIANT
20034 #define ARM_VARIANT & arm_ext_v6t2
20035 #undef THUMB_VARIANT
20036 #define THUMB_VARIANT & arm_ext_v6t2_v8m
20037 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
20038 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
20039
20040 /* Thumb-only instructions. */
20041 #undef ARM_VARIANT
20042 #define ARM_VARIANT NULL
20043 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
20044 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
20045
20046 /* ARM does not really have an IT instruction, so always allow it.
20047 The opcode is copied from Thumb in order to allow warnings in
20048 -mimplicit-it=[never | arm] modes. */
20049 #undef ARM_VARIANT
20050 #define ARM_VARIANT & arm_ext_v1
20051 #undef THUMB_VARIANT
20052 #define THUMB_VARIANT & arm_ext_v6t2
20053
20054 TUE("it", bf08, bf08, 1, (COND), it, t_it),
20055 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
20056 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
20057 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
20058 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
20059 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
20060 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
20061 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
20062 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
20063 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
20064 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
20065 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
20066 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
20067 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
20068 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
20069 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
20070 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
20071 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
20072
20073 /* Thumb2 only instructions. */
20074 #undef ARM_VARIANT
20075 #define ARM_VARIANT NULL
20076
20077 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
20078 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
20079 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
20080 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
20081 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
20082 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
20083
20084 /* Hardware division instructions. */
20085 #undef ARM_VARIANT
20086 #define ARM_VARIANT & arm_ext_adiv
20087 #undef THUMB_VARIANT
20088 #define THUMB_VARIANT & arm_ext_div
20089
20090 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
20091 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
20092
20093 /* ARM V6M/V7 instructions. */
20094 #undef ARM_VARIANT
20095 #define ARM_VARIANT & arm_ext_barrier
20096 #undef THUMB_VARIANT
20097 #define THUMB_VARIANT & arm_ext_barrier
20098
20099 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
20100 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
20101 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
20102
20103 /* ARM V7 instructions. */
20104 #undef ARM_VARIANT
20105 #define ARM_VARIANT & arm_ext_v7
20106 #undef THUMB_VARIANT
20107 #define THUMB_VARIANT & arm_ext_v7
20108
20109 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
20110 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
20111
20112 #undef ARM_VARIANT
20113 #define ARM_VARIANT & arm_ext_mp
20114 #undef THUMB_VARIANT
20115 #define THUMB_VARIANT & arm_ext_mp
20116
20117 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
20118
20119 /* AArchv8 instructions. */
20120 #undef ARM_VARIANT
20121 #define ARM_VARIANT & arm_ext_v8
20122
20123 /* Instructions shared between armv8-a and armv8-m. */
20124 #undef THUMB_VARIANT
20125 #define THUMB_VARIANT & arm_ext_atomics
20126
20127 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20128 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20129 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20130 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
20131 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
20132 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
20133 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20134 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
20135 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20136 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
20137 stlex, t_stlex),
20138 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
20139 stlex, t_stlex),
20140 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
20141 stlex, t_stlex),
20142 #undef THUMB_VARIANT
20143 #define THUMB_VARIANT & arm_ext_v8
20144
20145 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
20146 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
20147 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
20148 ldrexd, t_ldrexd),
20149 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
20150 strexd, t_strexd),
20151 /* ARMv8 T32 only. */
20152 #undef ARM_VARIANT
20153 #define ARM_VARIANT NULL
20154 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
20155 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
20156 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
20157
20158 /* FP for ARMv8. */
20159 #undef ARM_VARIANT
20160 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
20161 #undef THUMB_VARIANT
20162 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
20163
20164 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
20165 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
20166 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
20167 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
20168 nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
20169 nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
 /* ARMv8 rounding-mode conversions and roundings.  vcvtn/vcvtp/vcvtm
    reuse the _vcvta mnemonic enum, vrintz/vrintx reuse _vrintr, and
    vrintn/vrintp/vrintm reuse _vrinta; each entry still dispatches to
    its own encoder (neon_cvtn/p/m, vrintz/x, vrintn/p/m), so the
    shared enum appears deliberate.  NOTE(review): confirm nothing
    else keys off the per-mnemonic enum value for these entries.  */
 nUF(vcvta,  _vcvta,  2, (RNSDQ, oRNSDQ),		neon_cvta),
 nUF(vcvtn,  _vcvta,  2, (RNSDQ, oRNSDQ),		neon_cvtn),
 nUF(vcvtp,  _vcvta,  2, (RNSDQ, oRNSDQ),		neon_cvtp),
 nUF(vcvtm,  _vcvta,  2, (RNSDQ, oRNSDQ),		neon_cvtm),
 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ),		vrintr),
 nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ),		vrintz),
 nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ),		vrintx),
 nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ),		vrinta),
 nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ),		vrintn),
 nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ),		vrintp),
 nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ),		vrintm),
20181
20182 /* Crypto v1 extensions. */
20183 #undef ARM_VARIANT
20184 #define ARM_VARIANT & fpu_crypto_ext_armv8
20185 #undef THUMB_VARIANT
20186 #define THUMB_VARIANT & fpu_crypto_ext_armv8
20187
20188 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
20189 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
20190 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
20191 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
20192 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
20193 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
20194 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
20195 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
20196 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
20197 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
20198 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
20199 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
20200 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
20201 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
20202
20203 #undef ARM_VARIANT
20204 #define ARM_VARIANT & crc_ext_armv8
20205 #undef THUMB_VARIANT
20206 #define THUMB_VARIANT & crc_ext_armv8
20207 TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
20208 TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
20209 TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
20210 TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
20211 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
20212 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
20213
20214 /* ARMv8.2 RAS extension. */
20215 #undef ARM_VARIANT
20216 #define ARM_VARIANT & arm_ext_ras
20217 #undef THUMB_VARIANT
20218 #define THUMB_VARIANT & arm_ext_ras
20219 TUE ("esb", 320f010, f3af8010, 0, (), noargs, noargs),
20220
20221 #undef ARM_VARIANT
20222 #define ARM_VARIANT & arm_ext_v8_3
20223 #undef THUMB_VARIANT
20224 #define THUMB_VARIANT & arm_ext_v8_3
20225 NCE (vjcvt, eb90bc0, 2, (RVS, RVD), vjcvt),
20226 NUF (vcmla, 0, 4, (RNDQ, RNDQ, RNDQ_RNSC, EXPi), vcmla),
20227 NUF (vcadd, 0, 4, (RNDQ, RNDQ, RNDQ, EXPi), vcadd),
20228
20229 #undef ARM_VARIANT
20230 #define ARM_VARIANT & fpu_neon_ext_dotprod
20231 #undef THUMB_VARIANT
20232 #define THUMB_VARIANT & fpu_neon_ext_dotprod
20233 NUF (vsdot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_s),
20234 NUF (vudot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_u),
20235
20236 #undef ARM_VARIANT
20237 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
20238 #undef THUMB_VARIANT
20239 #define THUMB_VARIANT NULL
20240
20241 cCE("wfs", e200110, 1, (RR), rd),
20242 cCE("rfs", e300110, 1, (RR), rd),
20243 cCE("wfc", e400110, 1, (RR), rd),
20244 cCE("rfc", e500110, 1, (RR), rd),
20245
20246 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
20247 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
20248 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
20249 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
20250
20251 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
20252 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
20253 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
20254 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
20255
20256 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
20257 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
20258 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
20259 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
20260 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
20261 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
20262 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
20263 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
20264 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
20265 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
20266 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
20267 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
20268
20269 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
20270 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
20271 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
20272 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
20273 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
20274 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
20275 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
20276 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
20277 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
20278 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
20279 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
20280 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
20281
20282 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
20283 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
20284 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
20285 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
20286 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
20287 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
20288 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
20289 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
20290 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
20291 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
20292 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
20293 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
20294
20295 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
20296 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
20297 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
20298 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
20299 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
20300 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
20301 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
20302 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
20303 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
20304 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
20305 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
20306 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
20307
20308 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
20309 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
20310 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
20311 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
20312 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
20313 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
20314 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
20315 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
20316 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
20317 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
20318 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
20319 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
20320
20321 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
20322 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
20323 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
20324 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
20325 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
20326 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
20327 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
20328 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
20329 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
20330 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
20331 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
20332 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
20333
20334 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
20335 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
20336 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
20337 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
20338 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
20339 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
20340 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
20341 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
20342 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
20343 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
20344 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
20345 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
20346
 /* FPA exponential (EXP), expanded over the precision (s/d/e) and
    rounding (p/m/z) suffix combinations like the other FPA monadic
    entries above.  */
 cCL("exps",	e708100, 2, (RF, RF_IF),      rd_rm),
 cCL("expsp",	e708120, 2, (RF, RF_IF),      rd_rm),
 cCL("expsm",	e708140, 2, (RF, RF_IF),      rd_rm),
 cCL("expsz",	e708160, 2, (RF, RF_IF),      rd_rm),
 cCL("expd",	e708180, 2, (RF, RF_IF),      rd_rm),
 cCL("expdp",	e7081a0, 2, (RF, RF_IF),      rd_rm),
 cCL("expdm",	e7081c0, 2, (RF, RF_IF),      rd_rm),
 cCL("expdz",	e7081e0, 2, (RF, RF_IF),      rd_rm),
 cCL("expe",	e788100, 2, (RF, RF_IF),      rd_rm),
 cCL("expep",	e788120, 2, (RF, RF_IF),      rd_rm),
 cCL("expem",	e788140, 2, (RF, RF_IF),      rd_rm),
 /* Long-standing typo: the suffix pattern (cf. expsz/expdz above and
    every other FPA group) says this entry should be "expez", but it
    duplicates the "expdz" mnemonic already defined eight lines up.
    Kept as-is for backward compatibility -- renaming it would change
    which mnemonics the assembler accepts.  */
 cCL("expdz",	e788160, 2, (RF, RF_IF),      rd_rm),
20359
20360 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
20361 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
20362 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
20363 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
20364 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
20365 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
20366 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
20367 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
20368 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
20369 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
20370 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
20371 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
20372
20373 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
20374 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
20375 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
20376 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
20377 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
20378 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
20379 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
20380 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
20381 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
20382 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
20383 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
20384 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
20385
20386 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
20387 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
20388 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
20389 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
20390 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
20391 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
20392 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
20393 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
20394 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
20395 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
20396 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
20397 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
20398
20399 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
20400 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
20401 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
20402 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
20403 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
20404 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
20405 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
20406 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
20407 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
20408 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
20409 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
20410 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
20411
20412 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
20413 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
20414 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
20415 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
20416 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
20417 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
20418 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
20419 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
20420 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
20421 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
20422 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
20423 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
20424
20425 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
20426 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
20427 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
20428 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
20429 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
20430 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
20431 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
20432 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
20433 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
20434 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
20435 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
20436 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
20437
20438 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
20439 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
20440 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
20441 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
20442 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
20443 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
20444 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
20445 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
20446 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
20447 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
20448 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
20449 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
20450
20451 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
20452 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
20453 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
20454 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
20455 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
20456 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
20457 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
20458 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
20459 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
20460 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
20461 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
20462 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
20463
20464 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
20465 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
20466 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
20467 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
20468 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
20469 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20470 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20471 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20472 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
20473 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
20474 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
20475 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
20476
20477 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
20478 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
20479 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
20480 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
20481 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
20482 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20483 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20484 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20485 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
20486 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
20487 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
20488 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
20489
20490 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
20491 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
20492 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
20493 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
20494 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
20495 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20496 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20497 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20498 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
20499 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
20500 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
20501 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
20502
20503 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
20504 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
20505 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
20506 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
20507 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
20508 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20509 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20510 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20511 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
20512 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
20513 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
20514 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
20515
20516 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
20517 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
20518 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
20519 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
20520 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
20521 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20522 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20523 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20524 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
20525 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
20526 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
20527 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
20528
20529 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
20530 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
20531 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
20532 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
20533 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
20534 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20535 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20536 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20537 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
20538 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
20539 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
20540 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
20541
20542 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
20543 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
20544 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
20545 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
20546 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
20547 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20548 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20549 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20550 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
20551 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
20552 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
20553 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
20554
20555 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
20556 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
20557 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
20558 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
20559 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
20560 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20561 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20562 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20563 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
20564 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
20565 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
20566 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
20567
20568 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
20569 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
20570 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
20571 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
20572 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
20573 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20574 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20575 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20576 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
20577 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
20578 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
20579 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
20580
20581 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
20582 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
20583 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
20584 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
20585 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
20586 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20587 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20588 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20589 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
20590 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
20591 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
20592 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
20593
20594 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20595 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20596 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20597 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20598 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20599 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20600 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20601 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20602 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20603 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20604 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20605 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20606
20607 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20608 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20609 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20610 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20611 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20612 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20613 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20614 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20615 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20616 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20617 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20618 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20619
20620 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20621 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20622 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20623 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20624 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20625 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20626 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20627 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20628 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20629 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20630 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20631 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20632
20633 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
20634 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
20635 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
20636 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
20637
20638 cCL("flts", e000110, 2, (RF, RR), rn_rd),
20639 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
20640 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
20641 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
20642 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
20643 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
20644 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
20645 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
20646 cCL("flte", e080110, 2, (RF, RR), rn_rd),
20647 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
20648 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
20649 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
20650
20651 /* The implementation of the FIX instruction is broken on some
20652 assemblers, in that it accepts a precision specifier as well as a
20653 rounding specifier, despite the fact that this is meaningless.
20654 To be more compatible, we accept it as well, though of course it
20655 does not set any bits. */
20656 cCE("fix", e100110, 2, (RR, RF), rd_rm),
20657 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
20658 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
20659 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
20660 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
20661 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
20662 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
20663 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
20664 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
20665 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
20666 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
20667 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
20668 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
20669
20670 /* Instructions that were new with the real FPA, call them V2. */
20671 #undef ARM_VARIANT
20672 #define ARM_VARIANT & fpu_fpa_ext_v2
20673
20674 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20675 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20676 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20677 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20678 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20679 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20680
20681 #undef ARM_VARIANT
20682 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
20683
20684 /* Moves and type conversions. */
20685 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
20686 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
20687 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
20688 cCE("fmstat", ef1fa10, 0, (), noargs),
20689 cCE("vmrs", ef00a10, 2, (APSR_RR, RVC), vmrs),
20690 cCE("vmsr", ee00a10, 2, (RVC, RR), vmsr),
20691 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
20692 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
20693 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
20694 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
20695 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
20696 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
20697 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
20698 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
20699
20700 /* Memory operations. */
20701 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
20702 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
20703 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20704 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20705 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20706 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20707 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20708 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20709 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20710 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20711 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20712 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20713 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20714 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20715 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20716 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20717 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20718 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20719
20720 /* Monadic operations. */
20721 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
20722 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
20723 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
20724
20725 /* Dyadic operations. */
20726 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20727 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20728 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20729 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20730 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20731 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20732 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20733 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20734 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20735
20736 /* Comparisons. */
20737 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
20738 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
20739 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
20740 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
20741
20742 /* Double precision load/store are still present on single precision
20743 implementations. */
20744 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
20745 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
20746 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20747 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20748 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20749 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20750 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20751 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20752 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20753 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20754
20755 #undef ARM_VARIANT
20756 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
20757
20758 /* Moves and type conversions. */
20759 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20760 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
20761 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20762 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
20763 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
20764 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
20765 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
20766 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
20767 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
20768 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
20769 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20770 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
20771 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20772
20773 /* Monadic operations. */
20774 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20775 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20776 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20777
20778 /* Dyadic operations. */
20779 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20780 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20781 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20782 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20783 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20784 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20785 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20786 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20787 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20788
20789 /* Comparisons. */
20790 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20791 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
20792 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20793 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
20794
20795 #undef ARM_VARIANT
20796 #define ARM_VARIANT & fpu_vfp_ext_v2
20797
20798 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
20799 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
20800 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
20801 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
20802
20803 /* Instructions which may belong to either the Neon or VFP instruction sets.
20804 Individual encoder functions perform additional architecture checks. */
20805 #undef ARM_VARIANT
20806 #define ARM_VARIANT & fpu_vfp_ext_v1xd
20807 #undef THUMB_VARIANT
20808 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
20809
20810 /* These mnemonics are unique to VFP. */
20811 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
20812 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
20813 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20814 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20815 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20816 nCE(vcmp, _vcmp, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
20817 nCE(vcmpe, _vcmpe, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
20818 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
20819 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
20820 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
20821
20822 /* Mnemonics shared by Neon and VFP. */
20823 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
20824 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
20825 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
20826
20827 nCEF(vadd, _vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
20828 nCEF(vsub, _vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
20829
20830 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
20831 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
20832
20833 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20834 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20835 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20836 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20837 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20838 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20839 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
20840 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
20841
20842 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
20843 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
20844 NCEF(vcvtb, eb20a40, 2, (RVSD, RVSD), neon_cvtb),
20845 NCEF(vcvtt, eb20a40, 2, (RVSD, RVSD), neon_cvtt),
20846
20847
20848 /* NOTE: All VMOV encoding is special-cased! */
20849 NCE(vmov, 0, 1, (VMOV), neon_mov),
20850 NCE(vmovq, 0, 1, (VMOV), neon_mov),
20851
20852 #undef ARM_VARIANT
20853 #define ARM_VARIANT & arm_ext_fp16
20854 #undef THUMB_VARIANT
20855 #define THUMB_VARIANT & arm_ext_fp16
20856 /* New instructions added from v8.2, allowing the extraction and insertion of
20857 the upper 16 bits of a 32-bit vector register. */
20858 NCE (vmovx, eb00a40, 2, (RVS, RVS), neon_movhf),
20859 NCE (vins, eb00ac0, 2, (RVS, RVS), neon_movhf),
20860
20861 /* New backported fma/fms instructions optional in v8.2. */
20862 NCE (vfmal, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmal),
20863 NCE (vfmsl, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmsl),
20864
20865 #undef THUMB_VARIANT
20866 #define THUMB_VARIANT & fpu_neon_ext_v1
20867 #undef ARM_VARIANT
20868 #define ARM_VARIANT & fpu_neon_ext_v1
20869
20870 /* Data processing with three registers of the same length. */
20871 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
20872 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
20873 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
20874 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20875 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20876 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20877 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20878 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20879 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20880 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
20881 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
20882 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
20883 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
20884 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
20885 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
20886 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
20887 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
20888 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
20889 /* If not immediate, fall back to neon_dyadic_i64_su.
20890 shl_imm should accept I8 I16 I32 I64,
20891 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
20892 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
20893 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
20894 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
20895 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
20896 /* Logic ops, types optional & ignored. */
20897 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20898 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20899 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20900 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20901 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20902 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20903 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20904 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20905 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
20906 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
20907 /* Bitfield ops, untyped. */
20908 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20909 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20910 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20911 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20912 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20913 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20914 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
20915 nUF(vabd, _vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20916 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20917 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20918 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20919 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20920 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20921 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
20922 back to neon_dyadic_if_su. */
20923 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20924 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
20925 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20926 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
20927 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20928 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
20929 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20930 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
20931 /* Comparison. Type I8 I16 I32 F32. */
20932 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
20933 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
20934 /* As above, D registers only. */
20935 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
20936 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
20937 /* Int and float variants, signedness unimportant. */
20938 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
20939 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
20940 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
20941 /* Add/sub take types I8 I16 I32 I64 F32. */
20942 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
20943 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
20944 /* vtst takes sizes 8, 16, 32. */
20945 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
20946 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
20947 /* VMUL takes I8 I16 I32 F32 P8. */
20948 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
20949 /* VQD{R}MULH takes S16 S32. */
20950 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20951 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20952 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20953 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20954 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20955 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
20956 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20957 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
20958 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20959 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
20960 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20961 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
20962 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
20963 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
20964 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
20965 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
20966 /* ARM v8.1 extension. */
20967 nUF (vqrdmlah, _vqrdmlah, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
20968 nUF (vqrdmlahq, _vqrdmlah, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
20969 nUF (vqrdmlsh, _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
20970 nUF (vqrdmlshq, _vqrdmlsh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
20971
20972 /* Two address, int/float. Types S8 S16 S32 F32. */
20973 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
20974 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
20975
20976 /* Data processing with two registers and a shift amount. */
20977 /* Right shifts, and variants with rounding.
20978 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
20979 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
20980 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
20981 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
20982 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
20983 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
20984 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
20985 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
20986 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
20987 /* Shift and insert. Sizes accepted 8 16 32 64. */
20988 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
20989 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
20990 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
20991 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
20992 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
20993 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
20994 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
20995 /* Right shift immediate, saturating & narrowing, with rounding variants.
20996 Types accepted S16 S32 S64 U16 U32 U64. */
20997 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
20998 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
20999 /* As above, unsigned. Types accepted S16 S32 S64. */
21000 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
21001 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
21002 /* Right shift narrowing. Types accepted I16 I32 I64. */
21003 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
21004 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
21005 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
21006 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
21007 /* CVT with optional immediate for fixed-point variant. */
21008 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
21009
21010 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
21011 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
21012
21013 /* Data processing, three registers of different lengths. */
21014 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
21015 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
21016 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
21017 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
21018 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
21019 /* If not scalar, fall back to neon_dyadic_long.
21020 Vector types as above, scalar types S16 S32 U16 U32. */
21021 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
21022 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
21023 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
21024 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
21025 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
21026 /* Dyadic, narrowing insns. Types I16 I32 I64. */
21027 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
21028 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
21029 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
21030 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
21031 /* Saturating doubling multiplies. Types S16 S32. */
21032 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
21033 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
21034 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
21035 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
21036 S16 S32 U16 U32. */
21037 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
21038
21039 /* Extract. Size 8. */
21040 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
21041 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
21042
21043 /* Two registers, miscellaneous. */
21044 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
21045 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
21046 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
21047 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
21048 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
21049 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
21050 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
21051 /* Vector replicate. Sizes 8 16 32. */
21052 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
21053 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
21054 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
21055 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
21056 /* VMOVN. Types I16 I32 I64. */
21057 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
21058 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
21059 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
21060 /* VQMOVUN. Types S16 S32 S64. */
21061 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
21062 /* VZIP / VUZP. Sizes 8 16 32. */
21063 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
21064 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
21065 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
21066 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
21067 /* VQABS / VQNEG. Types S8 S16 S32. */
21068 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
21069 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
21070 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
21071 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
21072 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
21073 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
21074 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
21075 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
21076 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
21077 /* Reciprocal estimates. Types U32 F16 F32. */
21078 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
21079 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
21080 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
21081 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
21082 /* VCLS. Types S8 S16 S32. */
21083 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
21084 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
21085 /* VCLZ. Types I8 I16 I32. */
21086 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
21087 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
21088 /* VCNT. Size 8. */
21089 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
21090 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
21091 /* Two address, untyped. */
21092 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
21093 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
21094 /* VTRN. Sizes 8 16 32. */
21095 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
21096 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
21097
21098 /* Table lookup. Size 8. */
21099 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
21100 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
21101
21102 #undef THUMB_VARIANT
21103 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
21104 #undef ARM_VARIANT
21105 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
21106
21107 /* Neon element/structure load/store. */
21108 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
21109 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
21110 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
21111 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
21112 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
21113 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
21114 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
21115 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
21116
21117 #undef THUMB_VARIANT
21118 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
21119 #undef ARM_VARIANT
21120 #define ARM_VARIANT & fpu_vfp_ext_v3xd
21121 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
21122 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
21123 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
21124 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
21125 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
21126 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
21127 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
21128 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
21129 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
21130
21131 #undef THUMB_VARIANT
21132 #define THUMB_VARIANT & fpu_vfp_ext_v3
21133 #undef ARM_VARIANT
21134 #define ARM_VARIANT & fpu_vfp_ext_v3
21135
21136 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
21137 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
21138 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
21139 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
21140 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
21141 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
21142 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
21143 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
21144 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
21145
21146 #undef ARM_VARIANT
21147 #define ARM_VARIANT & fpu_vfp_ext_fma
21148 #undef THUMB_VARIANT
21149 #define THUMB_VARIANT & fpu_vfp_ext_fma
21150 /* Mnemonics shared by Neon and VFP. These are included in the
21151 VFP FMA variant; NEON and VFP FMA always includes the NEON
21152 FMA instructions. */
21153 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
21154 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
21155 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
21156 the v form should always be used. */
21157 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21158 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21159 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21160 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21161 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
21162 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
21163
21164 #undef THUMB_VARIANT
21165 #undef ARM_VARIANT
21166 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
21167
21168 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21169 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21170 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21171 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21172 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21173 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21174 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
21175 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
21176
21177 #undef ARM_VARIANT
21178 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
21179
21180 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
21181 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
21182 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
21183 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
21184 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
21185 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
21186 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
21187 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
21188 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
21189 cCE("textrmub",e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
21190 cCE("textrmuh",e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
21191 cCE("textrmuw",e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
21192 cCE("textrmsb",e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
21193 cCE("textrmsh",e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
21194 cCE("textrmsw",e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
21195 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
21196 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
21197 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
21198 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
21199 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
21200 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21201 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21202 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21203 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21204 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21205 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21206 cCE("tmovmskb",e100030, 2, (RR, RIWR), rd_rn),
21207 cCE("tmovmskh",e500030, 2, (RR, RIWR), rd_rn),
21208 cCE("tmovmskw",e900030, 2, (RR, RIWR), rd_rn),
21209 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
21210 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
21211 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
21212 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
21213 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
21214 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
21215 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
21216 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
21217 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21218 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21219 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21220 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21221 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21222 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21223 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21224 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21225 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21226 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
21227 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21228 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21229 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21230 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21231 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21232 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21233 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21234 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21235 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21236 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21237 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21238 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21239 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21240 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21241 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21242 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21243 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21244 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21245 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21246 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21247 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21248 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
21249 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
21250 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21251 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21252 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21253 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21254 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21255 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21256 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21257 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21258 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21259 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21260 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21261 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21262 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21263 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21264 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21265 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21266 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21267 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21268 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
21269 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21270 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21271 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21272 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21273 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21274 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21275 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21276 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21277 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21278 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21279 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21280 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21281 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21282 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21283 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21284 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21285 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21286 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21287 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21288 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21289 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21290 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
21291 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21292 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21293 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21294 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21295 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21296 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21297 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21298 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21299 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21300 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21301 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21302 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21303 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21304 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21305 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21306 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21307 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21308 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21309 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21310 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21311 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
21312 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
21313 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21314 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21315 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21316 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21317 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21318 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21319 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21320 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21321 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21322 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
21323 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
21324 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
21325 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
21326 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
21327 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
21328 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21329 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21330 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21331 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
21332 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
21333 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
21334 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
21335 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
21336 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
21337 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21338 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21339 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21340 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21341 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
21342
21343 #undef ARM_VARIANT
21344 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
21345
21346 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
21347 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
21348 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
21349 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
21350 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
21351 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
21352 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21353 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21354 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21355 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21356 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21357 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21358 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21359 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21360 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21361 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21362 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21363 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21364 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21365 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21366 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
21367 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21368 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21369 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21370 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21371 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21372 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21373 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21374 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21375 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21376 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21377 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21378 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21379 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21380 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21381 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21382 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21383 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21384 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21385 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21386 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21387 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21388 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21389 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21390 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21391 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21392 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21393 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21394 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21395 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21396 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21397 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21398 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21399 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21400 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21401 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21402 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21403
21404 #undef ARM_VARIANT
21405 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
21406
21407 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
21408 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
21409 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
21410 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
21411 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
21412 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
21413 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
21414 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
21415 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
21416 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
21417 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
21418 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
21419 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
21420 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
21421 cCE("cfmv64lr",e000510, 2, (RMDX, RR), rn_rd),
21422 cCE("cfmvr64l",e100510, 2, (RR, RMDX), rd_rn),
21423 cCE("cfmv64hr",e000530, 2, (RMDX, RR), rn_rd),
21424 cCE("cfmvr64h",e100530, 2, (RR, RMDX), rd_rn),
21425 cCE("cfmval32",e200440, 2, (RMAX, RMFX), rd_rn),
21426 cCE("cfmv32al",e100440, 2, (RMFX, RMAX), rd_rn),
21427 cCE("cfmvam32",e200460, 2, (RMAX, RMFX), rd_rn),
21428 cCE("cfmv32am",e100460, 2, (RMFX, RMAX), rd_rn),
21429 cCE("cfmvah32",e200480, 2, (RMAX, RMFX), rd_rn),
21430 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX), rd_rn),
21431 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
21432 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
21433 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
21434 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
21435 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX), mav_dspsc),
21436 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS), rd),
21437 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
21438 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
21439 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
21440 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
21441 cCE("cfcvt32s",e000480, 2, (RMF, RMFX), rd_rn),
21442 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX), rd_rn),
21443 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX), rd_rn),
21444 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX), rd_rn),
21445 cCE("cfcvts32",e100580, 2, (RMFX, RMF), rd_rn),
21446 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD), rd_rn),
21447 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
21448 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
21449 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR), mav_triple),
21450 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR), mav_triple),
21451 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
21452 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
21453 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
21454 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
21455 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
21456 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
21457 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
21458 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
21459 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
21460 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
21461 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
21462 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
21463 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
21464 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
21465 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
21466 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
21467 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
21468 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
21469 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
21470 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
21471 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21472 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
21473 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21474 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
21475 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21476 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
21477 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21478 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21479 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
21480 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
21481 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
21482 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
21483
21484 /* ARMv8-M instructions. */
21485 #undef ARM_VARIANT
21486 #define ARM_VARIANT NULL
21487 #undef THUMB_VARIANT
21488 #define THUMB_VARIANT & arm_ext_v8m
21489 ToU("sg", e97fe97f, 0, (), noargs),
21490 ToC("blxns", 4784, 1, (RRnpc), t_blx),
21491 ToC("bxns", 4704, 1, (RRnpc), t_bx),
21492 ToC("tt", e840f000, 2, (RRnpc, RRnpc), tt),
21493 ToC("ttt", e840f040, 2, (RRnpc, RRnpc), tt),
21494 ToC("tta", e840f080, 2, (RRnpc, RRnpc), tt),
21495 ToC("ttat", e840f0c0, 2, (RRnpc, RRnpc), tt),
21496
21497 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
21498 instructions behave as nop if no VFP is present. */
21499 #undef THUMB_VARIANT
21500 #define THUMB_VARIANT & arm_ext_v8m_main
21501 ToC("vlldm", ec300a00, 1, (RRnpc), rn),
21502 ToC("vlstm", ec200a00, 1, (RRnpc), rn),
21503 };
21504 #undef ARM_VARIANT
21505 #undef THUMB_VARIANT
21506 #undef TCE
21507 #undef TUE
21508 #undef TUF
21509 #undef TCC
21510 #undef cCE
21511 #undef cCL
21512 #undef C3E
21513 #undef CE
21514 #undef CM
21515 #undef UE
21516 #undef UF
21517 #undef UT
21518 #undef NUF
21519 #undef nUF
21520 #undef NCE
21521 #undef nCE
21522 #undef OPS0
21523 #undef OPS1
21524 #undef OPS2
21525 #undef OPS3
21526 #undef OPS4
21527 #undef OPS5
21528 #undef OPS6
21529 #undef do_0
21530 \f
21531 /* MD interface: bits in the object file. */
21532
21533 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
21534 for use in the a.out file, and stores them in the array pointed to by buf.
21535 This knows about the endian-ness of the target machine and does
21536 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
21537 2 (short) and 4 (long) Floating numbers are put out as a series of
21538 LITTLENUMS (shorts, here at least). */
21539
21540 void
21541 md_number_to_chars (char * buf, valueT val, int n)
21542 {
21543 if (target_big_endian)
21544 number_to_chars_bigendian (buf, val, n);
21545 else
21546 number_to_chars_littleendian (buf, val, n);
21547 }
21548
21549 static valueT
21550 md_chars_to_number (char * buf, int n)
21551 {
21552 valueT result = 0;
21553 unsigned char * where = (unsigned char *) buf;
21554
21555 if (target_big_endian)
21556 {
21557 while (n--)
21558 {
21559 result <<= 8;
21560 result |= (*where++ & 255);
21561 }
21562 }
21563 else
21564 {
21565 while (n--)
21566 {
21567 result <<= 8;
21568 result |= (where[n] & 255);
21569 }
21570 }
21571
21572 return result;
21573 }
21574
21575 /* MD interface: Sections. */
21576
21577 /* Calculate the maximum variable size (i.e., excluding fr_fix)
21578 that an rs_machine_dependent frag may reach. */
21579
21580 unsigned int
21581 arm_frag_max_var (fragS *fragp)
21582 {
21583 /* We only use rs_machine_dependent for variable-size Thumb instructions,
21584 which are either THUMB_SIZE (2) or INSN_SIZE (4).
21585
21586 Note that we generate relaxable instructions even for cases that don't
21587 really need it, like an immediate that's a trivial constant. So we're
21588 overestimating the instruction size for some of those cases. Rather
21589 than putting more intelligence here, it would probably be better to
21590 avoid generating a relaxation frag in the first place when it can be
21591 determined up front that a short instruction will suffice. */
21592
21593 gas_assert (fragp->fr_type == rs_machine_dependent);
21594 return INSN_SIZE;
21595 }
21596
21597 /* Estimate the size of a frag before relaxing. Assume everything fits in
21598 2 bytes. */
21599
21600 int
21601 md_estimate_size_before_relax (fragS * fragp,
21602 segT segtype ATTRIBUTE_UNUSED)
21603 {
21604 fragp->fr_var = 2;
21605 return 2;
21606 }
21607
/* Convert a machine dependent frag.  Called once relaxation has
   settled: if the frag ended up wide (fr_var == 4) the narrow 16-bit
   instruction already in the frag is rewritten as the corresponding
   32-bit Thumb encoding, and in either case a fixup of the appropriate
   relocation type is attached for the operand.  */

void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* The instruction sits at the end of the fixed part of the frag.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  /* The narrow (16-bit) encoding currently in the frag; its register
     fields are transplanted into the wide encoding below.  */
  old_op = bfd_get_16(abfd, buf);
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  /* fr_subtype holds the T_MNEM_* code recorded when the relaxable
     instruction was assembled.  */
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Narrow opcode groups 4 and 9 keep their register in bits
	     8-10 (NOTE(review): presumably the SP/PC-relative forms --
	     confirm against the Thumb encoding tables); the others
	     carry Rt and Rn in the low six bits.  */
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Of this group only the ldr-pc pseudo is PC-relative.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* NOTE(review): the narrow form biases the addend by -4 --
	     presumably compensating for the PC offset; confirm.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* For mov/movs the narrow register field (bits 8-10) maps
	     straight across; for cmp/cmn it moves up 8 bits in the
	     wide encoding.  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      /* Unconditional branch: nothing to transplant beyond the opcode.  */
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Carry the condition field across into the wide encoding.  */
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 distinguishes the flag-setting forms here --
	     NOTE(review): confirm which reloc pairs with which form.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort ();
    }
  /* Attach the fixup covering the (possibly widened) instruction.  */
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;

  /* Set whether we use thumb-2 ISA based on final relaxation results.  */
  if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
      && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
    ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
}
21781
21782 /* Return the size of a relaxable immediate operand instruction.
21783 SHIFT and SIZE specify the form of the allowable immediate. */
21784 static int
21785 relax_immediate (fragS *fragp, int size, int shift)
21786 {
21787 offsetT offset;
21788 offsetT mask;
21789 offsetT low;
21790
21791 /* ??? Should be able to do better than this. */
21792 if (fragp->fr_symbol)
21793 return 4;
21794
21795 low = (1 << shift) - 1;
21796 mask = (1 << (shift + size)) - (1 << shift);
21797 offset = fragp->fr_offset;
21798 /* Force misaligned offsets to 32-bit variant. */
21799 if (offset & low)
21800 return 4;
21801 if (offset & ~mask)
21802 return 4;
21803 return 2;
21804 }
21805
/* Get the address of a symbol during relaxation.  STRETCH is the
   amount by which frags processed earlier on this pass have moved
   (negative when they shrank).  */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* Round the accumulated movement towards zero to the
		 alignment boundary; once it rounds to zero no later
		 frag can be displaced by it.  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      if (stretch == 0)
		break;
	    }
	}
      /* Apply the (possibly reduced) stretch only if the symbol's frag
	 was actually found ahead of us in the chain.  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
21855
21856 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
21857 load. */
21858 static int
21859 relax_adr (fragS *fragp, asection *sec, long stretch)
21860 {
21861 addressT addr;
21862 offsetT val;
21863
21864 /* Assume worst case for symbols not known to be in the same section. */
21865 if (fragp->fr_symbol == NULL
21866 || !S_IS_DEFINED (fragp->fr_symbol)
21867 || sec != S_GET_SEGMENT (fragp->fr_symbol)
21868 || S_IS_WEAK (fragp->fr_symbol))
21869 return 4;
21870
21871 val = relaxed_symbol_addr (fragp, stretch);
21872 addr = fragp->fr_address + fragp->fr_fix;
21873 addr = (addr + 4) & ~3;
21874 /* Force misaligned targets to 32-bit variant. */
21875 if (val & 3)
21876 return 4;
21877 val -= addr;
21878 if (val < 0 || val > 1020)
21879 return 4;
21880 return 2;
21881 }
21882
21883 /* Return the size of a relaxable add/sub immediate instruction. */
21884 static int
21885 relax_addsub (fragS *fragp, asection *sec)
21886 {
21887 char *buf;
21888 int op;
21889
21890 buf = fragp->fr_literal + fragp->fr_fix;
21891 op = bfd_get_16(sec->owner, buf);
21892 if ((op & 0xf) == ((op >> 4) & 0xf))
21893 return relax_immediate (fragp, 8, 0);
21894 else
21895 return relax_immediate (fragp, 3, 0);
21896 }
21897
21898 /* Return TRUE iff the definition of symbol S could be pre-empted
21899 (overridden) at link or load time. */
21900 static bfd_boolean
21901 symbol_preemptible (symbolS *s)
21902 {
21903 /* Weak symbols can always be pre-empted. */
21904 if (S_IS_WEAK (s))
21905 return TRUE;
21906
21907 /* Non-global symbols cannot be pre-empted. */
21908 if (! S_IS_EXTERNAL (s))
21909 return FALSE;
21910
21911 #ifdef OBJ_ELF
21912 /* In ELF, a global symbol can be marked protected, or private. In that
21913 case it can't be pre-empted (other definitions in the same link unit
21914 would violate the ODR). */
21915 if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
21916 return FALSE;
21917 #endif
21918
21919 /* Other global symbols might be pre-empted. */
21920 return TRUE;
21921 }
21922
21923 /* Return the size of a relaxable branch instruction. BITS is the
21924 size of the offset field in the narrow instruction. */
21925
21926 static int
21927 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
21928 {
21929 addressT addr;
21930 offsetT val;
21931 offsetT limit;
21932
21933 /* Assume worst case for symbols not known to be in the same section. */
21934 if (!S_IS_DEFINED (fragp->fr_symbol)
21935 || sec != S_GET_SEGMENT (fragp->fr_symbol)
21936 || S_IS_WEAK (fragp->fr_symbol))
21937 return 4;
21938
21939 #ifdef OBJ_ELF
21940 /* A branch to a function in ARM state will require interworking. */
21941 if (S_IS_DEFINED (fragp->fr_symbol)
21942 && ARM_IS_FUNC (fragp->fr_symbol))
21943 return 4;
21944 #endif
21945
21946 if (symbol_preemptible (fragp->fr_symbol))
21947 return 4;
21948
21949 val = relaxed_symbol_addr (fragp, stretch);
21950 addr = fragp->fr_address + fragp->fr_fix + 4;
21951 val -= addr;
21952
21953 /* Offset is a signed value *2 */
21954 limit = 1 << bits;
21955 if (val >= limit || val < -limit)
21956 return 4;
21957 return 2;
21958 }
21959
21960
21961 /* Relax a machine dependent frag. This returns the amount by which
21962 the current size of the frag should change. */
21963
21964 int
21965 arm_relax_frag (asection *sec, fragS *fragp, long stretch)
21966 {
21967 int oldsize;
21968 int newsize;
21969
21970 oldsize = fragp->fr_var;
21971 switch (fragp->fr_subtype)
21972 {
21973 case T_MNEM_ldr_pc2:
21974 newsize = relax_adr (fragp, sec, stretch);
21975 break;
21976 case T_MNEM_ldr_pc:
21977 case T_MNEM_ldr_sp:
21978 case T_MNEM_str_sp:
21979 newsize = relax_immediate (fragp, 8, 2);
21980 break;
21981 case T_MNEM_ldr:
21982 case T_MNEM_str:
21983 newsize = relax_immediate (fragp, 5, 2);
21984 break;
21985 case T_MNEM_ldrh:
21986 case T_MNEM_strh:
21987 newsize = relax_immediate (fragp, 5, 1);
21988 break;
21989 case T_MNEM_ldrb:
21990 case T_MNEM_strb:
21991 newsize = relax_immediate (fragp, 5, 0);
21992 break;
21993 case T_MNEM_adr:
21994 newsize = relax_adr (fragp, sec, stretch);
21995 break;
21996 case T_MNEM_mov:
21997 case T_MNEM_movs:
21998 case T_MNEM_cmp:
21999 case T_MNEM_cmn:
22000 newsize = relax_immediate (fragp, 8, 0);
22001 break;
22002 case T_MNEM_b:
22003 newsize = relax_branch (fragp, sec, 11, stretch);
22004 break;
22005 case T_MNEM_bcond:
22006 newsize = relax_branch (fragp, sec, 8, stretch);
22007 break;
22008 case T_MNEM_add_sp:
22009 case T_MNEM_add_pc:
22010 newsize = relax_immediate (fragp, 8, 2);
22011 break;
22012 case T_MNEM_inc_sp:
22013 case T_MNEM_dec_sp:
22014 newsize = relax_immediate (fragp, 7, 2);
22015 break;
22016 case T_MNEM_addi:
22017 case T_MNEM_addis:
22018 case T_MNEM_subi:
22019 case T_MNEM_subis:
22020 newsize = relax_addsub (fragp, sec);
22021 break;
22022 default:
22023 abort ();
22024 }
22025
22026 fragp->fr_var = newsize;
22027 /* Freeze wide instructions that are at or before the same location as
22028 in the previous pass. This avoids infinite loops.
22029 Don't freeze them unconditionally because targets may be artificially
22030 misaligned by the expansion of preceding frags. */
22031 if (stretch <= 0 && newsize > 2)
22032 {
22033 md_convert_frag (sec->owner, sec, fragp);
22034 frag_wane (fragp);
22035 }
22036
22037 return newsize - oldsize;
22038 }
22039
/* Round up a section size to the appropriate boundary.  No extra
   padding is applied here: the size is returned unchanged.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED,
		  valueT size)
{
  return size;
}
22048
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment: the padding is emitted as no-op
   instructions of the mode (ARM or Thumb) recorded in the frag, with
   any sub-instruction residue zero-filled as data.  */

void
arm_handle_align (fragS * fragP)
{
  /* ARM no-op encodings, indexed [have-v6k?][big-endian?].  */
  static unsigned char const arm_noop[2][2][4] =
    {
      {  /* ARMv1 */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      {  /* ARMv6k */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  /* Thumb no-op encodings, indexed [thumb-2?][big-endian?].  */
  static unsigned char const thumb_noop[2][2][2] =
    {
      {  /* Thumb-1 */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      {  /* Thumb-2 */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  /* 32-bit Thumb-2 no-op, indexed [big-endian?].  */
  static unsigned char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const unsigned char * noop;
  const unsigned char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  /* Number of padding bytes this alignment must produce.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  /* NOTE(review): the mask assumes MAX_MEM_FOR_RS_ALIGN_CODE is of the
     form 2^n - 1 -- confirm against its definition.  */
  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      /* Thumb padding.  Use wide no-ops (with one leading narrow no-op
	 for odd halfword counts) when Thumb-2 is available.  */
      if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
	{
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      /* ARM padding.  */
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
					   ? selected_cpu : arm_arch_none,
					   arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  /* Zero-fill any residue that cannot hold a whole no-op, and (for
     ELF) mark it with a data mapping symbol.  */
  if (bytes & (noop_size - 1))
    {
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
22168
22169 /* Called from md_do_align. Used to create an alignment
22170 frag in a code section. */
22171
22172 void
22173 arm_frag_align_code (int n, int max)
22174 {
22175 char * p;
22176
22177 /* We assume that there will never be a requirement
22178 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
22179 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
22180 {
22181 char err_msg[128];
22182
22183 sprintf (err_msg,
22184 _("alignments greater than %d bytes not supported in .text sections."),
22185 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
22186 as_fatal ("%s", err_msg);
22187 }
22188
22189 p = frag_var (rs_align_code,
22190 MAX_MEM_FOR_RS_ALIGN_CODE,
22191 1,
22192 (relax_substateT) max,
22193 (symbolS *) NULL,
22194 (offsetT) n,
22195 (char *) NULL);
22196 *p = 0;
22197 }
22198
22199 /* Perform target specific initialisation of a frag.
22200 Note - despite the name this initialisation is not done when the frag
22201 is created, but only when its type is assigned. A frag can be created
22202 and used a long time before its type is set, so beware of assuming that
22203 this initialisation is performed first. */
22204
22205 #ifndef OBJ_ELF
void
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
{
  /* Record whether this frag is in an ARM or a THUMB area.  MODE_RECORDED
     marks the stored mode as valid.  (Non-ELF build: no mapping symbols
     are maintained here.)  */
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
}
22212
22213 #else /* OBJ_ELF is defined. */
void
arm_init_frag (fragS * fragP, int max_chars)
{
  bfd_boolean frag_thumb_mode;

  /* If the current ARM vs THUMB mode has not already
     been recorded into this frag then do so now.  */
  if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
    fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_get_section_flags (NULL, now_seg) & SEC_DEBUGGING)
    return;

  /* Strip the MODE_RECORDED marker (known to be set at this point),
     leaving just the recorded Thumb flag.  */
  frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED;

  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  switch (fragP->fr_type)
    {
    case rs_align:
    case rs_align_test:
    case rs_fill:
      /* Alignment padding and fills count as data.  */
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align_code:
      /* Code alignment is padded with no-ops of the recorded mode.  */
      mapping_state_2 (frag_thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
      break;
    default:
      break;
    }
}
22247
22248 /* When we change sections we need to issue a new mapping symbol. */
22249
22250 void
22251 arm_elf_change_section (void)
22252 {
22253 /* Link an unlinked unwind index table section to the .text section. */
22254 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
22255 && elf_linked_to_section (now_seg) == NULL)
22256 elf_linked_to_section (now_seg) = text_section;
22257 }
22258
22259 int
22260 arm_elf_section_type (const char * str, size_t len)
22261 {
22262 if (len == 5 && strncmp (str, "exidx", 5) == 0)
22263 return SHT_ARM_EXIDX;
22264
22265 return -1;
22266 }
22267 \f
22268 /* Code to deal with unwinding tables. */
22269
22270 static void add_unwind_adjustsp (offsetT);
22271
22272 /* Generate any deferred unwind frame offset. */
22273
22274 static void
22275 flush_pending_unwind (void)
22276 {
22277 offsetT offset;
22278
22279 offset = unwind.pending_offset;
22280 unwind.pending_offset = 0;
22281 if (offset != 0)
22282 add_unwind_adjustsp (offset);
22283 }
22284
22285 /* Add an opcode to this list for this function. Two-byte opcodes should
22286 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
22287 order. */
22288
22289 static void
22290 add_unwind_opcode (valueT op, int length)
22291 {
22292 /* Add any deferred stack adjustment. */
22293 if (unwind.pending_offset)
22294 flush_pending_unwind ();
22295
22296 unwind.sp_restored = 0;
22297
22298 if (unwind.opcode_count + length > unwind.opcode_alloc)
22299 {
22300 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
22301 if (unwind.opcodes)
22302 unwind.opcodes = XRESIZEVEC (unsigned char, unwind.opcodes,
22303 unwind.opcode_alloc);
22304 else
22305 unwind.opcodes = XNEWVEC (unsigned char, unwind.opcode_alloc);
22306 }
22307 while (length > 0)
22308 {
22309 length--;
22310 unwind.opcodes[unwind.opcode_count] = op & 0xff;
22311 op >>= 8;
22312 unwind.opcode_count++;
22313 }
22314 }
22315
22316 /* Add unwind opcodes to adjust the stack pointer. */
22317
22318 static void
22319 add_unwind_adjustsp (offsetT offset)
22320 {
22321 valueT op;
22322
22323 if (offset > 0x200)
22324 {
22325 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
22326 char bytes[5];
22327 int n;
22328 valueT o;
22329
22330 /* Long form: 0xb2, uleb128. */
22331 /* This might not fit in a word so add the individual bytes,
22332 remembering the list is built in reverse order. */
22333 o = (valueT) ((offset - 0x204) >> 2);
22334 if (o == 0)
22335 add_unwind_opcode (0, 1);
22336
22337 /* Calculate the uleb128 encoding of the offset. */
22338 n = 0;
22339 while (o)
22340 {
22341 bytes[n] = o & 0x7f;
22342 o >>= 7;
22343 if (o)
22344 bytes[n] |= 0x80;
22345 n++;
22346 }
22347 /* Add the insn. */
22348 for (; n; n--)
22349 add_unwind_opcode (bytes[n - 1], 1);
22350 add_unwind_opcode (0xb2, 1);
22351 }
22352 else if (offset > 0x100)
22353 {
22354 /* Two short opcodes. */
22355 add_unwind_opcode (0x3f, 1);
22356 op = (offset - 0x104) >> 2;
22357 add_unwind_opcode (op, 1);
22358 }
22359 else if (offset > 0)
22360 {
22361 /* Short opcode. */
22362 op = (offset - 4) >> 2;
22363 add_unwind_opcode (op, 1);
22364 }
22365 else if (offset < 0)
22366 {
22367 offset = -offset;
22368 while (offset > 0x100)
22369 {
22370 add_unwind_opcode (0x7f, 1);
22371 offset -= 0x100;
22372 }
22373 op = ((offset - 4) >> 2) | 0x40;
22374 add_unwind_opcode (op, 1);
22375 }
22376 }
22377
22378 /* Finish the list of unwind opcodes for this function. */
22379
22380 static void
22381 finish_unwind_opcodes (void)
22382 {
22383 valueT op;
22384
22385 if (unwind.fp_used)
22386 {
22387 /* Adjust sp as necessary. */
22388 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
22389 flush_pending_unwind ();
22390
22391 /* After restoring sp from the frame pointer. */
22392 op = 0x90 | unwind.fp_reg;
22393 add_unwind_opcode (op, 1);
22394 }
22395 else
22396 flush_pending_unwind ();
22397 }
22398
22399
22400 /* Start an exception table entry. If idx is nonzero this is an index table
22401 entry. */
22402
static void
start_unwind_section (const segT text_seg, int idx)
{
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  const char * group_name;
  char * sec_name;
  int type;
  int flags;
  int linkonce;

  /* Index tables get the unwind (exidx) prefix and section type;
     table entries get the unwind-info prefix and are plain progbits.
     The *_once prefixes are used for .gnu.linkonce text sections.  */
  if (idx)
    {
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
    }
  else
    {
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
    }

  /* Derive the unwind section name from the text section name;
     ".text" itself maps onto the bare prefix.  */
  text_name = segment_name (text_seg);
  if (streq (text_name, ".text"))
    text_name = "";

  if (strncmp (text_name, ".gnu.linkonce.t.",
	       strlen (".gnu.linkonce.t.")) == 0)
    {
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
    }

  /* Caller must free?  No - obj_elf_change_section keeps the name.  */
  sec_name = concat (prefix, text_name, (char *) NULL);

  flags = SHF_ALLOC;
  linkonce = 0;
  group_name = 0;

  /* Handle COMDAT group.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
    {
      group_name = elf_group_name (text_seg);
      if (group_name == NULL)
	{
	  as_bad (_("Group section `%s' has no group signature"),
		  segment_name (text_seg));
	  ignore_rest_of_line ();
	  return;
	}
      flags |= SHF_GROUP;
      linkonce = 1;
    }

  /* Switch to the new section; this makes it the current segment.  */
  obj_elf_change_section (sec_name, type, 0, flags, 0, group_name,
			  linkonce, 0);

  /* Set the section link for index tables.  */
  if (idx)
    elf_linked_to_section (now_seg) = text_seg;
}
22467
22468
22469 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
22470 personality routine data. Returns zero, or the index table value for
22471 an inline entry. */
22472
static valueT
create_unwind_entry (int have_data)
{
  /* Number of extra 32-bit words needed by the table entry.  */
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.  */
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  /* Switch to the unwind table-entry section (idx == 0).  */
  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      /* personality_index -2 means .cantunwind was seen.  */
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
	{
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  /* Routine 0 packs at most three opcode bytes.  */
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table: build the
		 word 0x80 | opcodes and return it to the caller, so no
		 extab entry is emitted at all.  */
	      data = 0x80;
	      n = 3;
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.  */
	size = unwind.opcode_count - 2;
    }
  else
    {
      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
      if (unwind.personality_index != -1)
	{
	  as_bad (_("attempt to recreate an unwind entry"));
	  return 1;
	}

      /* An extra byte is required for the opcode count.  */
      size = unwind.opcode_count + 1;
    }

  /* Round byte count up to whole words; the count field is 8 bits.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  /* The index table entry will point at this symbol.  */
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  /* PR 13449: Zero the table entries in case some of them are not used.  */
  memset (ptr, 0, (size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.  */
      data = size > 0 ? size - 1 : 0;
      n = 3;
      break;

      /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.  */
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  /* Zero means "table entry emitted"; the caller uses
     unwind.table_entry to reference it.  */
  return 0;
}
22637
22638
22639 /* Initialize the DWARF-2 unwind information for this procedure. */
22640
void
tc_arm_frame_initial_instructions (void)
{
  /* At function entry the CFA is the stack pointer with zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
22646 #endif /* OBJ_ELF */
22647
22648 /* Convert REGNAME to a DWARF-2 register number. */
22649
22650 int
22651 tc_arm_regname_to_dw2regnum (char *regname)
22652 {
22653 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
22654 if (reg != FAIL)
22655 return reg;
22656
22657 /* PR 16694: Allow VFP registers as well. */
22658 reg = arm_reg_parse (&regname, REG_TYPE_VFS);
22659 if (reg != FAIL)
22660 return 64 + reg;
22661
22662 reg = arm_reg_parse (&regname, REG_TYPE_VFD);
22663 if (reg != FAIL)
22664 return reg + 256;
22665
22666 return FAIL;
22667 }
22668
22669 #ifdef TE_PE
22670 void
22671 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
22672 {
22673 expressionS exp;
22674
22675 exp.X_op = O_secrel;
22676 exp.X_add_symbol = symbol;
22677 exp.X_add_number = 0;
22678 emit_expr (&exp, size);
22679 }
22680 #endif
22681
22682 /* MD interface: Symbol and relocation handling. */
22683
22684 /* Return the address within the segment that a PC-relative fixup is
22685 relative to. For ARM, PC-relative fixups applied to instructions
22686 are generally relative to the location of the fixup plus 8 bytes.
22687 Thumb branches are offset by 4, and Thumb loads relative to PC
22688 require special handling. */
22689
long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* NOTE(review): when the target is a locally-defined ARM function
	 and the architecture has BLX (v5t), the raw fixup address is
	 used instead of the zeroed base - presumably because the
	 BL<->BLX conversion code applies its own compensation; the
	 three similar cases below follow the same pattern.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
22811
22812 static bfd_boolean flag_warn_syms = TRUE;
22813
22814 bfd_boolean
22815 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
22816 {
22817 /* PR 18347 - Warn if the user attempts to create a symbol with the same
22818 name as an ARM instruction. Whilst strictly speaking it is allowed, it
22819 does mean that the resulting code might be very confusing to the reader.
22820 Also this warning can be triggered if the user omits an operand before
22821 an immediate address, eg:
22822
22823 LDR =foo
22824
22825 GAS treats this as an assignment of the value of the symbol foo to a
22826 symbol LDR, and so (without this code) it will not issue any kind of
22827 warning or error message.
22828
22829 Note - ARM instructions are case-insensitive but the strings in the hash
22830 table are all stored in lower case, so we must first ensure that name is
22831 lower case too. */
22832 if (flag_warn_syms && arm_ops_hsh)
22833 {
22834 char * nbuf = strdup (name);
22835 char * p;
22836
22837 for (p = nbuf; *p; p++)
22838 *p = TOLOWER (*p);
22839 if (hash_find (arm_ops_hsh, nbuf) != NULL)
22840 {
22841 static struct hash_control * already_warned = NULL;
22842
22843 if (already_warned == NULL)
22844 already_warned = hash_new ();
22845 /* Only warn about the symbol once. To keep the code
22846 simple we let hash_insert do the lookup for us. */
22847 if (hash_insert (already_warned, name, NULL) == NULL)
22848 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
22849 }
22850 else
22851 free (nbuf);
22852 }
22853
22854 return FALSE;
22855 }
22856
22857 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
22858 Otherwise we have no need to default values of symbols. */
22859
22860 symbolS *
22861 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
22862 {
22863 #ifdef OBJ_ELF
22864 if (name[0] == '_' && name[1] == 'G'
22865 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
22866 {
22867 if (!GOT_symbol)
22868 {
22869 if (symbol_find (name))
22870 as_bad (_("GOT already in the symbol table"));
22871
22872 GOT_symbol = symbol_new (name, undefined_section,
22873 (valueT) 0, & zero_address_frag);
22874 }
22875
22876 return GOT_symbol;
22877 }
22878 #endif
22879
22880 return NULL;
22881 }
22882
22883 /* Subroutine of md_apply_fix. Check to see if an immediate can be
22884 computed as two separate immediate values, added together. We
22885 already know that this value cannot be computed by just one ARM
22886 instruction. */
22887
22888 static unsigned int
22889 validate_immediate_twopart (unsigned int val,
22890 unsigned int * highpart)
22891 {
22892 unsigned int a;
22893 unsigned int i;
22894
22895 for (i = 0; i < 32; i += 2)
22896 if (((a = rotate_left (val, i)) & 0xff) != 0)
22897 {
22898 if (a & 0xff00)
22899 {
22900 if (a & ~ 0xffff)
22901 continue;
22902 * highpart = (a >> 8) | ((i + 24) << 7);
22903 }
22904 else if (a & 0xff0000)
22905 {
22906 if (a & 0xff000000)
22907 continue;
22908 * highpart = (a >> 16) | ((i + 16) << 7);
22909 }
22910 else
22911 {
22912 gas_assert (a & 0xff000000);
22913 * highpart = (a >> 24) | ((i + 8) << 7);
22914 }
22915
22916 return (a & 0xff) | (i << 7);
22917 }
22918
22919 return FAIL;
22920 }
22921
22922 static int
22923 validate_offset_imm (unsigned int val, int hwse)
22924 {
22925 if ((hwse && val > 255) || val > 4095)
22926 return FAIL;
22927 return val;
22928 }
22929
22930 /* Subroutine of md_apply_fix. Do those data_ops which can take a
22931 negative immediate constant by altering the instruction. A bit of
22932 a hack really.
22933 MOV <-> MVN
22934 AND <-> BIC
22935 ADC <-> SBC
22936 by inverting the second operand, and
22937 ADD <-> SUB
22938 CMP <-> CMN
22939 by negating the second operand. */
22940
22941 static int
22942 negate_data_op (unsigned long * instruction,
22943 unsigned long value)
22944 {
22945 int op, new_inst;
22946 unsigned long negated, inverted;
22947
22948 negated = encode_arm_immediate (-value);
22949 inverted = encode_arm_immediate (~value);
22950
22951 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
22952 switch (op)
22953 {
22954 /* First negates. */
22955 case OPCODE_SUB: /* ADD <-> SUB */
22956 new_inst = OPCODE_ADD;
22957 value = negated;
22958 break;
22959
22960 case OPCODE_ADD:
22961 new_inst = OPCODE_SUB;
22962 value = negated;
22963 break;
22964
22965 case OPCODE_CMP: /* CMP <-> CMN */
22966 new_inst = OPCODE_CMN;
22967 value = negated;
22968 break;
22969
22970 case OPCODE_CMN:
22971 new_inst = OPCODE_CMP;
22972 value = negated;
22973 break;
22974
22975 /* Now Inverted ops. */
22976 case OPCODE_MOV: /* MOV <-> MVN */
22977 new_inst = OPCODE_MVN;
22978 value = inverted;
22979 break;
22980
22981 case OPCODE_MVN:
22982 new_inst = OPCODE_MOV;
22983 value = inverted;
22984 break;
22985
22986 case OPCODE_AND: /* AND <-> BIC */
22987 new_inst = OPCODE_BIC;
22988 value = inverted;
22989 break;
22990
22991 case OPCODE_BIC:
22992 new_inst = OPCODE_AND;
22993 value = inverted;
22994 break;
22995
22996 case OPCODE_ADC: /* ADC <-> SBC */
22997 new_inst = OPCODE_SBC;
22998 value = inverted;
22999 break;
23000
23001 case OPCODE_SBC:
23002 new_inst = OPCODE_ADC;
23003 value = inverted;
23004 break;
23005
23006 /* We cannot do anything. */
23007 default:
23008 return FAIL;
23009 }
23010
23011 if (value == (unsigned) FAIL)
23012 return FAIL;
23013
23014 *instruction &= OPCODE_MASK;
23015 *instruction |= new_inst << DATA_OP_SHIFT;
23016 return value;
23017 }
23018
23019 /* Like negate_data_op, but for Thumb-2. */
23020
23021 static unsigned int
23022 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
23023 {
23024 int op, new_inst;
23025 int rd;
23026 unsigned int negated, inverted;
23027
23028 negated = encode_thumb32_immediate (-value);
23029 inverted = encode_thumb32_immediate (~value);
23030
23031 rd = (*instruction >> 8) & 0xf;
23032 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
23033 switch (op)
23034 {
23035 /* ADD <-> SUB. Includes CMP <-> CMN. */
23036 case T2_OPCODE_SUB:
23037 new_inst = T2_OPCODE_ADD;
23038 value = negated;
23039 break;
23040
23041 case T2_OPCODE_ADD:
23042 new_inst = T2_OPCODE_SUB;
23043 value = negated;
23044 break;
23045
23046 /* ORR <-> ORN. Includes MOV <-> MVN. */
23047 case T2_OPCODE_ORR:
23048 new_inst = T2_OPCODE_ORN;
23049 value = inverted;
23050 break;
23051
23052 case T2_OPCODE_ORN:
23053 new_inst = T2_OPCODE_ORR;
23054 value = inverted;
23055 break;
23056
23057 /* AND <-> BIC. TST has no inverted equivalent. */
23058 case T2_OPCODE_AND:
23059 new_inst = T2_OPCODE_BIC;
23060 if (rd == 15)
23061 value = FAIL;
23062 else
23063 value = inverted;
23064 break;
23065
23066 case T2_OPCODE_BIC:
23067 new_inst = T2_OPCODE_AND;
23068 value = inverted;
23069 break;
23070
23071 /* ADC <-> SBC */
23072 case T2_OPCODE_ADC:
23073 new_inst = T2_OPCODE_SBC;
23074 value = inverted;
23075 break;
23076
23077 case T2_OPCODE_SBC:
23078 new_inst = T2_OPCODE_ADC;
23079 value = inverted;
23080 break;
23081
23082 /* We cannot do anything. */
23083 default:
23084 return FAIL;
23085 }
23086
23087 if (value == (unsigned int)FAIL)
23088 return FAIL;
23089
23090 *instruction &= T2_OPCODE_MASK;
23091 *instruction |= new_inst << T2_DATA_OP_SHIFT;
23092 return value;
23093 }
23094
23095 /* Read a 32-bit thumb instruction from buf. */
23096
23097 static unsigned long
23098 get_thumb32_insn (char * buf)
23099 {
23100 unsigned long insn;
23101 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
23102 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23103
23104 return insn;
23105 }
23106
23107 /* We usually want to set the low bit on the address of thumb function
23108 symbols. In particular .word foo - . should have the low bit set.
23109 Generic code tries to fold the difference of two symbols to
23110 a constant. Prevent this and force a relocation when the first symbols
23111 is a thumb function. */
23112
23113 bfd_boolean
23114 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
23115 {
23116 if (op == O_subtract
23117 && l->X_op == O_symbol
23118 && r->X_op == O_symbol
23119 && THUMB_IS_FUNC (l->X_add_symbol))
23120 {
23121 l->X_op = O_subtract;
23122 l->X_op_symbol = r->X_add_symbol;
23123 l->X_add_number -= r->X_add_number;
23124 return TRUE;
23125 }
23126
23127 /* Process as normal. */
23128 return FALSE;
23129 }
23130
23131 /* Encode Thumb2 unconditional branches and calls. The encoding
23132 for the 2 are identical for the immediate values. */
23133
23134 static void
23135 encode_thumb2_b_bl_offset (char * buf, offsetT value)
23136 {
23137 #define T2I1I2MASK ((1 << 13) | (1 << 11))
23138 offsetT newval;
23139 offsetT newval2;
23140 addressT S, I1, I2, lo, hi;
23141
23142 S = (value >> 24) & 0x01;
23143 I1 = (value >> 23) & 0x01;
23144 I2 = (value >> 22) & 0x01;
23145 hi = (value >> 12) & 0x3ff;
23146 lo = (value >> 1) & 0x7ff;
23147 newval = md_chars_to_number (buf, THUMB_SIZE);
23148 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23149 newval |= (S << 10) | hi;
23150 newval2 &= ~T2I1I2MASK;
23151 newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
23152 md_number_to_chars (buf, newval, THUMB_SIZE);
23153 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
23154 }
23155
23156 void
23157 md_apply_fix (fixS * fixP,
23158 valueT * valP,
23159 segT seg)
23160 {
23161 offsetT value = * valP;
23162 offsetT newval;
23163 unsigned int newimm;
23164 unsigned long temp;
23165 int sign;
23166 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
23167
23168 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
23169
23170 /* Note whether this will delete the relocation. */
23171
23172 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
23173 fixP->fx_done = 1;
23174
23175 /* On a 64-bit host, silently truncate 'value' to 32 bits for
23176 consistency with the behaviour on 32-bit hosts. Remember value
23177 for emit_reloc. */
23178 value &= 0xffffffff;
23179 value ^= 0x80000000;
23180 value -= 0x80000000;
23181
23182 *valP = value;
23183 fixP->fx_addnumber = value;
23184
23185 /* Same treatment for fixP->fx_offset. */
23186 fixP->fx_offset &= 0xffffffff;
23187 fixP->fx_offset ^= 0x80000000;
23188 fixP->fx_offset -= 0x80000000;
23189
23190 switch (fixP->fx_r_type)
23191 {
23192 case BFD_RELOC_NONE:
23193 /* This will need to go in the object file. */
23194 fixP->fx_done = 0;
23195 break;
23196
23197 case BFD_RELOC_ARM_IMMEDIATE:
23198 /* We claim that this fixup has been processed here,
23199 even if in fact we generate an error because we do
23200 not have a reloc for it, so tc_gen_reloc will reject it. */
23201 fixP->fx_done = 1;
23202
23203 if (fixP->fx_addsy)
23204 {
23205 const char *msg = 0;
23206
23207 if (! S_IS_DEFINED (fixP->fx_addsy))
23208 msg = _("undefined symbol %s used as an immediate value");
23209 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
23210 msg = _("symbol %s is in a different section");
23211 else if (S_IS_WEAK (fixP->fx_addsy))
23212 msg = _("symbol %s is weak and may be overridden later");
23213
23214 if (msg)
23215 {
23216 as_bad_where (fixP->fx_file, fixP->fx_line,
23217 msg, S_GET_NAME (fixP->fx_addsy));
23218 break;
23219 }
23220 }
23221
23222 temp = md_chars_to_number (buf, INSN_SIZE);
23223
23224 /* If the offset is negative, we should use encoding A2 for ADR. */
23225 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
23226 newimm = negate_data_op (&temp, value);
23227 else
23228 {
23229 newimm = encode_arm_immediate (value);
23230
23231 /* If the instruction will fail, see if we can fix things up by
23232 changing the opcode. */
23233 if (newimm == (unsigned int) FAIL)
23234 newimm = negate_data_op (&temp, value);
23235 /* MOV accepts both ARM modified immediate (A1 encoding) and
23236 UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
23237 When disassembling, MOV is preferred when there is no encoding
23238 overlap. */
23239 if (newimm == (unsigned int) FAIL
23240 && ((temp >> DATA_OP_SHIFT) & 0xf) == OPCODE_MOV
23241 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
23242 && !((temp >> SBIT_SHIFT) & 0x1)
23243 && value >= 0 && value <= 0xffff)
23244 {
23245 /* Clear bits[23:20] to change encoding from A1 to A2. */
23246 temp &= 0xff0fffff;
23247 /* Encoding high 4bits imm. Code below will encode the remaining
23248 low 12bits. */
23249 temp |= (value & 0x0000f000) << 4;
23250 newimm = value & 0x00000fff;
23251 }
23252 }
23253
23254 if (newimm == (unsigned int) FAIL)
23255 {
23256 as_bad_where (fixP->fx_file, fixP->fx_line,
23257 _("invalid constant (%lx) after fixup"),
23258 (unsigned long) value);
23259 break;
23260 }
23261
23262 newimm |= (temp & 0xfffff000);
23263 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
23264 break;
23265
23266 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
23267 {
23268 unsigned int highpart = 0;
23269 unsigned int newinsn = 0xe1a00000; /* nop. */
23270
23271 if (fixP->fx_addsy)
23272 {
23273 const char *msg = 0;
23274
23275 if (! S_IS_DEFINED (fixP->fx_addsy))
23276 msg = _("undefined symbol %s used as an immediate value");
23277 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
23278 msg = _("symbol %s is in a different section");
23279 else if (S_IS_WEAK (fixP->fx_addsy))
23280 msg = _("symbol %s is weak and may be overridden later");
23281
23282 if (msg)
23283 {
23284 as_bad_where (fixP->fx_file, fixP->fx_line,
23285 msg, S_GET_NAME (fixP->fx_addsy));
23286 break;
23287 }
23288 }
23289
23290 newimm = encode_arm_immediate (value);
23291 temp = md_chars_to_number (buf, INSN_SIZE);
23292
23293 /* If the instruction will fail, see if we can fix things up by
23294 changing the opcode. */
23295 if (newimm == (unsigned int) FAIL
23296 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
23297 {
23298 /* No ? OK - try using two ADD instructions to generate
23299 the value. */
23300 newimm = validate_immediate_twopart (value, & highpart);
23301
23302 /* Yes - then make sure that the second instruction is
23303 also an add. */
23304 if (newimm != (unsigned int) FAIL)
23305 newinsn = temp;
23306 /* Still No ? Try using a negated value. */
23307 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
23308 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
23309 /* Otherwise - give up. */
23310 else
23311 {
23312 as_bad_where (fixP->fx_file, fixP->fx_line,
23313 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
23314 (long) value);
23315 break;
23316 }
23317
23318 /* Replace the first operand in the 2nd instruction (which
23319 is the PC) with the destination register. We have
23320 already added in the PC in the first instruction and we
23321 do not want to do it again. */
23322 newinsn &= ~ 0xf0000;
23323 newinsn |= ((newinsn & 0x0f000) << 4);
23324 }
23325
23326 newimm |= (temp & 0xfffff000);
23327 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
23328
23329 highpart |= (newinsn & 0xfffff000);
23330 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
23331 }
23332 break;
23333
23334 case BFD_RELOC_ARM_OFFSET_IMM:
23335 if (!fixP->fx_done && seg->use_rela_p)
23336 value = 0;
23337 /* Fall through. */
23338
23339 case BFD_RELOC_ARM_LITERAL:
23340 sign = value > 0;
23341
23342 if (value < 0)
23343 value = - value;
23344
23345 if (validate_offset_imm (value, 0) == FAIL)
23346 {
23347 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
23348 as_bad_where (fixP->fx_file, fixP->fx_line,
23349 _("invalid literal constant: pool needs to be closer"));
23350 else
23351 as_bad_where (fixP->fx_file, fixP->fx_line,
23352 _("bad immediate value for offset (%ld)"),
23353 (long) value);
23354 break;
23355 }
23356
23357 newval = md_chars_to_number (buf, INSN_SIZE);
23358 if (value == 0)
23359 newval &= 0xfffff000;
23360 else
23361 {
23362 newval &= 0xff7ff000;
23363 newval |= value | (sign ? INDEX_UP : 0);
23364 }
23365 md_number_to_chars (buf, newval, INSN_SIZE);
23366 break;
23367
23368 case BFD_RELOC_ARM_OFFSET_IMM8:
23369 case BFD_RELOC_ARM_HWLITERAL:
23370 sign = value > 0;
23371
23372 if (value < 0)
23373 value = - value;
23374
23375 if (validate_offset_imm (value, 1) == FAIL)
23376 {
23377 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
23378 as_bad_where (fixP->fx_file, fixP->fx_line,
23379 _("invalid literal constant: pool needs to be closer"));
23380 else
23381 as_bad_where (fixP->fx_file, fixP->fx_line,
23382 _("bad immediate value for 8-bit offset (%ld)"),
23383 (long) value);
23384 break;
23385 }
23386
23387 newval = md_chars_to_number (buf, INSN_SIZE);
23388 if (value == 0)
23389 newval &= 0xfffff0f0;
23390 else
23391 {
23392 newval &= 0xff7ff0f0;
23393 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
23394 }
23395 md_number_to_chars (buf, newval, INSN_SIZE);
23396 break;
23397
23398 case BFD_RELOC_ARM_T32_OFFSET_U8:
23399 if (value < 0 || value > 1020 || value % 4 != 0)
23400 as_bad_where (fixP->fx_file, fixP->fx_line,
23401 _("bad immediate value for offset (%ld)"), (long) value);
23402 value /= 4;
23403
23404 newval = md_chars_to_number (buf+2, THUMB_SIZE);
23405 newval |= value;
23406 md_number_to_chars (buf+2, newval, THUMB_SIZE);
23407 break;
23408
23409 case BFD_RELOC_ARM_T32_OFFSET_IMM:
23410 /* This is a complicated relocation used for all varieties of Thumb32
23411 load/store instruction with immediate offset:
23412
23413 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
23414 *4, optional writeback(W)
23415 (doubleword load/store)
23416
23417 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
23418 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
23419 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
23420 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
23421 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
23422
23423 Uppercase letters indicate bits that are already encoded at
23424 this point. Lowercase letters are our problem. For the
23425 second block of instructions, the secondary opcode nybble
23426 (bits 8..11) is present, and bit 23 is zero, even if this is
23427 a PC-relative operation. */
23428 newval = md_chars_to_number (buf, THUMB_SIZE);
23429 newval <<= 16;
23430 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
23431
23432 if ((newval & 0xf0000000) == 0xe0000000)
23433 {
23434 /* Doubleword load/store: 8-bit offset, scaled by 4. */
23435 if (value >= 0)
23436 newval |= (1 << 23);
23437 else
23438 value = -value;
23439 if (value % 4 != 0)
23440 {
23441 as_bad_where (fixP->fx_file, fixP->fx_line,
23442 _("offset not a multiple of 4"));
23443 break;
23444 }
23445 value /= 4;
23446 if (value > 0xff)
23447 {
23448 as_bad_where (fixP->fx_file, fixP->fx_line,
23449 _("offset out of range"));
23450 break;
23451 }
23452 newval &= ~0xff;
23453 }
23454 else if ((newval & 0x000f0000) == 0x000f0000)
23455 {
23456 /* PC-relative, 12-bit offset. */
23457 if (value >= 0)
23458 newval |= (1 << 23);
23459 else
23460 value = -value;
23461 if (value > 0xfff)
23462 {
23463 as_bad_where (fixP->fx_file, fixP->fx_line,
23464 _("offset out of range"));
23465 break;
23466 }
23467 newval &= ~0xfff;
23468 }
23469 else if ((newval & 0x00000100) == 0x00000100)
23470 {
23471 /* Writeback: 8-bit, +/- offset. */
23472 if (value >= 0)
23473 newval |= (1 << 9);
23474 else
23475 value = -value;
23476 if (value > 0xff)
23477 {
23478 as_bad_where (fixP->fx_file, fixP->fx_line,
23479 _("offset out of range"));
23480 break;
23481 }
23482 newval &= ~0xff;
23483 }
23484 else if ((newval & 0x00000f00) == 0x00000e00)
23485 {
23486 /* T-instruction: positive 8-bit offset. */
23487 if (value < 0 || value > 0xff)
23488 {
23489 as_bad_where (fixP->fx_file, fixP->fx_line,
23490 _("offset out of range"));
23491 break;
23492 }
23493 newval &= ~0xff;
23494 newval |= value;
23495 }
23496 else
23497 {
23498 /* Positive 12-bit or negative 8-bit offset. */
23499 int limit;
23500 if (value >= 0)
23501 {
23502 newval |= (1 << 23);
23503 limit = 0xfff;
23504 }
23505 else
23506 {
23507 value = -value;
23508 limit = 0xff;
23509 }
23510 if (value > limit)
23511 {
23512 as_bad_where (fixP->fx_file, fixP->fx_line,
23513 _("offset out of range"));
23514 break;
23515 }
23516 newval &= ~limit;
23517 }
23518
23519 newval |= value;
23520 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
23521 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
23522 break;
23523
23524 case BFD_RELOC_ARM_SHIFT_IMM:
23525 newval = md_chars_to_number (buf, INSN_SIZE);
23526 if (((unsigned long) value) > 32
23527 || (value == 32
23528 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
23529 {
23530 as_bad_where (fixP->fx_file, fixP->fx_line,
23531 _("shift expression is too large"));
23532 break;
23533 }
23534
23535 if (value == 0)
23536 /* Shifts of zero must be done as lsl. */
23537 newval &= ~0x60;
23538 else if (value == 32)
23539 value = 0;
23540 newval &= 0xfffff07f;
23541 newval |= (value & 0x1f) << 7;
23542 md_number_to_chars (buf, newval, INSN_SIZE);
23543 break;
23544
23545 case BFD_RELOC_ARM_T32_IMMEDIATE:
23546 case BFD_RELOC_ARM_T32_ADD_IMM:
23547 case BFD_RELOC_ARM_T32_IMM12:
23548 case BFD_RELOC_ARM_T32_ADD_PC12:
23549 /* We claim that this fixup has been processed here,
23550 even if in fact we generate an error because we do
23551 not have a reloc for it, so tc_gen_reloc will reject it. */
23552 fixP->fx_done = 1;
23553
23554 if (fixP->fx_addsy
23555 && ! S_IS_DEFINED (fixP->fx_addsy))
23556 {
23557 as_bad_where (fixP->fx_file, fixP->fx_line,
23558 _("undefined symbol %s used as an immediate value"),
23559 S_GET_NAME (fixP->fx_addsy));
23560 break;
23561 }
23562
23563 newval = md_chars_to_number (buf, THUMB_SIZE);
23564 newval <<= 16;
23565 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
23566
23567 newimm = FAIL;
23568 if ((fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
23569 /* ARMv8-M Baseline MOV will reach here, but it doesn't support
23570 Thumb2 modified immediate encoding (T2). */
23571 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
23572 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
23573 {
23574 newimm = encode_thumb32_immediate (value);
23575 if (newimm == (unsigned int) FAIL)
23576 newimm = thumb32_negate_data_op (&newval, value);
23577 }
23578 if (newimm == (unsigned int) FAIL)
23579 {
23580 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE)
23581 {
23582 /* Turn add/sum into addw/subw. */
23583 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
23584 newval = (newval & 0xfeffffff) | 0x02000000;
23585 /* No flat 12-bit imm encoding for addsw/subsw. */
23586 if ((newval & 0x00100000) == 0)
23587 {
23588 /* 12 bit immediate for addw/subw. */
23589 if (value < 0)
23590 {
23591 value = -value;
23592 newval ^= 0x00a00000;
23593 }
23594 if (value > 0xfff)
23595 newimm = (unsigned int) FAIL;
23596 else
23597 newimm = value;
23598 }
23599 }
23600 else
23601 {
23602 /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
23603 UINT16 (T3 encoding), MOVW only accepts UINT16. When
23604 disassembling, MOV is preferred when there is no encoding
23605 overlap. */
23606 if (((newval >> T2_DATA_OP_SHIFT) & 0xf) == T2_OPCODE_ORR
23607 /* NOTE: MOV uses the ORR opcode in Thumb 2 mode
23608 but with the Rn field [19:16] set to 1111. */
23609 && (((newval >> 16) & 0xf) == 0xf)
23610 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m)
23611 && !((newval >> T2_SBIT_SHIFT) & 0x1)
23612 && value >= 0 && value <= 0xffff)
23613 {
23614 /* Toggle bit[25] to change encoding from T2 to T3. */
23615 newval ^= 1 << 25;
23616 /* Clear bits[19:16]. */
23617 newval &= 0xfff0ffff;
23618 /* Encoding high 4bits imm. Code below will encode the
23619 remaining low 12bits. */
23620 newval |= (value & 0x0000f000) << 4;
23621 newimm = value & 0x00000fff;
23622 }
23623 }
23624 }
23625
23626 if (newimm == (unsigned int)FAIL)
23627 {
23628 as_bad_where (fixP->fx_file, fixP->fx_line,
23629 _("invalid constant (%lx) after fixup"),
23630 (unsigned long) value);
23631 break;
23632 }
23633
23634 newval |= (newimm & 0x800) << 15;
23635 newval |= (newimm & 0x700) << 4;
23636 newval |= (newimm & 0x0ff);
23637
23638 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
23639 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
23640 break;
23641
23642 case BFD_RELOC_ARM_SMC:
23643 if (((unsigned long) value) > 0xffff)
23644 as_bad_where (fixP->fx_file, fixP->fx_line,
23645 _("invalid smc expression"));
23646 newval = md_chars_to_number (buf, INSN_SIZE);
23647 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
23648 md_number_to_chars (buf, newval, INSN_SIZE);
23649 break;
23650
23651 case BFD_RELOC_ARM_HVC:
23652 if (((unsigned long) value) > 0xffff)
23653 as_bad_where (fixP->fx_file, fixP->fx_line,
23654 _("invalid hvc expression"));
23655 newval = md_chars_to_number (buf, INSN_SIZE);
23656 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
23657 md_number_to_chars (buf, newval, INSN_SIZE);
23658 break;
23659
23660 case BFD_RELOC_ARM_SWI:
23661 if (fixP->tc_fix_data != 0)
23662 {
23663 if (((unsigned long) value) > 0xff)
23664 as_bad_where (fixP->fx_file, fixP->fx_line,
23665 _("invalid swi expression"));
23666 newval = md_chars_to_number (buf, THUMB_SIZE);
23667 newval |= value;
23668 md_number_to_chars (buf, newval, THUMB_SIZE);
23669 }
23670 else
23671 {
23672 if (((unsigned long) value) > 0x00ffffff)
23673 as_bad_where (fixP->fx_file, fixP->fx_line,
23674 _("invalid swi expression"));
23675 newval = md_chars_to_number (buf, INSN_SIZE);
23676 newval |= value;
23677 md_number_to_chars (buf, newval, INSN_SIZE);
23678 }
23679 break;
23680
23681 case BFD_RELOC_ARM_MULTI:
23682 if (((unsigned long) value) > 0xffff)
23683 as_bad_where (fixP->fx_file, fixP->fx_line,
23684 _("invalid expression in load/store multiple"));
23685 newval = value | md_chars_to_number (buf, INSN_SIZE);
23686 md_number_to_chars (buf, newval, INSN_SIZE);
23687 break;
23688
23689 #ifdef OBJ_ELF
23690 case BFD_RELOC_ARM_PCREL_CALL:
23691
23692 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23693 && fixP->fx_addsy
23694 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23695 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23696 && THUMB_IS_FUNC (fixP->fx_addsy))
23697 /* Flip the bl to blx. This is a simple flip
23698 bit here because we generate PCREL_CALL for
23699 unconditional bls. */
23700 {
23701 newval = md_chars_to_number (buf, INSN_SIZE);
23702 newval = newval | 0x10000000;
23703 md_number_to_chars (buf, newval, INSN_SIZE);
23704 temp = 1;
23705 fixP->fx_done = 1;
23706 }
23707 else
23708 temp = 3;
23709 goto arm_branch_common;
23710
23711 case BFD_RELOC_ARM_PCREL_JUMP:
23712 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23713 && fixP->fx_addsy
23714 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23715 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23716 && THUMB_IS_FUNC (fixP->fx_addsy))
23717 {
23718 /* This would map to a bl<cond>, b<cond>,
23719 b<always> to a Thumb function. We
23720 need to force a relocation for this particular
23721 case. */
23722 newval = md_chars_to_number (buf, INSN_SIZE);
23723 fixP->fx_done = 0;
23724 }
23725 /* Fall through. */
23726
23727 case BFD_RELOC_ARM_PLT32:
23728 #endif
23729 case BFD_RELOC_ARM_PCREL_BRANCH:
23730 temp = 3;
23731 goto arm_branch_common;
23732
23733 case BFD_RELOC_ARM_PCREL_BLX:
23734
23735 temp = 1;
23736 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23737 && fixP->fx_addsy
23738 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23739 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23740 && ARM_IS_FUNC (fixP->fx_addsy))
23741 {
23742 /* Flip the blx to a bl and warn. */
23743 const char *name = S_GET_NAME (fixP->fx_addsy);
23744 newval = 0xeb000000;
23745 as_warn_where (fixP->fx_file, fixP->fx_line,
23746 _("blx to '%s' an ARM ISA state function changed to bl"),
23747 name);
23748 md_number_to_chars (buf, newval, INSN_SIZE);
23749 temp = 3;
23750 fixP->fx_done = 1;
23751 }
23752
23753 #ifdef OBJ_ELF
23754 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
23755 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
23756 #endif
23757
23758 arm_branch_common:
23759 /* We are going to store value (shifted right by two) in the
23760 instruction, in a 24 bit, signed field. Bits 26 through 32 either
23761 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
23762 also be clear. */
23763 if (value & temp)
23764 as_bad_where (fixP->fx_file, fixP->fx_line,
23765 _("misaligned branch destination"));
23766 if ((value & (offsetT)0xfe000000) != (offsetT)0
23767 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
23768 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23769
23770 if (fixP->fx_done || !seg->use_rela_p)
23771 {
23772 newval = md_chars_to_number (buf, INSN_SIZE);
23773 newval |= (value >> 2) & 0x00ffffff;
23774 /* Set the H bit on BLX instructions. */
23775 if (temp == 1)
23776 {
23777 if (value & 2)
23778 newval |= 0x01000000;
23779 else
23780 newval &= ~0x01000000;
23781 }
23782 md_number_to_chars (buf, newval, INSN_SIZE);
23783 }
23784 break;
23785
23786 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
23787 /* CBZ can only branch forward. */
23788
23789 /* Attempts to use CBZ to branch to the next instruction
23790 (which, strictly speaking, are prohibited) will be turned into
23791 no-ops.
23792
23793 FIXME: It may be better to remove the instruction completely and
23794 perform relaxation. */
23795 if (value == -2)
23796 {
23797 newval = md_chars_to_number (buf, THUMB_SIZE);
23798 newval = 0xbf00; /* NOP encoding T1 */
23799 md_number_to_chars (buf, newval, THUMB_SIZE);
23800 }
23801 else
23802 {
23803 if (value & ~0x7e)
23804 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23805
23806 if (fixP->fx_done || !seg->use_rela_p)
23807 {
23808 newval = md_chars_to_number (buf, THUMB_SIZE);
23809 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
23810 md_number_to_chars (buf, newval, THUMB_SIZE);
23811 }
23812 }
23813 break;
23814
23815 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
23816 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
23817 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23818
23819 if (fixP->fx_done || !seg->use_rela_p)
23820 {
23821 newval = md_chars_to_number (buf, THUMB_SIZE);
23822 newval |= (value & 0x1ff) >> 1;
23823 md_number_to_chars (buf, newval, THUMB_SIZE);
23824 }
23825 break;
23826
23827 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
23828 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
23829 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23830
23831 if (fixP->fx_done || !seg->use_rela_p)
23832 {
23833 newval = md_chars_to_number (buf, THUMB_SIZE);
23834 newval |= (value & 0xfff) >> 1;
23835 md_number_to_chars (buf, newval, THUMB_SIZE);
23836 }
23837 break;
23838
23839 case BFD_RELOC_THUMB_PCREL_BRANCH20:
23840 if (fixP->fx_addsy
23841 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23842 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23843 && ARM_IS_FUNC (fixP->fx_addsy)
23844 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
23845 {
23846 /* Force a relocation for a branch 20 bits wide. */
23847 fixP->fx_done = 0;
23848 }
23849 if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
23850 as_bad_where (fixP->fx_file, fixP->fx_line,
23851 _("conditional branch out of range"));
23852
23853 if (fixP->fx_done || !seg->use_rela_p)
23854 {
23855 offsetT newval2;
23856 addressT S, J1, J2, lo, hi;
23857
23858 S = (value & 0x00100000) >> 20;
23859 J2 = (value & 0x00080000) >> 19;
23860 J1 = (value & 0x00040000) >> 18;
23861 hi = (value & 0x0003f000) >> 12;
23862 lo = (value & 0x00000ffe) >> 1;
23863
23864 newval = md_chars_to_number (buf, THUMB_SIZE);
23865 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23866 newval |= (S << 10) | hi;
23867 newval2 |= (J1 << 13) | (J2 << 11) | lo;
23868 md_number_to_chars (buf, newval, THUMB_SIZE);
23869 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
23870 }
23871 break;
23872
23873 case BFD_RELOC_THUMB_PCREL_BLX:
23874 /* If there is a blx from a thumb state function to
23875 another thumb function flip this to a bl and warn
23876 about it. */
23877
23878 if (fixP->fx_addsy
23879 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23880 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23881 && THUMB_IS_FUNC (fixP->fx_addsy))
23882 {
23883 const char *name = S_GET_NAME (fixP->fx_addsy);
23884 as_warn_where (fixP->fx_file, fixP->fx_line,
23885 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
23886 name);
23887 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23888 newval = newval | 0x1000;
23889 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
23890 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
23891 fixP->fx_done = 1;
23892 }
23893
23894
23895 goto thumb_bl_common;
23896
23897 case BFD_RELOC_THUMB_PCREL_BRANCH23:
23898 /* A bl from Thumb state ISA to an internal ARM state function
23899 is converted to a blx. */
23900 if (fixP->fx_addsy
23901 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23902 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23903 && ARM_IS_FUNC (fixP->fx_addsy)
23904 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
23905 {
23906 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23907 newval = newval & ~0x1000;
23908 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
23909 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
23910 fixP->fx_done = 1;
23911 }
23912
23913 thumb_bl_common:
23914
23915 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
23916 /* For a BLX instruction, make sure that the relocation is rounded up
23917 to a word boundary. This follows the semantics of the instruction
23918 which specifies that bit 1 of the target address will come from bit
23919 1 of the base address. */
23920 value = (value + 3) & ~ 3;
23921
23922 #ifdef OBJ_ELF
23923 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
23924 && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
23925 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
23926 #endif
23927
23928 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
23929 {
23930 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)))
23931 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23932 else if ((value & ~0x1ffffff)
23933 && ((value & ~0x1ffffff) != ~0x1ffffff))
23934 as_bad_where (fixP->fx_file, fixP->fx_line,
23935 _("Thumb2 branch out of range"));
23936 }
23937
23938 if (fixP->fx_done || !seg->use_rela_p)
23939 encode_thumb2_b_bl_offset (buf, value);
23940
23941 break;
23942
23943 case BFD_RELOC_THUMB_PCREL_BRANCH25:
23944 if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
23945 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23946
23947 if (fixP->fx_done || !seg->use_rela_p)
23948 encode_thumb2_b_bl_offset (buf, value);
23949
23950 break;
23951
23952 case BFD_RELOC_8:
23953 if (fixP->fx_done || !seg->use_rela_p)
23954 *buf = value;
23955 break;
23956
23957 case BFD_RELOC_16:
23958 if (fixP->fx_done || !seg->use_rela_p)
23959 md_number_to_chars (buf, value, 2);
23960 break;
23961
23962 #ifdef OBJ_ELF
23963 case BFD_RELOC_ARM_TLS_CALL:
23964 case BFD_RELOC_ARM_THM_TLS_CALL:
23965 case BFD_RELOC_ARM_TLS_DESCSEQ:
23966 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
23967 case BFD_RELOC_ARM_TLS_GOTDESC:
23968 case BFD_RELOC_ARM_TLS_GD32:
23969 case BFD_RELOC_ARM_TLS_LE32:
23970 case BFD_RELOC_ARM_TLS_IE32:
23971 case BFD_RELOC_ARM_TLS_LDM32:
23972 case BFD_RELOC_ARM_TLS_LDO32:
23973 S_SET_THREAD_LOCAL (fixP->fx_addsy);
23974 break;
23975
23976 case BFD_RELOC_ARM_GOT32:
23977 case BFD_RELOC_ARM_GOTOFF:
23978 break;
23979
23980 case BFD_RELOC_ARM_GOT_PREL:
23981 if (fixP->fx_done || !seg->use_rela_p)
23982 md_number_to_chars (buf, value, 4);
23983 break;
23984
23985 case BFD_RELOC_ARM_TARGET2:
23986 /* TARGET2 is not partial-inplace, so we need to write the
23987 addend here for REL targets, because it won't be written out
23988 during reloc processing later. */
23989 if (fixP->fx_done || !seg->use_rela_p)
23990 md_number_to_chars (buf, fixP->fx_offset, 4);
23991 break;
23992 #endif
23993
23994 case BFD_RELOC_RVA:
23995 case BFD_RELOC_32:
23996 case BFD_RELOC_ARM_TARGET1:
23997 case BFD_RELOC_ARM_ROSEGREL32:
23998 case BFD_RELOC_ARM_SBREL32:
23999 case BFD_RELOC_32_PCREL:
24000 #ifdef TE_PE
24001 case BFD_RELOC_32_SECREL:
24002 #endif
24003 if (fixP->fx_done || !seg->use_rela_p)
24004 #ifdef TE_WINCE
24005 /* For WinCE we only do this for pcrel fixups. */
24006 if (fixP->fx_done || fixP->fx_pcrel)
24007 #endif
24008 md_number_to_chars (buf, value, 4);
24009 break;
24010
24011 #ifdef OBJ_ELF
24012 case BFD_RELOC_ARM_PREL31:
24013 if (fixP->fx_done || !seg->use_rela_p)
24014 {
24015 newval = md_chars_to_number (buf, 4) & 0x80000000;
24016 if ((value ^ (value >> 1)) & 0x40000000)
24017 {
24018 as_bad_where (fixP->fx_file, fixP->fx_line,
24019 _("rel31 relocation overflow"));
24020 }
24021 newval |= value & 0x7fffffff;
24022 md_number_to_chars (buf, newval, 4);
24023 }
24024 break;
24025 #endif
24026
24027 case BFD_RELOC_ARM_CP_OFF_IMM:
24028 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
24029 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM)
24030 newval = md_chars_to_number (buf, INSN_SIZE);
24031 else
24032 newval = get_thumb32_insn (buf);
24033 if ((newval & 0x0f200f00) == 0x0d000900)
24034 {
24035 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
24036 has permitted values that are multiples of 2, in the range 0
24037 to 510. */
24038 if (value < -510 || value > 510 || (value & 1))
24039 as_bad_where (fixP->fx_file, fixP->fx_line,
24040 _("co-processor offset out of range"));
24041 }
24042 else if (value < -1023 || value > 1023 || (value & 3))
24043 as_bad_where (fixP->fx_file, fixP->fx_line,
24044 _("co-processor offset out of range"));
24045 cp_off_common:
24046 sign = value > 0;
24047 if (value < 0)
24048 value = -value;
24049 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
24050 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
24051 newval = md_chars_to_number (buf, INSN_SIZE);
24052 else
24053 newval = get_thumb32_insn (buf);
24054 if (value == 0)
24055 newval &= 0xffffff00;
24056 else
24057 {
24058 newval &= 0xff7fff00;
24059 if ((newval & 0x0f200f00) == 0x0d000900)
24060 {
24061 /* This is a fp16 vstr/vldr.
24062
24063 It requires the immediate offset in the instruction is shifted
24064 left by 1 to be a half-word offset.
24065
24066 Here, left shift by 1 first, and later right shift by 2
24067 should get the right offset. */
24068 value <<= 1;
24069 }
24070 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
24071 }
24072 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
24073 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
24074 md_number_to_chars (buf, newval, INSN_SIZE);
24075 else
24076 put_thumb32_insn (buf, newval);
24077 break;
24078
24079 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
24080 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
24081 if (value < -255 || value > 255)
24082 as_bad_where (fixP->fx_file, fixP->fx_line,
24083 _("co-processor offset out of range"));
24084 value *= 4;
24085 goto cp_off_common;
24086
24087 case BFD_RELOC_ARM_THUMB_OFFSET:
24088 newval = md_chars_to_number (buf, THUMB_SIZE);
24089 /* Exactly what ranges, and where the offset is inserted depends
24090 on the type of instruction, we can establish this from the
24091 top 4 bits. */
24092 switch (newval >> 12)
24093 {
24094 case 4: /* PC load. */
24095 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
24096 forced to zero for these loads; md_pcrel_from has already
24097 compensated for this. */
24098 if (value & 3)
24099 as_bad_where (fixP->fx_file, fixP->fx_line,
24100 _("invalid offset, target not word aligned (0x%08lX)"),
24101 (((unsigned long) fixP->fx_frag->fr_address
24102 + (unsigned long) fixP->fx_where) & ~3)
24103 + (unsigned long) value);
24104
24105 if (value & ~0x3fc)
24106 as_bad_where (fixP->fx_file, fixP->fx_line,
24107 _("invalid offset, value too big (0x%08lX)"),
24108 (long) value);
24109
24110 newval |= value >> 2;
24111 break;
24112
24113 case 9: /* SP load/store. */
24114 if (value & ~0x3fc)
24115 as_bad_where (fixP->fx_file, fixP->fx_line,
24116 _("invalid offset, value too big (0x%08lX)"),
24117 (long) value);
24118 newval |= value >> 2;
24119 break;
24120
24121 case 6: /* Word load/store. */
24122 if (value & ~0x7c)
24123 as_bad_where (fixP->fx_file, fixP->fx_line,
24124 _("invalid offset, value too big (0x%08lX)"),
24125 (long) value);
24126 newval |= value << 4; /* 6 - 2. */
24127 break;
24128
24129 case 7: /* Byte load/store. */
24130 if (value & ~0x1f)
24131 as_bad_where (fixP->fx_file, fixP->fx_line,
24132 _("invalid offset, value too big (0x%08lX)"),
24133 (long) value);
24134 newval |= value << 6;
24135 break;
24136
24137 case 8: /* Halfword load/store. */
24138 if (value & ~0x3e)
24139 as_bad_where (fixP->fx_file, fixP->fx_line,
24140 _("invalid offset, value too big (0x%08lX)"),
24141 (long) value);
24142 newval |= value << 5; /* 6 - 1. */
24143 break;
24144
24145 default:
24146 as_bad_where (fixP->fx_file, fixP->fx_line,
24147 "Unable to process relocation for thumb opcode: %lx",
24148 (unsigned long) newval);
24149 break;
24150 }
24151 md_number_to_chars (buf, newval, THUMB_SIZE);
24152 break;
24153
24154 case BFD_RELOC_ARM_THUMB_ADD:
24155 /* This is a complicated relocation, since we use it for all of
24156 the following immediate relocations:
24157
24158 3bit ADD/SUB
24159 8bit ADD/SUB
24160 9bit ADD/SUB SP word-aligned
24161 10bit ADD PC/SP word-aligned
24162
24163 The type of instruction being processed is encoded in the
24164 instruction field:
24165
24166 0x8000 SUB
24167 0x00F0 Rd
24168 0x000F Rs
24169 */
24170 newval = md_chars_to_number (buf, THUMB_SIZE);
24171 {
24172 int rd = (newval >> 4) & 0xf;
24173 int rs = newval & 0xf;
24174 int subtract = !!(newval & 0x8000);
24175
24176 /* Check for HI regs, only very restricted cases allowed:
24177 Adjusting SP, and using PC or SP to get an address. */
24178 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
24179 || (rs > 7 && rs != REG_SP && rs != REG_PC))
24180 as_bad_where (fixP->fx_file, fixP->fx_line,
24181 _("invalid Hi register with immediate"));
24182
24183 /* If value is negative, choose the opposite instruction. */
24184 if (value < 0)
24185 {
24186 value = -value;
24187 subtract = !subtract;
24188 if (value < 0)
24189 as_bad_where (fixP->fx_file, fixP->fx_line,
24190 _("immediate value out of range"));
24191 }
24192
24193 if (rd == REG_SP)
24194 {
24195 if (value & ~0x1fc)
24196 as_bad_where (fixP->fx_file, fixP->fx_line,
24197 _("invalid immediate for stack address calculation"));
24198 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
24199 newval |= value >> 2;
24200 }
24201 else if (rs == REG_PC || rs == REG_SP)
24202 {
24203 /* PR gas/18541. If the addition is for a defined symbol
24204 within range of an ADR instruction then accept it. */
24205 if (subtract
24206 && value == 4
24207 && fixP->fx_addsy != NULL)
24208 {
24209 subtract = 0;
24210
24211 if (! S_IS_DEFINED (fixP->fx_addsy)
24212 || S_GET_SEGMENT (fixP->fx_addsy) != seg
24213 || S_IS_WEAK (fixP->fx_addsy))
24214 {
24215 as_bad_where (fixP->fx_file, fixP->fx_line,
24216 _("address calculation needs a strongly defined nearby symbol"));
24217 }
24218 else
24219 {
24220 offsetT v = fixP->fx_where + fixP->fx_frag->fr_address;
24221
24222 /* Round up to the next 4-byte boundary. */
24223 if (v & 3)
24224 v = (v + 3) & ~ 3;
24225 else
24226 v += 4;
24227 v = S_GET_VALUE (fixP->fx_addsy) - v;
24228
24229 if (v & ~0x3fc)
24230 {
24231 as_bad_where (fixP->fx_file, fixP->fx_line,
24232 _("symbol too far away"));
24233 }
24234 else
24235 {
24236 fixP->fx_done = 1;
24237 value = v;
24238 }
24239 }
24240 }
24241
24242 if (subtract || value & ~0x3fc)
24243 as_bad_where (fixP->fx_file, fixP->fx_line,
24244 _("invalid immediate for address calculation (value = 0x%08lX)"),
24245 (unsigned long) (subtract ? - value : value));
24246 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
24247 newval |= rd << 8;
24248 newval |= value >> 2;
24249 }
24250 else if (rs == rd)
24251 {
24252 if (value & ~0xff)
24253 as_bad_where (fixP->fx_file, fixP->fx_line,
24254 _("immediate value out of range"));
24255 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
24256 newval |= (rd << 8) | value;
24257 }
24258 else
24259 {
24260 if (value & ~0x7)
24261 as_bad_where (fixP->fx_file, fixP->fx_line,
24262 _("immediate value out of range"));
24263 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
24264 newval |= rd | (rs << 3) | (value << 6);
24265 }
24266 }
24267 md_number_to_chars (buf, newval, THUMB_SIZE);
24268 break;
24269
24270 case BFD_RELOC_ARM_THUMB_IMM:
24271 newval = md_chars_to_number (buf, THUMB_SIZE);
24272 if (value < 0 || value > 255)
24273 as_bad_where (fixP->fx_file, fixP->fx_line,
24274 _("invalid immediate: %ld is out of range"),
24275 (long) value);
24276 newval |= value;
24277 md_number_to_chars (buf, newval, THUMB_SIZE);
24278 break;
24279
24280 case BFD_RELOC_ARM_THUMB_SHIFT:
24281 /* 5bit shift value (0..32). LSL cannot take 32. */
24282 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
24283 temp = newval & 0xf800;
24284 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
24285 as_bad_where (fixP->fx_file, fixP->fx_line,
24286 _("invalid shift value: %ld"), (long) value);
24287 /* Shifts of zero must be encoded as LSL. */
24288 if (value == 0)
24289 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
24290 /* Shifts of 32 are encoded as zero. */
24291 else if (value == 32)
24292 value = 0;
24293 newval |= value << 6;
24294 md_number_to_chars (buf, newval, THUMB_SIZE);
24295 break;
24296
24297 case BFD_RELOC_VTABLE_INHERIT:
24298 case BFD_RELOC_VTABLE_ENTRY:
24299 fixP->fx_done = 0;
24300 return;
24301
24302 case BFD_RELOC_ARM_MOVW:
24303 case BFD_RELOC_ARM_MOVT:
24304 case BFD_RELOC_ARM_THUMB_MOVW:
24305 case BFD_RELOC_ARM_THUMB_MOVT:
24306 if (fixP->fx_done || !seg->use_rela_p)
24307 {
24308 /* REL format relocations are limited to a 16-bit addend. */
24309 if (!fixP->fx_done)
24310 {
24311 if (value < -0x8000 || value > 0x7fff)
24312 as_bad_where (fixP->fx_file, fixP->fx_line,
24313 _("offset out of range"));
24314 }
24315 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
24316 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
24317 {
24318 value >>= 16;
24319 }
24320
24321 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
24322 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
24323 {
24324 newval = get_thumb32_insn (buf);
24325 newval &= 0xfbf08f00;
24326 newval |= (value & 0xf000) << 4;
24327 newval |= (value & 0x0800) << 15;
24328 newval |= (value & 0x0700) << 4;
24329 newval |= (value & 0x00ff);
24330 put_thumb32_insn (buf, newval);
24331 }
24332 else
24333 {
24334 newval = md_chars_to_number (buf, 4);
24335 newval &= 0xfff0f000;
24336 newval |= value & 0x0fff;
24337 newval |= (value & 0xf000) << 4;
24338 md_number_to_chars (buf, newval, 4);
24339 }
24340 }
24341 return;
24342
24343 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
24344 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
24345 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
24346 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
24347 gas_assert (!fixP->fx_done);
24348 {
24349 bfd_vma insn;
24350 bfd_boolean is_mov;
24351 bfd_vma encoded_addend = value;
24352
24353 /* Check that addend can be encoded in instruction. */
24354 if (!seg->use_rela_p && (value < 0 || value > 255))
24355 as_bad_where (fixP->fx_file, fixP->fx_line,
24356 _("the offset 0x%08lX is not representable"),
24357 (unsigned long) encoded_addend);
24358
24359 /* Extract the instruction. */
24360 insn = md_chars_to_number (buf, THUMB_SIZE);
24361 is_mov = (insn & 0xf800) == 0x2000;
24362
24363 /* Encode insn. */
24364 if (is_mov)
24365 {
24366 if (!seg->use_rela_p)
24367 insn |= encoded_addend;
24368 }
24369 else
24370 {
24371 int rd, rs;
24372
24373 /* Extract the instruction. */
24374 /* Encoding is the following
24375 0x8000 SUB
24376 0x00F0 Rd
24377 0x000F Rs
24378 */
24379 /* The following conditions must be true :
24380 - ADD
24381 - Rd == Rs
24382 - Rd <= 7
24383 */
24384 rd = (insn >> 4) & 0xf;
24385 rs = insn & 0xf;
24386 if ((insn & 0x8000) || (rd != rs) || rd > 7)
24387 as_bad_where (fixP->fx_file, fixP->fx_line,
24388 _("Unable to process relocation for thumb opcode: %lx"),
24389 (unsigned long) insn);
24390
24391 /* Encode as ADD immediate8 thumb 1 code. */
24392 insn = 0x3000 | (rd << 8);
24393
24394 /* Place the encoded addend into the first 8 bits of the
24395 instruction. */
24396 if (!seg->use_rela_p)
24397 insn |= encoded_addend;
24398 }
24399
24400 /* Update the instruction. */
24401 md_number_to_chars (buf, insn, THUMB_SIZE);
24402 }
24403 break;
24404
24405 case BFD_RELOC_ARM_ALU_PC_G0_NC:
24406 case BFD_RELOC_ARM_ALU_PC_G0:
24407 case BFD_RELOC_ARM_ALU_PC_G1_NC:
24408 case BFD_RELOC_ARM_ALU_PC_G1:
24409 case BFD_RELOC_ARM_ALU_PC_G2:
24410 case BFD_RELOC_ARM_ALU_SB_G0_NC:
24411 case BFD_RELOC_ARM_ALU_SB_G0:
24412 case BFD_RELOC_ARM_ALU_SB_G1_NC:
24413 case BFD_RELOC_ARM_ALU_SB_G1:
24414 case BFD_RELOC_ARM_ALU_SB_G2:
24415 gas_assert (!fixP->fx_done);
24416 if (!seg->use_rela_p)
24417 {
24418 bfd_vma insn;
24419 bfd_vma encoded_addend;
24420 bfd_vma addend_abs = abs (value);
24421
24422 /* Check that the absolute value of the addend can be
24423 expressed as an 8-bit constant plus a rotation. */
24424 encoded_addend = encode_arm_immediate (addend_abs);
24425 if (encoded_addend == (unsigned int) FAIL)
24426 as_bad_where (fixP->fx_file, fixP->fx_line,
24427 _("the offset 0x%08lX is not representable"),
24428 (unsigned long) addend_abs);
24429
24430 /* Extract the instruction. */
24431 insn = md_chars_to_number (buf, INSN_SIZE);
24432
24433 /* If the addend is positive, use an ADD instruction.
24434 Otherwise use a SUB. Take care not to destroy the S bit. */
24435 insn &= 0xff1fffff;
24436 if (value < 0)
24437 insn |= 1 << 22;
24438 else
24439 insn |= 1 << 23;
24440
24441 /* Place the encoded addend into the first 12 bits of the
24442 instruction. */
24443 insn &= 0xfffff000;
24444 insn |= encoded_addend;
24445
24446 /* Update the instruction. */
24447 md_number_to_chars (buf, insn, INSN_SIZE);
24448 }
24449 break;
24450
24451 case BFD_RELOC_ARM_LDR_PC_G0:
24452 case BFD_RELOC_ARM_LDR_PC_G1:
24453 case BFD_RELOC_ARM_LDR_PC_G2:
24454 case BFD_RELOC_ARM_LDR_SB_G0:
24455 case BFD_RELOC_ARM_LDR_SB_G1:
24456 case BFD_RELOC_ARM_LDR_SB_G2:
24457 gas_assert (!fixP->fx_done);
24458 if (!seg->use_rela_p)
24459 {
24460 bfd_vma insn;
24461 bfd_vma addend_abs = abs (value);
24462
24463 /* Check that the absolute value of the addend can be
24464 encoded in 12 bits. */
24465 if (addend_abs >= 0x1000)
24466 as_bad_where (fixP->fx_file, fixP->fx_line,
24467 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
24468 (unsigned long) addend_abs);
24469
24470 /* Extract the instruction. */
24471 insn = md_chars_to_number (buf, INSN_SIZE);
24472
24473 /* If the addend is negative, clear bit 23 of the instruction.
24474 Otherwise set it. */
24475 if (value < 0)
24476 insn &= ~(1 << 23);
24477 else
24478 insn |= 1 << 23;
24479
24480 /* Place the absolute value of the addend into the first 12 bits
24481 of the instruction. */
24482 insn &= 0xfffff000;
24483 insn |= addend_abs;
24484
24485 /* Update the instruction. */
24486 md_number_to_chars (buf, insn, INSN_SIZE);
24487 }
24488 break;
24489
24490 case BFD_RELOC_ARM_LDRS_PC_G0:
24491 case BFD_RELOC_ARM_LDRS_PC_G1:
24492 case BFD_RELOC_ARM_LDRS_PC_G2:
24493 case BFD_RELOC_ARM_LDRS_SB_G0:
24494 case BFD_RELOC_ARM_LDRS_SB_G1:
24495 case BFD_RELOC_ARM_LDRS_SB_G2:
24496 gas_assert (!fixP->fx_done);
24497 if (!seg->use_rela_p)
24498 {
24499 bfd_vma insn;
24500 bfd_vma addend_abs = abs (value);
24501
24502 /* Check that the absolute value of the addend can be
24503 encoded in 8 bits. */
24504 if (addend_abs >= 0x100)
24505 as_bad_where (fixP->fx_file, fixP->fx_line,
24506 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
24507 (unsigned long) addend_abs);
24508
24509 /* Extract the instruction. */
24510 insn = md_chars_to_number (buf, INSN_SIZE);
24511
24512 /* If the addend is negative, clear bit 23 of the instruction.
24513 Otherwise set it. */
24514 if (value < 0)
24515 insn &= ~(1 << 23);
24516 else
24517 insn |= 1 << 23;
24518
24519 /* Place the first four bits of the absolute value of the addend
24520 into the first 4 bits of the instruction, and the remaining
24521 four into bits 8 .. 11. */
24522 insn &= 0xfffff0f0;
24523 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
24524
24525 /* Update the instruction. */
24526 md_number_to_chars (buf, insn, INSN_SIZE);
24527 }
24528 break;
24529
24530 case BFD_RELOC_ARM_LDC_PC_G0:
24531 case BFD_RELOC_ARM_LDC_PC_G1:
24532 case BFD_RELOC_ARM_LDC_PC_G2:
24533 case BFD_RELOC_ARM_LDC_SB_G0:
24534 case BFD_RELOC_ARM_LDC_SB_G1:
24535 case BFD_RELOC_ARM_LDC_SB_G2:
24536 gas_assert (!fixP->fx_done);
24537 if (!seg->use_rela_p)
24538 {
24539 bfd_vma insn;
24540 bfd_vma addend_abs = abs (value);
24541
24542 /* Check that the absolute value of the addend is a multiple of
24543 four and, when divided by four, fits in 8 bits. */
24544 if (addend_abs & 0x3)
24545 as_bad_where (fixP->fx_file, fixP->fx_line,
24546 _("bad offset 0x%08lX (must be word-aligned)"),
24547 (unsigned long) addend_abs);
24548
24549 if ((addend_abs >> 2) > 0xff)
24550 as_bad_where (fixP->fx_file, fixP->fx_line,
24551 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
24552 (unsigned long) addend_abs);
24553
24554 /* Extract the instruction. */
24555 insn = md_chars_to_number (buf, INSN_SIZE);
24556
24557 /* If the addend is negative, clear bit 23 of the instruction.
24558 Otherwise set it. */
24559 if (value < 0)
24560 insn &= ~(1 << 23);
24561 else
24562 insn |= 1 << 23;
24563
24564 /* Place the addend (divided by four) into the first eight
24565 bits of the instruction. */
24566 insn &= 0xfffffff0;
24567 insn |= addend_abs >> 2;
24568
24569 /* Update the instruction. */
24570 md_number_to_chars (buf, insn, INSN_SIZE);
24571 }
24572 break;
24573
24574 case BFD_RELOC_ARM_V4BX:
24575 /* This will need to go in the object file. */
24576 fixP->fx_done = 0;
24577 break;
24578
24579 case BFD_RELOC_UNUSED:
24580 default:
24581 as_bad_where (fixP->fx_file, fixP->fx_line,
24582 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
24583 }
24584 }
24585
24586 /* Translate internal representation of relocation info to BFD target
24587 format. */
24588
arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  reloc = XNEW (arelent);

  reloc->sym_ptr_ptr = XNEW (asymbol *);
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  /* For PC-relative fixups the addend depends on whether the target
     section uses RELA (addend carried in the reloc) or REL (addend
     stored in the section contents).  */
  if (fixp->fx_pcrel)
    {
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  /* Map the internal fixup type onto the BFD relocation code.  The
     first seven cases deliberately fall through: each converts to its
     PC-relative counterpart when fx_pcrel is set, and otherwise drops
     into the shared "use fx_r_type unchanged" case at the bottom.  */
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_THUMB_PCREL_BLX:
#ifdef OBJ_ELF
      /* EABI >= v4 mandates that BL and BLX be treated identically, so a
	 Thumb BLX is relaxed to a plain BRANCH23 relocation there.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	code = BFD_RELOC_THUMB_PCREL_BLX;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_GOT_PREL:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
    case BFD_RELOC_ARM_V4BX:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

    case BFD_RELOC_ARM_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	/* Remaining internal relocation types should have been resolved
	   by md_apply_fix; report the offender by name.  */
	const char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
	  default:			   type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* A 32-bit reference to _GLOBAL_OFFSET_TABLE_ becomes a GOTPC
     relocation with the place itself as the addend.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
24855
24856 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
24857
24858 void
24859 cons_fix_new_arm (fragS * frag,
24860 int where,
24861 int size,
24862 expressionS * exp,
24863 bfd_reloc_code_real_type reloc)
24864 {
24865 int pcrel = 0;
24866
24867 /* Pick a reloc.
24868 FIXME: @@ Should look at CPU word size. */
24869 switch (size)
24870 {
24871 case 1:
24872 reloc = BFD_RELOC_8;
24873 break;
24874 case 2:
24875 reloc = BFD_RELOC_16;
24876 break;
24877 case 4:
24878 default:
24879 reloc = BFD_RELOC_32;
24880 break;
24881 case 8:
24882 reloc = BFD_RELOC_64;
24883 break;
24884 }
24885
24886 #ifdef TE_PE
24887 if (exp->X_op == O_secrel)
24888 {
24889 exp->X_op = O_symbol;
24890 reloc = BFD_RELOC_32_SECREL;
24891 }
24892 #endif
24893
24894 fix_new_exp (frag, where, size, exp, pcrel, reloc);
24895 }
24896
#if defined (OBJ_COFF)
void
arm_validate_fix (fixS * fixP)
{
  /* A branch to a defined symbol that lacks the THUMB_FUNC attribute
     must be a call to a function carrying the (interfacearm) attribute.
     Redirect the fixup at the Thumb entry point of that function.  */
  if (fixP->fx_r_type != BFD_RELOC_THUMB_PCREL_BRANCH23
      || fixP->fx_addsy == NULL
      || ! S_IS_DEFINED (fixP->fx_addsy)
      || THUMB_IS_FUNC (fixP->fx_addsy))
    return;

  fixP->fx_addsy = find_real_start (fixP->fx_addsy);
}
#endif
24914
24915
24916 int
24917 arm_force_relocation (struct fix * fixp)
24918 {
24919 #if defined (OBJ_COFF) && defined (TE_PE)
24920 if (fixp->fx_r_type == BFD_RELOC_RVA)
24921 return 1;
24922 #endif
24923
24924 /* In case we have a call or a branch to a function in ARM ISA mode from
24925 a thumb function or vice-versa force the relocation. These relocations
24926 are cleared off for some cores that might have blx and simple transformations
24927 are possible. */
24928
24929 #ifdef OBJ_ELF
24930 switch (fixp->fx_r_type)
24931 {
24932 case BFD_RELOC_ARM_PCREL_JUMP:
24933 case BFD_RELOC_ARM_PCREL_CALL:
24934 case BFD_RELOC_THUMB_PCREL_BLX:
24935 if (THUMB_IS_FUNC (fixp->fx_addsy))
24936 return 1;
24937 break;
24938
24939 case BFD_RELOC_ARM_PCREL_BLX:
24940 case BFD_RELOC_THUMB_PCREL_BRANCH25:
24941 case BFD_RELOC_THUMB_PCREL_BRANCH20:
24942 case BFD_RELOC_THUMB_PCREL_BRANCH23:
24943 if (ARM_IS_FUNC (fixp->fx_addsy))
24944 return 1;
24945 break;
24946
24947 default:
24948 break;
24949 }
24950 #endif
24951
24952 /* Resolve these relocations even if the symbol is extern or weak.
24953 Technically this is probably wrong due to symbol preemption.
24954 In practice these relocations do not have enough range to be useful
24955 at dynamic link time, and some code (e.g. in the Linux kernel)
24956 expects these references to be resolved. */
24957 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
24958 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
24959 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
24960 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
24961 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
24962 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
24963 || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
24964 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
24965 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
24966 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
24967 || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
24968 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
24969 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
24970 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
24971 return 0;
24972
24973 /* Always leave these relocations for the linker. */
24974 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
24975 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
24976 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
24977 return 1;
24978
24979 /* Always generate relocations against function symbols. */
24980 if (fixp->fx_r_type == BFD_RELOC_32
24981 && fixp->fx_addsy
24982 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
24983 return 1;
24984
24985 return generic_force_reloc (fixp);
24986 }
24987
24988 #if defined (OBJ_ELF) || defined (OBJ_COFF)
24989 /* Relocations against function names must be left unadjusted,
24990 so that the linker can use this information to generate interworking
24991 stubs. The MIPS version of this function
24992 also prevents relocations that are mips-16 specific, but I do not
24993 know why it does this.
24994
24995 FIXME:
24996 There is one other problem that ought to be addressed here, but
24997 which currently is not: Taking the address of a label (rather
24998 than a function) and then later jumping to that address. Such
24999 addresses also ought to have their bottom bit set (assuming that
25000 they reside in Thumb code), but at the moment they will not. */
25001
25002 bfd_boolean
25003 arm_fix_adjustable (fixS * fixP)
25004 {
25005 if (fixP->fx_addsy == NULL)
25006 return 1;
25007
25008 /* Preserve relocations against symbols with function type. */
25009 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
25010 return FALSE;
25011
25012 if (THUMB_IS_FUNC (fixP->fx_addsy)
25013 && fixP->fx_subsy == NULL)
25014 return FALSE;
25015
25016 /* We need the symbol name for the VTABLE entries. */
25017 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
25018 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
25019 return FALSE;
25020
25021 /* Don't allow symbols to be discarded on GOT related relocs. */
25022 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
25023 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
25024 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
25025 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
25026 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
25027 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
25028 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
25029 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
25030 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
25031 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
25032 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
25033 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
25034 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
25035 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
25036 return FALSE;
25037
25038 /* Similarly for group relocations. */
25039 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
25040 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
25041 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
25042 return FALSE;
25043
25044 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
25045 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
25046 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
25047 || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
25048 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
25049 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
25050 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
25051 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
25052 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
25053 return FALSE;
25054
25055 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
25056 offsets, so keep these symbols. */
25057 if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
25058 && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
25059 return FALSE;
25060
25061 return TRUE;
25062 }
25063 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
25064
25065 #ifdef OBJ_ELF
25066 const char *
25067 elf32_arm_target_format (void)
25068 {
25069 #ifdef TE_SYMBIAN
25070 return (target_big_endian
25071 ? "elf32-bigarm-symbian"
25072 : "elf32-littlearm-symbian");
25073 #elif defined (TE_VXWORKS)
25074 return (target_big_endian
25075 ? "elf32-bigarm-vxworks"
25076 : "elf32-littlearm-vxworks");
25077 #elif defined (TE_NACL)
25078 return (target_big_endian
25079 ? "elf32-bigarm-nacl"
25080 : "elf32-littlearm-nacl");
25081 #else
25082 if (target_big_endian)
25083 return "elf32-bigarm";
25084 else
25085 return "elf32-littlearm";
25086 #endif
25087 }
25088
/* Called for each symbol when the output file is written; simply
   delegates to the generic ELF symbol-frobbing machinery.  */

void
armelf_frob_symbol (symbolS * symp,
		    int * puntp)
{
  elf_frob_symbol (symp, puntp);
}
25095 #endif
25096
25097 /* MD interface: Finalization. */
25098
25099 void
25100 arm_cleanup (void)
25101 {
25102 literal_pool * pool;
25103
25104 /* Ensure that all the IT blocks are properly closed. */
25105 check_it_blocks_finished ();
25106
25107 for (pool = list_of_pools; pool; pool = pool->next)
25108 {
25109 /* Put it at the end of the relevant section. */
25110 subseg_set (pool->section, pool->sub_section);
25111 #ifdef OBJ_ELF
25112 arm_elf_change_section ();
25113 #endif
25114 s_ltorg (0);
25115 }
25116 }
25117
25118 #ifdef OBJ_ELF
25119 /* Remove any excess mapping symbols generated for alignment frags in
25120 SEC. We may have created a mapping symbol before a zero byte
25121 alignment; remove it if there's a mapping symbol after the
25122 alignment. */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections gas never emitted code/data into.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  /* Walk every frag in the section, looking at the last mapping symbol
     recorded for each frag.  */
  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on the boundary with the following frag; scan
	 forward over empty frags to decide whether it is redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
25183 #endif
25184
25185 /* Adjust the symbol table. This marks Thumb symbols as distinct from
25186 ARM ones. */
25187
void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  /* For COFF, Thumb-ness is encoded in the storage class: map each
     Thumb symbol's class to its C_THUMB* counterpart.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      /* Interworking symbols get all n_flags bits set.  */
      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  /* Mapping symbols ($a, $t, $d, ...) are left untouched.  */
	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal,
					 ST_BRANCH_TO_THUMB);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
25266
25267 /* MD interface: Initialization. */
25268
25269 static void
25270 set_constant_flonums (void)
25271 {
25272 int i;
25273
25274 for (i = 0; i < NUM_FLOAT_VALS; i++)
25275 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
25276 abort ();
25277 }
25278
25279 /* Auto-select Thumb mode if it's the only available instruction set for the
25280 given architecture. */
25281
25282 static void
25283 autoselect_thumb_from_cpu_variant (void)
25284 {
25285 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
25286 opcode_select (16);
25287 }
25288
25289 void
25290 md_begin (void)
25291 {
25292 unsigned mach;
25293 unsigned int i;
25294
25295 if ( (arm_ops_hsh = hash_new ()) == NULL
25296 || (arm_cond_hsh = hash_new ()) == NULL
25297 || (arm_shift_hsh = hash_new ()) == NULL
25298 || (arm_psr_hsh = hash_new ()) == NULL
25299 || (arm_v7m_psr_hsh = hash_new ()) == NULL
25300 || (arm_reg_hsh = hash_new ()) == NULL
25301 || (arm_reloc_hsh = hash_new ()) == NULL
25302 || (arm_barrier_opt_hsh = hash_new ()) == NULL)
25303 as_fatal (_("virtual memory exhausted"));
25304
25305 for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
25306 hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
25307 for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
25308 hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
25309 for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
25310 hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
25311 for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
25312 hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
25313 for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
25314 hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
25315 (void *) (v7m_psrs + i));
25316 for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
25317 hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
25318 for (i = 0;
25319 i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
25320 i++)
25321 hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
25322 (void *) (barrier_opt_names + i));
25323 #ifdef OBJ_ELF
25324 for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
25325 {
25326 struct reloc_entry * entry = reloc_names + i;
25327
25328 if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
25329 /* This makes encode_branch() use the EABI versions of this relocation. */
25330 entry->reloc = BFD_RELOC_UNUSED;
25331
25332 hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
25333 }
25334 #endif
25335
25336 set_constant_flonums ();
25337
25338 /* Set the cpu variant based on the command-line options. We prefer
25339 -mcpu= over -march= if both are set (as for GCC); and we prefer
25340 -mfpu= over any other way of setting the floating point unit.
25341 Use of legacy options with new options are faulted. */
25342 if (legacy_cpu)
25343 {
25344 if (mcpu_cpu_opt || march_cpu_opt)
25345 as_bad (_("use of old and new-style options to set CPU type"));
25346
25347 selected_arch = *legacy_cpu;
25348 }
25349 else if (mcpu_cpu_opt)
25350 {
25351 selected_arch = *mcpu_cpu_opt;
25352 selected_ext = *mcpu_ext_opt;
25353 }
25354 else if (march_cpu_opt)
25355 {
25356 selected_arch = *march_cpu_opt;
25357 selected_ext = *march_ext_opt;
25358 }
25359 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
25360
25361 if (legacy_fpu)
25362 {
25363 if (mfpu_opt)
25364 as_bad (_("use of old and new-style options to set FPU type"));
25365
25366 selected_fpu = *legacy_fpu;
25367 }
25368 else if (mfpu_opt)
25369 selected_fpu = *mfpu_opt;
25370 else
25371 {
25372 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
25373 || defined (TE_NetBSD) || defined (TE_VXWORKS))
25374 /* Some environments specify a default FPU. If they don't, infer it
25375 from the processor. */
25376 if (mcpu_fpu_opt)
25377 selected_fpu = *mcpu_fpu_opt;
25378 else if (march_fpu_opt)
25379 selected_fpu = *march_fpu_opt;
25380 #else
25381 selected_fpu = fpu_default;
25382 #endif
25383 }
25384
25385 if (ARM_FEATURE_ZERO (selected_fpu))
25386 {
25387 if (!no_cpu_selected ())
25388 selected_fpu = fpu_default;
25389 else
25390 selected_fpu = fpu_arch_fpa;
25391 }
25392
25393 #ifdef CPU_DEFAULT
25394 if (ARM_FEATURE_ZERO (selected_arch))
25395 {
25396 selected_arch = cpu_default;
25397 selected_cpu = selected_arch;
25398 }
25399 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
25400 #else
25401 /* Autodection of feature mode: allow all features in cpu_variant but leave
25402 selected_cpu unset. It will be set in aeabi_set_public_attributes ()
25403 after all instruction have been processed and we can decide what CPU
25404 should be selected. */
25405 if (ARM_FEATURE_ZERO (selected_arch))
25406 ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
25407 else
25408 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
25409 #endif
25410
25411 autoselect_thumb_from_cpu_variant ();
25412
25413 arm_arch_used = thumb_arch_used = arm_arch_none;
25414
25415 #if defined OBJ_COFF || defined OBJ_ELF
25416 {
25417 unsigned int flags = 0;
25418
25419 #if defined OBJ_ELF
25420 flags = meabi_flags;
25421
25422 switch (meabi_flags)
25423 {
25424 case EF_ARM_EABI_UNKNOWN:
25425 #endif
25426 /* Set the flags in the private structure. */
25427 if (uses_apcs_26) flags |= F_APCS26;
25428 if (support_interwork) flags |= F_INTERWORK;
25429 if (uses_apcs_float) flags |= F_APCS_FLOAT;
25430 if (pic_code) flags |= F_PIC;
25431 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
25432 flags |= F_SOFT_FLOAT;
25433
25434 switch (mfloat_abi_opt)
25435 {
25436 case ARM_FLOAT_ABI_SOFT:
25437 case ARM_FLOAT_ABI_SOFTFP:
25438 flags |= F_SOFT_FLOAT;
25439 break;
25440
25441 case ARM_FLOAT_ABI_HARD:
25442 if (flags & F_SOFT_FLOAT)
25443 as_bad (_("hard-float conflicts with specified fpu"));
25444 break;
25445 }
25446
25447 /* Using pure-endian doubles (even if soft-float). */
25448 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
25449 flags |= F_VFP_FLOAT;
25450
25451 #if defined OBJ_ELF
25452 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
25453 flags |= EF_ARM_MAVERICK_FLOAT;
25454 break;
25455
25456 case EF_ARM_EABI_VER4:
25457 case EF_ARM_EABI_VER5:
25458 /* No additional flags to set. */
25459 break;
25460
25461 default:
25462 abort ();
25463 }
25464 #endif
25465 bfd_set_private_flags (stdoutput, flags);
25466
25467 /* We have run out flags in the COFF header to encode the
25468 status of ATPCS support, so instead we create a dummy,
25469 empty, debug section called .arm.atpcs. */
25470 if (atpcs)
25471 {
25472 asection * sec;
25473
25474 sec = bfd_make_section (stdoutput, ".arm.atpcs");
25475
25476 if (sec != NULL)
25477 {
25478 bfd_set_section_flags
25479 (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
25480 bfd_set_section_size (stdoutput, sec, 0);
25481 bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
25482 }
25483 }
25484 }
25485 #endif
25486
25487 /* Record the CPU type as well. */
25488 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
25489 mach = bfd_mach_arm_iWMMXt2;
25490 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
25491 mach = bfd_mach_arm_iWMMXt;
25492 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
25493 mach = bfd_mach_arm_XScale;
25494 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
25495 mach = bfd_mach_arm_ep9312;
25496 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
25497 mach = bfd_mach_arm_5TE;
25498 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
25499 {
25500 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
25501 mach = bfd_mach_arm_5T;
25502 else
25503 mach = bfd_mach_arm_5;
25504 }
25505 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
25506 {
25507 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
25508 mach = bfd_mach_arm_4T;
25509 else
25510 mach = bfd_mach_arm_4;
25511 }
25512 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
25513 mach = bfd_mach_arm_3M;
25514 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
25515 mach = bfd_mach_arm_3;
25516 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
25517 mach = bfd_mach_arm_2a;
25518 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
25519 mach = bfd_mach_arm_2;
25520 else
25521 mach = bfd_mach_arm_unknown;
25522
25523 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
25524 }
25525
25526 /* Command line processing. */
25527
25528 /* md_parse_option
25529 Invocation line includes a switch not recognized by the base assembler.
25530 See if it's a processor-specific option.
25531
25532 This routine is somewhat complicated by the need for backwards
25533 compatibility (since older releases of gcc can't be changed).
25534 The new options try to make the interface as compatible as
25535 possible with GCC.
25536
25537 New options (supported) are:
25538
25539 -mcpu=<cpu name> Assemble for selected processor
25540 -march=<architecture name> Assemble for selected architecture
25541 -mfpu=<fpu architecture> Assemble for selected FPU.
25542 -EB/-mbig-endian Big-endian
25543 -EL/-mlittle-endian Little-endian
25544 -k Generate PIC code
25545 -mthumb Start in Thumb mode
25546 -mthumb-interwork Code supports ARM/Thumb interworking
25547
25548 -m[no-]warn-deprecated Warn about deprecated features
25549 -m[no-]warn-syms Warn when symbols match instructions
25550
25551 For now we will also provide support for:
25552
25553 -mapcs-32 32-bit Program counter
25554 -mapcs-26 26-bit Program counter
   -mapcs-float		Floats passed in FP registers
25556 -mapcs-reentrant Reentrant code
25557 -matpcs
25558 (sometime these will probably be replaced with -mapcs=<list of options>
25559 and -matpcs=<list of options>)
25560
   The remaining options are only supported for backwards compatibility.
25562 Cpu variants, the arm part is optional:
25563 -m[arm]1 Currently not supported.
25564 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
25565 -m[arm]3 Arm 3 processor
25566 -m[arm]6[xx], Arm 6 processors
25567 -m[arm]7[xx][t][[d]m] Arm 7 processors
25568 -m[arm]8[10] Arm 8 processors
25569 -m[arm]9[20][tdmi] Arm 9 processors
25570 -mstrongarm[110[0]] StrongARM processors
25571 -mxscale XScale processors
25572 -m[arm]v[2345[t[e]]] Arm architectures
25573 -mall All (except the ARM1)
25574 FP variants:
25575 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
25576 -mfpe-old (No float load/store multiples)
25577 -mvfpxd VFP Single precision
25578 -mvfp All VFP
25579 -mno-fpu Disable all floating point instructions
25580
25581 The following CPU names are recognized:
25582 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
25583 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
   arm700i, arm710, arm710t, arm720, arm720t, arm740t, arm710c,
25585 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
25586 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
   arm10t, arm10e, arm1020t, arm1020e, arm10200e,
25588 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
25589
25590 */
25591
/* Machine-dependent short options: -m<name> (parsed by md_parse_option)
   and -k (generate PIC code).  */
const char * md_shortopts = "m:k";
25593
/* Long-option values.  On a bi-endian target both -EB and -EL are
   available; on a single-endian target only the option matching the
   configured byte order is defined, so the other is never accepted.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
/* Value returned for the "fix-v4bx" long option (see md_longopts).  */
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
25605
/* Machine-dependent long options.  The endianness switches are only
   registered when the corresponding OPTION_EB/OPTION_EL value is
   defined for this target configuration.  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
25619
/* Entry in the table of simple machine-dependent options (arm_opts
   below): when OPTION matches, *VAR is set to VALUE; DEPRECATED, if
   non-NULL, is printed as a deprecation warning.  */
struct arm_option_table
{
  const char * option;		/* Option name to match.  */
  const char * help;		/* Help information.  */
  int * var;		/* Variable to change.  */
  int value;		/* What to change it to.  */
  const char * deprecated;	/* If non-null, print this message.  */
};
25628
25629 struct arm_option_table arm_opts[] =
25630 {
25631 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
25632 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
25633 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
25634 &support_interwork, 1, NULL},
25635 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
25636 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
25637 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
25638 1, NULL},
25639 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
25640 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
25641 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
25642 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
25643 NULL},
25644
25645 /* These are recognized by the assembler, but have no affect on code. */
25646 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
25647 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
25648
25649 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
25650 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
25651 &warn_on_deprecated, 0, NULL},
25652 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL},
25653 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL},
25654 {NULL, NULL, NULL, 0, NULL}
25655 };
25656
/* Entry in the table of legacy -m<cpu>/-m<arch>/-m<fpu> options
   (arm_legacy_opts below): matching OPTION points *VAR at VALUE and,
   when DEPRECATED is non-NULL, prints it as the suggested replacement.  */
struct arm_legacy_option_table
{
  const char * option;			/* Option name to match.  */
  const arm_feature_set ** var;		/* Variable to change.  */
  const arm_feature_set value;		/* What to change it to.  */
  const char * deprecated;		/* If non-null, print this message.  */
};
25664
/* Legacy option table mapping old-style -m<cpu>/-m<arch>/-m<fpu>
   switches to a feature set, with the modern spelling to suggest.  */
const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall",	 &legacy_cpu, ARM_ANY,	    N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.  */
  {"mfpe-old",	 &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu",	 &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
25777
/* Entry in the -mcpu= table (arm_cpus below).  */
struct arm_cpu_option_table
{
  const char * name;		/* CPU name accepted by -mcpu=.  */
  size_t name_len;		/* strlen (name), precomputed.  */
  const arm_feature_set value;	/* Core architecture feature set.  */
  const arm_feature_set ext;	/* Extension features this CPU implies.  */
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...	*/
  const arm_feature_set default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char * canonical_name;
};
25791
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
/* Entry layout: NAME, strlen (NAME), core features, extension features,
   default FPU, canonical name (NULL => NAME upper-cased).  */
#define ARM_CPU_OPT(N, CN, V, E, DF) { N, sizeof (N) - 1, V, E, DF, CN }

static const struct arm_cpu_option_table arm_cpus[] =
{
  ARM_CPU_OPT ("all", NULL, ARM_ANY,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm1", NULL, ARM_ARCH_V1,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm2", NULL, ARM_ARCH_V2,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm250", NULL, ARM_ARCH_V2S,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm3", NULL, ARM_ARCH_V2S,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm6", NULL, ARM_ARCH_V3,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm60", NULL, ARM_ARCH_V3,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm600", NULL, ARM_ARCH_V3,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm610", NULL, ARM_ARCH_V3,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm620", NULL, ARM_ARCH_V3,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7", NULL, ARM_ARCH_V3,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7m", NULL, ARM_ARCH_V3M,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7d", NULL, ARM_ARCH_V3,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7dm", NULL, ARM_ARCH_V3M,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7di", NULL, ARM_ARCH_V3,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7dmi", NULL, ARM_ARCH_V3M,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm70", NULL, ARM_ARCH_V3,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm700", NULL, ARM_ARCH_V3,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm700i", NULL, ARM_ARCH_V3,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710", NULL, ARM_ARCH_V3,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710t", NULL, ARM_ARCH_V4T,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm720", NULL, ARM_ARCH_V3,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm720t", NULL, ARM_ARCH_V4T,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm740t", NULL, ARM_ARCH_V4T,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710c", NULL, ARM_ARCH_V3,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7100", NULL, ARM_ARCH_V3,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7500", NULL, ARM_ARCH_V3,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7500fe", NULL, ARM_ARCH_V3,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7t", NULL, ARM_ARCH_V4T,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7tdmi", NULL, ARM_ARCH_V4T,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7tdmi-s", NULL, ARM_ARCH_V4T,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm8", NULL, ARM_ARCH_V4,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm810", NULL, ARM_ARCH_V4,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm", NULL, ARM_ARCH_V4,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1", NULL, ARM_ARCH_V4,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm110", NULL, ARM_ARCH_V4,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1100", NULL, ARM_ARCH_V4,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1110", NULL, ARM_ARCH_V4,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm9", NULL, ARM_ARCH_V4T,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm920", "ARM920T", ARM_ARCH_V4T,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm920t", NULL, ARM_ARCH_V4T,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm922t", NULL, ARM_ARCH_V4T,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm940t", NULL, ARM_ARCH_V4T,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm9tdmi", NULL, ARM_ARCH_V4T,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("fa526", NULL, ARM_ARCH_V4,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),
  ARM_CPU_OPT ("fa626", NULL, ARM_ARCH_V4,
               ARM_ARCH_NONE,
               FPU_ARCH_FPA),

  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.	 */
  ARM_CPU_OPT ("arm9e-r0", NULL, ARM_ARCH_V5TExP,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm9e", NULL, ARM_ARCH_V5TE,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ej", "ARM926EJ-S", ARM_ARCH_V5TEJ,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ejs", "ARM926EJ-S", ARM_ARCH_V5TEJ,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ej-s", NULL, ARM_ARCH_V5TEJ,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e-r0", NULL, ARM_ARCH_V5TExP,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e", "ARM946E-S", ARM_ARCH_V5TE,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e-s", NULL, ARM_ARCH_V5TE,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e-r0", NULL, ARM_ARCH_V5TExP,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e", "ARM966E-S", ARM_ARCH_V5TE,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e-s", NULL, ARM_ARCH_V5TE,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm968e-s", NULL, ARM_ARCH_V5TE,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm10t", NULL, ARM_ARCH_V5T,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm10tdmi", NULL, ARM_ARCH_V5T,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm10e", NULL, ARM_ARCH_V5TE,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1020", "ARM1020E", ARM_ARCH_V5TE,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1020t", NULL, ARM_ARCH_V5T,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm1020e", NULL, ARM_ARCH_V5TE,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1022e", NULL, ARM_ARCH_V5TE,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1026ejs", "ARM1026EJ-S", ARM_ARCH_V5TEJ,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1026ej-s", NULL, ARM_ARCH_V5TEJ,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa606te", NULL, ARM_ARCH_V5TE,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa616te", NULL, ARM_ARCH_V5TE,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa626te", NULL, ARM_ARCH_V5TE,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fmp626", NULL, ARM_ARCH_V5TE,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa726te", NULL, ARM_ARCH_V5TE,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1136js", "ARM1136J-S", ARM_ARCH_V6,
               ARM_ARCH_NONE,
               FPU_NONE),
  ARM_CPU_OPT ("arm1136j-s", NULL, ARM_ARCH_V6,
               ARM_ARCH_NONE,
               FPU_NONE),
  ARM_CPU_OPT ("arm1136jfs", "ARM1136JF-S", ARM_ARCH_V6,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1136jf-s", NULL, ARM_ARCH_V6,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("mpcore", "MPCore", ARM_ARCH_V6K,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("mpcorenovfp", "MPCore", ARM_ARCH_V6K,
               ARM_ARCH_NONE,
               FPU_NONE),
  ARM_CPU_OPT ("arm1156t2-s", NULL, ARM_ARCH_V6T2,
               ARM_ARCH_NONE,
               FPU_NONE),
  ARM_CPU_OPT ("arm1156t2f-s", NULL, ARM_ARCH_V6T2,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1176jz-s", NULL, ARM_ARCH_V6KZ,
               ARM_ARCH_NONE,
               FPU_NONE),
  ARM_CPU_OPT ("arm1176jzf-s", NULL, ARM_ARCH_V6KZ,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("cortex-a5", "Cortex-A5", ARM_ARCH_V7A,
               ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
               FPU_NONE),
  ARM_CPU_OPT ("cortex-a7", "Cortex-A7", ARM_ARCH_V7VE,
               ARM_ARCH_NONE,
               FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a8", "Cortex-A8", ARM_ARCH_V7A,
               ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
               ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
  ARM_CPU_OPT ("cortex-a9", "Cortex-A9", ARM_ARCH_V7A,
               ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
               ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
  ARM_CPU_OPT ("cortex-a12", "Cortex-A12", ARM_ARCH_V7VE,
               ARM_ARCH_NONE,
               FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a15", "Cortex-A15", ARM_ARCH_V7VE,
               ARM_ARCH_NONE,
               FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a17", "Cortex-A17", ARM_ARCH_V7VE,
               ARM_ARCH_NONE,
               FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a32", "Cortex-A32", ARM_ARCH_V8A,
               ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
               FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a35", "Cortex-A35", ARM_ARCH_V8A,
               ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
               FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a53", "Cortex-A53", ARM_ARCH_V8A,
               ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
               FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a55", "Cortex-A55", ARM_ARCH_V8_2A,
               ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
               FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-a57", "Cortex-A57", ARM_ARCH_V8A,
               ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
               FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a72", "Cortex-A72", ARM_ARCH_V8A,
               ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
               FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a73", "Cortex-A73", ARM_ARCH_V8A,
               ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
               FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a75", "Cortex-A75", ARM_ARCH_V8_2A,
               ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
               FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-r4", "Cortex-R4", ARM_ARCH_V7R,
               ARM_ARCH_NONE,
               FPU_NONE),
  ARM_CPU_OPT ("cortex-r4f", "Cortex-R4F", ARM_ARCH_V7R,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r5", "Cortex-R5", ARM_ARCH_V7R,
               ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
               FPU_NONE),
  ARM_CPU_OPT ("cortex-r7", "Cortex-R7", ARM_ARCH_V7R,
               ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
               FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r8", "Cortex-R8", ARM_ARCH_V7R,
               ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
               FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r52", "Cortex-R52", ARM_ARCH_V8R,
               ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
               FPU_ARCH_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-m33", "Cortex-M33", ARM_ARCH_V8M_MAIN,
               ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
               FPU_NONE),
  ARM_CPU_OPT ("cortex-m23", "Cortex-M23", ARM_ARCH_V8M_BASE,
               ARM_ARCH_NONE,
               FPU_NONE),
  ARM_CPU_OPT ("cortex-m7", "Cortex-M7", ARM_ARCH_V7EM,
               ARM_ARCH_NONE,
               FPU_NONE),
  ARM_CPU_OPT ("cortex-m4", "Cortex-M4", ARM_ARCH_V7EM,
               ARM_ARCH_NONE,
               FPU_NONE),
  ARM_CPU_OPT ("cortex-m3", "Cortex-M3", ARM_ARCH_V7M,
               ARM_ARCH_NONE,
               FPU_NONE),
  ARM_CPU_OPT ("cortex-m1", "Cortex-M1", ARM_ARCH_V6SM,
               ARM_ARCH_NONE,
               FPU_NONE),
  ARM_CPU_OPT ("cortex-m0", "Cortex-M0", ARM_ARCH_V6SM,
               ARM_ARCH_NONE,
               FPU_NONE),
  ARM_CPU_OPT ("cortex-m0plus", "Cortex-M0+", ARM_ARCH_V6SM,
               ARM_ARCH_NONE,
               FPU_NONE),
  ARM_CPU_OPT ("exynos-m1", "Samsung Exynos M1", ARM_ARCH_V8A,
               ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
               FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),

  /* ??? XSCALE is really an architecture.  */
  ARM_CPU_OPT ("xscale", NULL, ARM_ARCH_XSCALE,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V2),

  /* ??? iwmmxt is not a processor.  */
  ARM_CPU_OPT ("iwmmxt", NULL, ARM_ARCH_IWMMXT,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("iwmmxt2", NULL, ARM_ARCH_IWMMXT2,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("i80200", NULL, ARM_ARCH_XSCALE,
               ARM_ARCH_NONE,
               FPU_ARCH_VFP_V2),

  /* Maverick.  */
  ARM_CPU_OPT ("ep9312", "ARM920T",
               ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
               ARM_ARCH_NONE, FPU_ARCH_MAVERICK),

  /* Marvell processors.  */
  ARM_CPU_OPT ("marvell-pj4", NULL, ARM_ARCH_V7A,
               ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
               FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("marvell-whitney", NULL, ARM_ARCH_V7A,
               ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
               FPU_ARCH_NEON_VFP_V4),

  /* APM X-Gene family.  */
  ARM_CPU_OPT ("xgene1", "APM X-Gene 1", ARM_ARCH_V8A,
               ARM_ARCH_NONE,
               FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("xgene2", "APM X-Gene 2", ARM_ARCH_V8A,
               ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
               FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),

  /* Sentinel.  */
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
};
#undef ARM_CPU_OPT
26178
/* Entry in the -march= table (arm_archs below).  */
struct arm_arch_option_table
{
  const char * name;		/* Architecture name accepted by -march=.  */
  size_t name_len;		/* strlen (name), precomputed.  */
  const arm_feature_set value;	/* Features enabled by this architecture.  */
  const arm_feature_set default_fpu;	/* FPU assumed when -mfpu= absent.  */
};
26186
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
/* Entry layout: NAME, strlen (NAME), feature set, default FPU.  */
#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
26190
26191 static const struct arm_arch_option_table arm_archs[] =
26192 {
26193 ARM_ARCH_OPT ("all", ARM_ANY, FPU_ARCH_FPA),
26194 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1, FPU_ARCH_FPA),
26195 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2, FPU_ARCH_FPA),
26196 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA),
26197 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA),
26198 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3, FPU_ARCH_FPA),
26199 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA),
26200 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4, FPU_ARCH_FPA),
26201 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA),
26202 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA),
26203 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA),
26204 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5, FPU_ARCH_VFP),
26205 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP),
26206 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP),
26207 ARM_ARCH_OPT ("armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP),
26208 ARM_ARCH_OPT ("armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP),
26209 ARM_ARCH_OPT ("armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP),
26210 ARM_ARCH_OPT ("armv6", ARM_ARCH_V6, FPU_ARCH_VFP),
26211 ARM_ARCH_OPT ("armv6j", ARM_ARCH_V6, FPU_ARCH_VFP),
26212 ARM_ARCH_OPT ("armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP),
26213 ARM_ARCH_OPT ("armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP),
26214 /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
26215 kept to preserve existing behaviour. */
26216 ARM_ARCH_OPT ("armv6kz", ARM_ARCH_V6KZ, FPU_ARCH_VFP),
26217 ARM_ARCH_OPT ("armv6zk", ARM_ARCH_V6KZ, FPU_ARCH_VFP),
26218 ARM_ARCH_OPT ("armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP),
26219 ARM_ARCH_OPT ("armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP),
26220 ARM_ARCH_OPT ("armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP),
26221 /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
26222 kept to preserve existing behaviour. */
26223 ARM_ARCH_OPT ("armv6kzt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
26224 ARM_ARCH_OPT ("armv6zkt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
26225 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M, FPU_ARCH_VFP),
26226 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM, FPU_ARCH_VFP),
26227 ARM_ARCH_OPT ("armv7", ARM_ARCH_V7, FPU_ARCH_VFP),
26228 /* The official spelling of the ARMv7 profile variants is the dashed form.
26229 Accept the non-dashed form for compatibility with old toolchains. */
26230 ARM_ARCH_OPT ("armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP),
26231 ARM_ARCH_OPT ("armv7ve", ARM_ARCH_V7VE, FPU_ARCH_VFP),
26232 ARM_ARCH_OPT ("armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP),
26233 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP),
26234 ARM_ARCH_OPT ("armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP),
26235 ARM_ARCH_OPT ("armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP),
26236 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP),
26237 ARM_ARCH_OPT ("armv7e-m", ARM_ARCH_V7EM, FPU_ARCH_VFP),
26238 ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE, FPU_ARCH_VFP),
26239 ARM_ARCH_OPT ("armv8-m.main", ARM_ARCH_V8M_MAIN, FPU_ARCH_VFP),
26240 ARM_ARCH_OPT ("armv8-a", ARM_ARCH_V8A, FPU_ARCH_VFP),
26241 ARM_ARCH_OPT ("armv8.1-a", ARM_ARCH_V8_1A, FPU_ARCH_VFP),
26242 ARM_ARCH_OPT ("armv8.2-a", ARM_ARCH_V8_2A, FPU_ARCH_VFP),
26243 ARM_ARCH_OPT ("armv8.3-a", ARM_ARCH_V8_3A, FPU_ARCH_VFP),
26244 ARM_ARCH_OPT ("armv8-r", ARM_ARCH_V8R, FPU_ARCH_VFP),
26245 ARM_ARCH_OPT ("armv8.4-a", ARM_ARCH_V8_4A, FPU_ARCH_VFP),
26246 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP),
26247 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
26248 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
26249 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
26250 };
26251 #undef ARM_ARCH_OPT
26252
/* ISA extensions in the co-processor and main instruction set space.  */

/* One entry of the "+extension" option table used by -mcpu=/-march=
   suffix parsing (see arm_parse_extension).  */
struct arm_option_extension_value_table
{
  const char * name;			/* Extension name, e.g. "crypto".  */
  size_t name_len;			/* Precomputed strlen (name).  */
  const arm_feature_set merge_value;	/* Features added by "+<name>".  */
  const arm_feature_set clear_value;	/* Features removed by "+no<name>".  */
  /* List of architectures for which an extension is available.  ARM_ARCH_NONE
     indicates that an extension is available for all architectures while
     ARM_ANY marks an empty entry.  */
  const arm_feature_set allowed_archs[2];
};
26266
/* The following table must be in alphabetical order with a NULL last entry.
   arm_parse_extension relies on the alphabetical order to enforce that the
   user lists extensions in order; duplicate names (see "idiv") are only
   reached by build attribute selection, never by option parsing.  */

#define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
#define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }

static const struct arm_option_extension_value_table arm_extensions[] =
{
  ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	       ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8,
	       ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD),
	       ARM_ARCH_V8_2A),
  ARM_EXT_OPT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	       ARM_FEATURE_CORE (ARM_EXT_V7M, ARM_EXT2_V8M)),
  ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("fp16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       ARM_ARCH_V8_2A),
  ARM_EXT_OPT ("fp16fml", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
						 | ARM_EXT2_FP16_FML),
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
				      | ARM_EXT2_FP16_FML),
	       ARM_ARCH_V8_2A),
  ARM_EXT_OPT2 ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
		ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
		ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
		ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
  /* Duplicate entry for the purpose of allowing ARMv7 to match in presence of
     Thumb divide instruction.  Due to this having the same name as the
     previous entry, this will be ignored when doing command-line parsing and
     only considered by build attribute selection code.  */
  ARM_EXT_OPT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V7)),
  ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
	       ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ARCH_NONE),
  ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
	       ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ARCH_NONE),
  ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
	       ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ARCH_NONE),
  ARM_EXT_OPT2 ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
		ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
		ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
		ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
  ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
  ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
	       ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("ras", ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS),
	       ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_RAS, 0),
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8_1,
	       ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA),
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT2 ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
		ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
		ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
		ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8,
	       ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
				     | ARM_EXT_DIV),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
	       ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ARCH_NONE),
  /* Sentinel.  */
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, { ARM_ARCH_NONE, ARM_ARCH_NONE } }
};
#undef ARM_EXT_OPT
26344
/* ISA floating-point and Advanced SIMD extensions.  */

/* One entry of the -mfpu= option table: FPU name to feature set.  */
struct arm_option_fpu_value_table
{
  const char * name;		/* FPU name, e.g. "vfpv3-d16".  */
  const arm_feature_set value;	/* Features enabled by this FPU.  */
};
26351
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  Matched by exact string comparison in
   arm_parse_fpu.  */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* Undocumented, use vfpv3.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2}, /* Undocumented, use arm1136jf-s.  */
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-vfpv3",	FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {"vfpv4",		FPU_ARCH_VFP_V4},
  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
  {"fpv5-d16",		FPU_ARCH_VFP_V5D16},
  {"fpv5-sp-d16",	FPU_ARCH_VFP_V5_SP_D16},
  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
  {"crypto-neon-fp-armv8",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
  {"neon-fp-armv8.1",	FPU_ARCH_NEON_VFP_ARMV8_1},
  {"crypto-neon-fp-armv8.1",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
  /* Sentinel.  */
  {NULL,		ARM_ARCH_NONE}
};
26402
/* Generic name -> integer value mapping, used for the -mfloat-abi= and
   -meabi= option tables below.  */
struct arm_option_value_table
{
  const char *name;	/* Option value name.  */
  long value;		/* Corresponding numeric value.  */
};
26408
/* Recognized values for the -mfloat-abi= option.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  /* Sentinel.  */
  {NULL,	0}
};
26416
#ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  /* Sentinel.  */
  {NULL,	0}
};
#endif
26427
/* One entry of the long-option table consumed by md_parse_option.
   The first character of OPTION is matched against the short-option
   character C; the remainder is matched as a prefix of the argument.  */
struct arm_long_option_table
{
  const char * option;			/* Substring to match.  */
  const char * help;			/* Help information.  */
  int (* func) (const char * subopt);	/* Function to decode sub-option.  */
  const char * deprecated;		/* If non-null, print this message.  */
};
26435
/* Parse STR, a string of "+extension" suffixes taken from a -mcpu= or
   -march= option, against the base architecture feature set *OPT_SET.
   Merge the features each extension adds (or remove those a "+no..."
   form clears) into *EXT_SET.  Returns TRUE on success; on error a
   diagnostic is issued and FALSE is returned.  */

static bfd_boolean
arm_parse_extension (const char *str, const arm_feature_set *opt_set,
		     arm_feature_set *ext_set)
{
  /* We insist on extensions being specified in alphabetical order, and with
     extensions being added before being removed.  We achieve this by having
     the global ARM_EXTENSIONS table in alphabetical order, and using the
     ADDING_VALUE variable to indicate whether we are adding an extension (1)
     or removing it (0) and only allowing it to change in the order
     -1 -> 1 -> 0.  */
  const struct arm_option_extension_value_table * opt = NULL;
  const arm_feature_set arm_any = ARM_ANY;
  int adding_value = -1;

  while (str != NULL && *str != 0)
    {
      const char *ext;
      size_t len;

      /* Every extension must be introduced by a '+'.  */
      if (*str != '+')
	{
	  as_bad (_("invalid architectural extension"));
	  return FALSE;
	}

      str++;
      ext = strchr (str, '+');

      /* LEN covers just this extension name, up to the next '+' (if any).  */
      if (ext != NULL)
	len = ext - str;
      else
	len = strlen (str);

      /* A "no" prefix requests removal of the extension.  */
      if (len >= 2 && strncmp (str, "no", 2) == 0)
	{
	  if (adding_value != 0)
	    {
	      adding_value = 0;
	      /* Restart the alphabetical scan for the "remove" phase.  */
	      opt = arm_extensions;
	    }

	  len -= 2;
	  str += 2;
	}
      else if (len > 0)
	{
	  if (adding_value == -1)
	    {
	      adding_value = 1;
	      opt = arm_extensions;
	    }
	  else if (adding_value != 1)
	    {
	      as_bad (_("must specify extensions to add before specifying "
			"those to remove"));
	      return FALSE;
	    }
	}

      if (len == 0)
	{
	  as_bad (_("missing architectural extension"));
	  return FALSE;
	}

      gas_assert (adding_value != -1);
      gas_assert (opt != NULL);

      /* Scan over the options table trying to find an exact match.  Note
	 that OPT resumes from where the previous match left off, which is
	 what enforces alphabetical ordering.  */
      for (; opt->name != NULL; opt++)
	if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	  {
	    int i, nb_allowed_archs =
	      sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
	    /* Check we can apply the extension to this architecture.  */
	    for (i = 0; i < nb_allowed_archs; i++)
	      {
		/* Empty entry.  */
		if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
		  continue;
		if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *opt_set))
		  break;
	      }
	    if (i == nb_allowed_archs)
	      {
		as_bad (_("extension does not apply to the base architecture"));
		return FALSE;
	      }

	    /* Add or remove the extension.  */
	    if (adding_value)
	      ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
	    else
	      ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);

	    /* Allowing Thumb division instructions for ARMv7 in autodetection
	       rely on this break so that duplicate extensions (extensions
	       with the same name as a previous extension in the list) are not
	       considered for command-line parsing.  */
	    break;
	  }

      if (opt->name == NULL)
	{
	  /* Did we fail to find an extension because it wasn't specified in
	     alphabetical order, or because it does not exist?  */

	  for (opt = arm_extensions; opt->name != NULL; opt++)
	    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	      break;

	  if (opt->name == NULL)
	    as_bad (_("unknown architectural extension `%s'"), str);
	  else
	    as_bad (_("architectural extensions must be specified in "
		      "alphabetical order"));

	  return FALSE;
	}
      else
	{
	  /* We should skip the extension we've just matched the next time
	     round.  */
	  opt++;
	}

      /* Move on to the next "+ext" (EXT is NULL at end of string).  */
      str = ext;
    };

  return TRUE;
}
26567
/* Parse the argument STR of a -mcpu= option.  Sets the global
   MCPU_CPU_OPT/MCPU_EXT_OPT/MCPU_FPU_OPT selection and records the CPU
   name in SELECTED_CPU_NAME (upper-cased when no canonical name is
   given).  A trailing "+ext..." suffix is handed to
   arm_parse_extension.  Returns TRUE on success, FALSE (with a
   diagnostic) otherwise.  */

static bfd_boolean
arm_parse_cpu (const char *str)
{
  const struct arm_cpu_option_table *opt;
  const char *ext = strchr (str, '+');
  size_t len;

  /* LEN is the length of the CPU name, excluding any "+ext" suffix.  */
  if (ext != NULL)
    len = ext - str;
  else
    len = strlen (str);

  if (len == 0)
    {
      as_bad (_("missing cpu name `%s'"), str);
      return FALSE;
    }

  for (opt = arm_cpus; opt->name != NULL; opt++)
    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
      {
	mcpu_cpu_opt = &opt->value;
	/* MCPU_EXT_OPT is heap-allocated once and reused on later calls.  */
	if (mcpu_ext_opt == NULL)
	  mcpu_ext_opt = XNEW (arm_feature_set);
	*mcpu_ext_opt = opt->ext;
	mcpu_fpu_opt = &opt->default_fpu;
	if (opt->canonical_name)
	  {
	    gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name));
	    strcpy (selected_cpu_name, opt->canonical_name);
	  }
	else
	  {
	    size_t i;

	    /* No canonical name: record the upper-cased table name,
	       truncated to fit SELECTED_CPU_NAME.  */
	    if (len >= sizeof selected_cpu_name)
	      len = (sizeof selected_cpu_name) - 1;

	    for (i = 0; i < len; i++)
	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
	    selected_cpu_name[i] = 0;
	  }

	if (ext != NULL)
	  return arm_parse_extension (ext, mcpu_cpu_opt, mcpu_ext_opt);

	return TRUE;
      }

  as_bad (_("unknown cpu `%s'"), str);
  return FALSE;
}
26620
/* Parse the argument STR of a -march= option.  Sets the global
   MARCH_CPU_OPT/MARCH_EXT_OPT/MARCH_FPU_OPT selection and records the
   architecture name in SELECTED_CPU_NAME.  A trailing "+ext..." suffix
   is handed to arm_parse_extension.  Returns TRUE on success, FALSE
   (with a diagnostic) otherwise.  */

static bfd_boolean
arm_parse_arch (const char *str)
{
  const struct arm_arch_option_table *opt;
  const char *ext = strchr (str, '+');
  size_t len;

  /* LEN is the length of the architecture name, excluding "+ext".  */
  if (ext != NULL)
    len = ext - str;
  else
    len = strlen (str);

  if (len == 0)
    {
      as_bad (_("missing architecture name `%s'"), str);
      return FALSE;
    }

  for (opt = arm_archs; opt->name != NULL; opt++)
    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
      {
	march_cpu_opt = &opt->value;
	/* MARCH_EXT_OPT is heap-allocated once and reused on later calls;
	   architectures start with no extensions enabled.  */
	if (march_ext_opt == NULL)
	  march_ext_opt = XNEW (arm_feature_set);
	*march_ext_opt = arm_arch_none;
	march_fpu_opt = &opt->default_fpu;
	strcpy (selected_cpu_name, opt->name);

	if (ext != NULL)
	  return arm_parse_extension (ext, march_cpu_opt, march_ext_opt);

	return TRUE;
      }

  as_bad (_("unknown architecture `%s'\n"), str);
  return FALSE;
}
26658
26659 static bfd_boolean
26660 arm_parse_fpu (const char * str)
26661 {
26662 const struct arm_option_fpu_value_table * opt;
26663
26664 for (opt = arm_fpus; opt->name != NULL; opt++)
26665 if (streq (opt->name, str))
26666 {
26667 mfpu_opt = &opt->value;
26668 return TRUE;
26669 }
26670
26671 as_bad (_("unknown floating point format `%s'\n"), str);
26672 return FALSE;
26673 }
26674
26675 static bfd_boolean
26676 arm_parse_float_abi (const char * str)
26677 {
26678 const struct arm_option_value_table * opt;
26679
26680 for (opt = arm_float_abis; opt->name != NULL; opt++)
26681 if (streq (opt->name, str))
26682 {
26683 mfloat_abi_opt = opt->value;
26684 return TRUE;
26685 }
26686
26687 as_bad (_("unknown floating point abi `%s'\n"), str);
26688 return FALSE;
26689 }
26690
26691 #ifdef OBJ_ELF
26692 static bfd_boolean
26693 arm_parse_eabi (const char * str)
26694 {
26695 const struct arm_option_value_table *opt;
26696
26697 for (opt = arm_eabis; opt->name != NULL; opt++)
26698 if (streq (opt->name, str))
26699 {
26700 meabi_flags = opt->value;
26701 return TRUE;
26702 }
26703 as_bad (_("unknown EABI `%s'\n"), str);
26704 return FALSE;
26705 }
26706 #endif
26707
26708 static bfd_boolean
26709 arm_parse_it_mode (const char * str)
26710 {
26711 bfd_boolean ret = TRUE;
26712
26713 if (streq ("arm", str))
26714 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
26715 else if (streq ("thumb", str))
26716 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
26717 else if (streq ("always", str))
26718 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
26719 else if (streq ("never", str))
26720 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
26721 else
26722 {
26723 as_bad (_("unknown implicit IT mode `%s', should be "\
26724 "arm, thumb, always, or never."), str);
26725 ret = FALSE;
26726 }
26727
26728 return ret;
26729 }
26730
26731 static bfd_boolean
26732 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED)
26733 {
26734 codecomposer_syntax = TRUE;
26735 arm_comment_chars[0] = ';';
26736 arm_line_separator_chars[0] = 0;
26737 return TRUE;
26738 }
26739
26740 struct arm_long_option_table arm_long_opts[] =
26741 {
26742 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
26743 arm_parse_cpu, NULL},
26744 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
26745 arm_parse_arch, NULL},
26746 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
26747 arm_parse_fpu, NULL},
26748 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
26749 arm_parse_float_abi, NULL},
26750 #ifdef OBJ_ELF
26751 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
26752 arm_parse_eabi, NULL},
26753 #endif
26754 {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
26755 arm_parse_it_mode, NULL},
26756 {"mccs", N_("\t\t\t TI CodeComposer Studio syntax compatibility mode"),
26757 arm_ccs_mode, NULL},
26758 {NULL, NULL, 0, NULL}
26759 };
26760
/* GAS target hook: handle the command-line option C with argument ARG.
   Tries, in order: the hard-wired cases below, the ARM_OPTS short-option
   table, the ARM_LEGACY_OPTS table, and finally the ARM_LONG_OPTS table
   whose entries carry their own sub-option parsers.  Returns non-zero if
   the option was recognized, 0 otherwise.  */

int
md_parse_option (int c, const char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.	*/
      return 0;

    default:
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser, passing it everything after
		 the option prefix.  The "- 1" accounts for the option's
		 first character already having been consumed as C.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
26851
/* GAS target hook: print ARM-specific option help to FP.  Walks the
   short- and long-option tables, then the endianness options (when
   configured) and --fix-v4bx.  */

void
md_show_usage (FILE * fp)
{
  struct arm_option_table *opt;
  struct arm_long_option_table *lopt;

  fprintf (fp, _(" ARM-specific assembler options:\n"));

  /* Short options with help text.  */
  for (opt = arm_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));

  /* Long options with help text.  */
  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));
#endif

  fprintf (fp, _("\
  --fix-v4bx              Allow BX in ARMv4 code\n"));
}
26881
26882 #ifdef OBJ_ELF
26883
/* Associates a Tag_CPU_arch build attribute value (VAL) with the
   feature set of the corresponding architecture (FLAGS).  */
typedef struct
{
  int val;			/* Tag_CPU_arch value, -1 for the sentinel.  */
  arm_feature_set flags;	/* Architecture feature set.  */
} cpu_arch_ver_table;
26889
/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   chronologically for architectures, with an exception for ARMv6-M and
   ARMv6S-M due to legacy reasons.  No new architecture should have a
   special case.  This allows for build attribute selection results to be
   stable when new architectures are added.  Walked in order by
   get_aeabi_cpu_arch_from_fset, so the first suitable entry wins.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
  {0, ARM_ARCH_V1},
  {0, ARM_ARCH_V2},
  {0, ARM_ARCH_V2S},
  {0, ARM_ARCH_V3},
  {0, ARM_ARCH_V3M},
  {1, ARM_ARCH_V4xM},
  {1, ARM_ARCH_V4},
  {2, ARM_ARCH_V4TxM},
  {2, ARM_ARCH_V4T},
  {3, ARM_ARCH_V5xM},
  {3, ARM_ARCH_V5},
  {3, ARM_ARCH_V5TxM},
  {3, ARM_ARCH_V5T},
  {4, ARM_ARCH_V5TExP},
  {4, ARM_ARCH_V5TE},
  {5, ARM_ARCH_V5TEJ},
  {6, ARM_ARCH_V6},
  {7, ARM_ARCH_V6Z},
  {7, ARM_ARCH_V6KZ},
  {9, ARM_ARCH_V6K},
  {8, ARM_ARCH_V6T2},
  {8, ARM_ARCH_V6KT2},
  {8, ARM_ARCH_V6ZT2},
  {8, ARM_ARCH_V6KZT2},

  /* When assembling a file with only ARMv6-M or ARMv6S-M instruction, GNU as
     always selected build attributes to match those of ARMv6-M
     (resp. ARMv6S-M).  However, due to these architectures being a strict
     subset of ARMv7-M in terms of instructions available, ARMv7-M attributes
     would be selected when fully respecting chronology of architectures.
     It is thus necessary to make a special case of ARMv6-M and ARMv6S-M and
     move them before ARMv7 architectures.  */
  {11, ARM_ARCH_V6M},
  {12, ARM_ARCH_V6SM},

  {10, ARM_ARCH_V7},
  {10, ARM_ARCH_V7A},
  {10, ARM_ARCH_V7R},
  {10, ARM_ARCH_V7M},
  {10, ARM_ARCH_V7VE},
  {13, ARM_ARCH_V7EM},
  {14, ARM_ARCH_V8A},
  {14, ARM_ARCH_V8_1A},
  {14, ARM_ARCH_V8_2A},
  {14, ARM_ARCH_V8_3A},
  {16, ARM_ARCH_V8M_BASE},
  {17, ARM_ARCH_V8M_MAIN},
  {15, ARM_ARCH_V8R},
  {14, ARM_ARCH_V8_4A},
  /* Sentinel.  */
  {-1, ARM_ARCH_NONE}
};
26948
26949 /* Set an attribute if it has not already been set by the user. */
26950
26951 static void
26952 aeabi_set_attribute_int (int tag, int value)
26953 {
26954 if (tag < 1
26955 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
26956 || !attributes_set_explicitly[tag])
26957 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
26958 }
26959
26960 static void
26961 aeabi_set_attribute_string (int tag, const char *value)
26962 {
26963 if (tag < 1
26964 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
26965 || !attributes_set_explicitly[tag])
26966 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
26967 }
26968
/* Return whether features in the *NEEDED feature set are available via
   extensions for the architecture whose feature set is *ARCH_FSET.
   Accumulates into a local feature set every extension applicable to
   *ARCH_FSET that contributes at least one needed feature, then checks
   that the union covers all of *NEEDED.  */

static bfd_boolean
have_ext_for_needed_feat_p (const arm_feature_set *arch_fset,
			    const arm_feature_set *needed)
{
  int i, nb_allowed_archs;
  arm_feature_set ext_fset;
  const struct arm_option_extension_value_table *opt;

  ext_fset = arm_arch_none;
  for (opt = arm_extensions; opt->name != NULL; opt++)
    {
      /* Extension does not provide any feature we need.  */
      if (!ARM_CPU_HAS_FEATURE (*needed, opt->merge_value))
	continue;

      nb_allowed_archs =
	sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
      for (i = 0; i < nb_allowed_archs; i++)
	{
	  /* Empty entry.  */
	  if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_arch_any))
	    break;

	  /* Extension is available, add it.  */
	  if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *arch_fset))
	    ARM_MERGE_FEATURE_SETS (ext_fset, ext_fset, opt->merge_value);
	}
    }

  /* Can we enable all features in *needed?  */
  return ARM_FSET_CPU_SUBSET (*needed, ext_fset);
}
27004
/* Select value for Tag_CPU_arch and Tag_CPU_arch_profile build attributes for
   a given architecture feature set *ARCH_EXT_FSET including extension feature
   set *EXT_FSET.  Selection logic used depend on EXACT_MATCH:
   - if true, check for an exact match of the architecture modulo extensions;
   - otherwise, select build attribute value of the first superset
   architecture released so that results remains stable when new architectures
   are added.
   For -march/-mcpu=all the build attribute value of the most featureful
   architecture is returned.  Tag_CPU_arch_profile result is returned in
   PROFILE.  Returns -1 if no suitable architecture is found.  */

static int
get_aeabi_cpu_arch_from_fset (const arm_feature_set *arch_ext_fset,
			      const arm_feature_set *ext_fset,
			      char *profile, int exact_match)
{
  arm_feature_set arch_fset;
  const cpu_arch_ver_table *p_ver, *p_ver_ret = NULL;

  /* Select most featureful architecture with all its extensions if building
     for -march=all as the feature sets used to set build attributes.  */
  if (ARM_FEATURE_EQUAL (*arch_ext_fset, arm_arch_any))
    {
      /* Force revisiting of decision for each new architecture.  */
      gas_assert (MAX_TAG_CPU_ARCH <= TAG_CPU_ARCH_V8M_MAIN);
      *profile = 'A';
      return TAG_CPU_ARCH_V8;
    }

  /* ARCH_FSET is the architecture feature set without the extensions.  */
  ARM_CLEAR_FEATURE (arch_fset, *arch_ext_fset, *ext_fset);

  for (p_ver = cpu_arch_ver; p_ver->val != -1; p_ver++)
    {
      arm_feature_set known_arch_fset;

      /* Compare CPU features only; FPU bits are irrelevant here.  */
      ARM_CLEAR_FEATURE (known_arch_fset, p_ver->flags, fpu_any);
      if (exact_match)
	{
	  /* Base architecture match user-specified architecture and
	     extensions, eg. ARMv6S-M matching -march=armv6-m+os.  */
	  if (ARM_FEATURE_EQUAL (*arch_ext_fset, known_arch_fset))
	    {
	      p_ver_ret = p_ver;
	      goto found;
	    }
	  /* Base architecture match user-specified architecture only
	     (eg. ARMv6-M in the same case as above).  Record it in case we
	     find a match with above condition.  */
	  else if (p_ver_ret == NULL
		   && ARM_FEATURE_EQUAL (arch_fset, known_arch_fset))
	    p_ver_ret = p_ver;
	}
      else
	{

	  /* Architecture has all features wanted.  */
	  if (ARM_FSET_CPU_SUBSET (arch_fset, known_arch_fset))
	    {
	      arm_feature_set added_fset;

	      /* Compute features added by this architecture over the one
		 recorded in p_ver_ret.  */
	      if (p_ver_ret != NULL)
		ARM_CLEAR_FEATURE (added_fset, known_arch_fset,
				   p_ver_ret->flags);
	      /* First architecture that match incl. with extensions, or the
		 only difference in features over the recorded match is
		 features that were optional and are now mandatory.  */
	      if (p_ver_ret == NULL
		  || ARM_FSET_CPU_SUBSET (added_fset, arch_fset))
		{
		  p_ver_ret = p_ver;
		  goto found;
		}
	    }
	  else if (p_ver_ret == NULL)
	    {
	      arm_feature_set needed_ext_fset;

	      ARM_CLEAR_FEATURE (needed_ext_fset, arch_fset, known_arch_fset);

	      /* Architecture has all features needed when using some
		 extensions.  Record it and continue searching in case there
		 exist an architecture providing all needed features without
		 the need for extensions (eg. ARMv6S-M Vs ARMv6-M with
		 OS extension).  */
	      if (have_ext_for_needed_feat_p (&known_arch_fset,
					      &needed_ext_fset))
		p_ver_ret = p_ver;
	    }
	}
    }

  if (p_ver_ret == NULL)
    return -1;

found:
  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7a)
      || ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8)
      || (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_atomics)
	  && !ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8m_m_only)))
    *profile = 'A';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7r))
    *profile = 'R';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_m))
    *profile = 'M';
  else
    *profile = '\0';
  return p_ver_ret->val;
}
27116
27117 /* Set the public EABI object attributes. */
27118
static void
aeabi_set_public_attributes (void)
{
  /* Profile letter of the chosen architecture ('A', 'R' or 'M'), or
     '\0' when it has none; filled in by get_aeabi_cpu_arch_from_fset.  */
  char profile = '\0';
  /* Tag_CPU_arch value, or -1 while no matching architecture is known.  */
  int arch = -1;
  /* Bitmask for Tag_Virtualization_use: bit 0 = TrustZone (sec),
     bit 1 = virtualization extensions.  */
  int virt_sec = 0;
  /* Set when the selected FP/SIMD level makes half-precision optional,
     so Tag_VFP_HP_extension may need to be emitted explicitly.  */
  int fp16_optional = 0;
  int skip_exact_match = 0;
  arm_feature_set flags, flags_arch, flags_ext;

  /* Autodetection mode, choose the architecture based the instructions
     actually used.  */
  if (no_cpu_selected ())
    {
      ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);

      /* Any ARM-state instruction implies at least ARMv1.  */
      if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);

      /* Any Thumb-state instruction implies at least ARMv4T.  */
      if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);

      /* Code run during relaxation relies on selected_cpu being set.  */
      ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
      flags_ext = arm_arch_none;
      ARM_CLEAR_FEATURE (selected_arch, flags_arch, flags_ext);
      selected_ext = flags_ext;
      selected_cpu = flags;
    }
  /* Otherwise, choose the architecture based on the capabilities of the
     requested cpu.  */
  else
    {
      ARM_MERGE_FEATURE_SETS (flags_arch, selected_arch, selected_ext);
      ARM_CLEAR_FEATURE (flags_arch, flags_arch, fpu_any);
      flags_ext = selected_ext;
      flags = selected_cpu;
    }
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_fpu);

  /* Allow the user to override the reported architecture.  */
  if (!ARM_FEATURE_ZERO (selected_object_arch))
    {
      ARM_CLEAR_FEATURE (flags_arch, selected_object_arch, fpu_any);
      flags_ext = arm_arch_none;
    }
  else
    skip_exact_match = ARM_FEATURE_EQUAL (selected_cpu, arm_arch_any);

  /* When this function is run again after relaxation has happened there is no
     way to determine whether an architecture or CPU was specified by the user:
     - selected_cpu is set above for relaxation to work;
     - march_cpu_opt is not set if only -mcpu or .cpu is used;
     - mcpu_cpu_opt is set to arm_arch_any for autodetection.
     Therefore, if not in -march=all case we first try an exact match and fall
     back to autodetection.  */
  if (!skip_exact_match)
    arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 1);
  if (arch == -1)
    arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 0);
  if (arch == -1)
    as_bad (_("no architecture contains all the instructions used\n"));

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      /* Architecture names ("armv...") are reported with the version part
	 upper-cased, e.g. "armv7-a" becomes "7-A".  */
      if (strncmp (q, "armv", 4) == 0)
	{
	  int i;

	  q += 4;
	  for (i = 0; q[i]; i++)
	    q[i] = TOUPPER (q[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  */
  if (profile != '\0')
    aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);

  /* Tag_DSP_extension.  */
  if (ARM_CPU_HAS_FEATURE (selected_ext, arm_ext_dsp))
    aeabi_set_attribute_int (Tag_DSP_extension, 1);

  /* Recompute the architecture-only feature set (FPU bits stripped) from
     the final FLAGS for the ISA-use tags below.  */
  ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || ARM_FEATURE_ZERO (flags_arch))
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || ARM_FEATURE_ZERO (flags_arch))
    {
      int thumb_isa_use;

      /* 3 = ARMv8-M Baseline/Mainline Thumb, 2 = Thumb-2, 1 = Thumb-1.  */
      if (!ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
	  && ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only))
	thumb_isa_use = 3;
      else if (ARM_CPU_HAS_FEATURE (flags, arm_arch_t2))
	thumb_isa_use = 2;
      else
	thumb_isa_use = 1;
      aeabi_set_attribute_int (Tag_THUMB_ISA_use, thumb_isa_use);
    }

  /* Tag_VFP_arch.  Check from the most recent FP extension downwards;
     the d32 variants (32 D registers) take the lower of each value pair.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 7 : 8);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    {
      fp16_optional = 1;
      aeabi_set_attribute_int (Tag_VFP_arch, 3);
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    {
      aeabi_set_attribute_int (Tag_VFP_arch, 4);
      fp16_optional = 1;
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_ABI_HardFP_use.  Set only for single-precision-only VFP
     (v1xd present but not full v1).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v8_1))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 4);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    {
      if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
	}
      else
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
	  fp16_optional = 1;
	}
    }

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);

  /* Tag_DIV_use.

     We set Tag_DIV_use to two when integer divide instructions have been used
     in ARM state, or when Thumb integer divide instructions have been used,
     but we have no architecture profile set, nor have we any ARM instructions.

     For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
     by the base architecture.

     For new architectures we will have to check these tests.  */
  gas_assert (arch <= TAG_CPU_ARCH_V8M_MAIN);
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
    aeabi_set_attribute_int (Tag_DIV_use, 0);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
	   || (profile == '\0'
	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
    aeabi_set_attribute_int (Tag_DIV_use, 2);

  /* Tag_MP_extension_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
    aeabi_set_attribute_int (Tag_MPextension_use, 1);

  /* Tag Virtualization_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
    virt_sec |= 1;
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
    virt_sec |= 2;
  if (virt_sec != 0)
    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
}
27322
27323 /* Post relaxation hook. Recompute ARM attributes now that relaxation is
27324 finished and free extension feature bits which will not be used anymore. */
27325
27326 void
27327 arm_md_post_relax (void)
27328 {
27329 aeabi_set_public_attributes ();
27330 XDELETE (mcpu_ext_opt);
27331 mcpu_ext_opt = NULL;
27332 XDELETE (march_ext_opt);
27333 march_ext_opt = NULL;
27334 }
27335
27336 /* Add the default contents for the .ARM.attributes section. */
27337
27338 void
27339 arm_md_end (void)
27340 {
27341 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
27342 return;
27343
27344 aeabi_set_public_attributes ();
27345 }
27346 #endif /* OBJ_ELF */
27347
27348 /* Parse a .cpu directive. */
27349
27350 static void
27351 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
27352 {
27353 const struct arm_cpu_option_table *opt;
27354 char *name;
27355 char saved_char;
27356
27357 name = input_line_pointer;
27358 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
27359 input_line_pointer++;
27360 saved_char = *input_line_pointer;
27361 *input_line_pointer = 0;
27362
27363 /* Skip the first "all" entry. */
27364 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
27365 if (streq (opt->name, name))
27366 {
27367 selected_arch = opt->value;
27368 selected_ext = opt->ext;
27369 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
27370 if (opt->canonical_name)
27371 strcpy (selected_cpu_name, opt->canonical_name);
27372 else
27373 {
27374 int i;
27375 for (i = 0; opt->name[i]; i++)
27376 selected_cpu_name[i] = TOUPPER (opt->name[i]);
27377
27378 selected_cpu_name[i] = 0;
27379 }
27380 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
27381
27382 *input_line_pointer = saved_char;
27383 demand_empty_rest_of_line ();
27384 return;
27385 }
27386 as_bad (_("unknown cpu `%s'"), name);
27387 *input_line_pointer = saved_char;
27388 ignore_rest_of_line ();
27389 }
27390
27391 /* Parse a .arch directive. */
27392
27393 static void
27394 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
27395 {
27396 const struct arm_arch_option_table *opt;
27397 char saved_char;
27398 char *name;
27399
27400 name = input_line_pointer;
27401 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
27402 input_line_pointer++;
27403 saved_char = *input_line_pointer;
27404 *input_line_pointer = 0;
27405
27406 /* Skip the first "all" entry. */
27407 for (opt = arm_archs + 1; opt->name != NULL; opt++)
27408 if (streq (opt->name, name))
27409 {
27410 selected_arch = opt->value;
27411 selected_ext = arm_arch_none;
27412 selected_cpu = selected_arch;
27413 strcpy (selected_cpu_name, opt->name);
27414 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
27415 *input_line_pointer = saved_char;
27416 demand_empty_rest_of_line ();
27417 return;
27418 }
27419
27420 as_bad (_("unknown architecture `%s'\n"), name);
27421 *input_line_pointer = saved_char;
27422 ignore_rest_of_line ();
27423 }
27424
27425 /* Parse a .object_arch directive. */
27426
27427 static void
27428 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
27429 {
27430 const struct arm_arch_option_table *opt;
27431 char saved_char;
27432 char *name;
27433
27434 name = input_line_pointer;
27435 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
27436 input_line_pointer++;
27437 saved_char = *input_line_pointer;
27438 *input_line_pointer = 0;
27439
27440 /* Skip the first "all" entry. */
27441 for (opt = arm_archs + 1; opt->name != NULL; opt++)
27442 if (streq (opt->name, name))
27443 {
27444 selected_object_arch = opt->value;
27445 *input_line_pointer = saved_char;
27446 demand_empty_rest_of_line ();
27447 return;
27448 }
27449
27450 as_bad (_("unknown architecture `%s'\n"), name);
27451 *input_line_pointer = saved_char;
27452 ignore_rest_of_line ();
27453 }
27454
27455 /* Parse a .arch_extension directive. */
27456
27457 static void
27458 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
27459 {
27460 const struct arm_option_extension_value_table *opt;
27461 char saved_char;
27462 char *name;
27463 int adding_value = 1;
27464
27465 name = input_line_pointer;
27466 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
27467 input_line_pointer++;
27468 saved_char = *input_line_pointer;
27469 *input_line_pointer = 0;
27470
27471 if (strlen (name) >= 2
27472 && strncmp (name, "no", 2) == 0)
27473 {
27474 adding_value = 0;
27475 name += 2;
27476 }
27477
27478 for (opt = arm_extensions; opt->name != NULL; opt++)
27479 if (streq (opt->name, name))
27480 {
27481 int i, nb_allowed_archs =
27482 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[i]);
27483 for (i = 0; i < nb_allowed_archs; i++)
27484 {
27485 /* Empty entry. */
27486 if (ARM_CPU_IS_ANY (opt->allowed_archs[i]))
27487 continue;
27488 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], selected_arch))
27489 break;
27490 }
27491
27492 if (i == nb_allowed_archs)
27493 {
27494 as_bad (_("architectural extension `%s' is not allowed for the "
27495 "current base architecture"), name);
27496 break;
27497 }
27498
27499 if (adding_value)
27500 ARM_MERGE_FEATURE_SETS (selected_ext, selected_ext,
27501 opt->merge_value);
27502 else
27503 ARM_CLEAR_FEATURE (selected_ext, selected_ext, opt->clear_value);
27504
27505 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
27506 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
27507 *input_line_pointer = saved_char;
27508 demand_empty_rest_of_line ();
27509 /* Allowing Thumb division instructions for ARMv7 in autodetection rely
27510 on this return so that duplicate extensions (extensions with the
27511 same name as a previous extension in the list) are not considered
27512 for command-line parsing. */
27513 return;
27514 }
27515
27516 if (opt->name == NULL)
27517 as_bad (_("unknown architecture extension `%s'\n"), name);
27518
27519 *input_line_pointer = saved_char;
27520 ignore_rest_of_line ();
27521 }
27522
27523 /* Parse a .fpu directive. */
27524
27525 static void
27526 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
27527 {
27528 const struct arm_option_fpu_value_table *opt;
27529 char saved_char;
27530 char *name;
27531
27532 name = input_line_pointer;
27533 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
27534 input_line_pointer++;
27535 saved_char = *input_line_pointer;
27536 *input_line_pointer = 0;
27537
27538 for (opt = arm_fpus; opt->name != NULL; opt++)
27539 if (streq (opt->name, name))
27540 {
27541 selected_fpu = opt->value;
27542 #ifndef CPU_DEFAULT
27543 if (no_cpu_selected ())
27544 ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
27545 else
27546 #endif
27547 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
27548 *input_line_pointer = saved_char;
27549 demand_empty_rest_of_line ();
27550 return;
27551 }
27552
27553 as_bad (_("unknown floating point format `%s'\n"), name);
27554 *input_line_pointer = saved_char;
27555 ignore_rest_of_line ();
27556 }
27557
27558 /* Copy symbol information. */
27559
void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  /* Copy the ARM-specific per-symbol flags from SRC to DEST;
     ARM_GET_FLAG is an lvalue accessor for that flag word.  */
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
27565
27566 #ifdef OBJ_ELF
27567 /* Given a symbolic attribute NAME, return the proper integer value.
27568 Returns -1 if the attribute is not known. */
27569
27570 int
27571 arm_convert_symbolic_attribute (const char *name)
27572 {
27573 static const struct
27574 {
27575 const char * name;
27576 const int tag;
27577 }
27578 attribute_table[] =
27579 {
27580 /* When you modify this table you should
27581 also modify the list in doc/c-arm.texi. */
27582 #define T(tag) {#tag, tag}
27583 T (Tag_CPU_raw_name),
27584 T (Tag_CPU_name),
27585 T (Tag_CPU_arch),
27586 T (Tag_CPU_arch_profile),
27587 T (Tag_ARM_ISA_use),
27588 T (Tag_THUMB_ISA_use),
27589 T (Tag_FP_arch),
27590 T (Tag_VFP_arch),
27591 T (Tag_WMMX_arch),
27592 T (Tag_Advanced_SIMD_arch),
27593 T (Tag_PCS_config),
27594 T (Tag_ABI_PCS_R9_use),
27595 T (Tag_ABI_PCS_RW_data),
27596 T (Tag_ABI_PCS_RO_data),
27597 T (Tag_ABI_PCS_GOT_use),
27598 T (Tag_ABI_PCS_wchar_t),
27599 T (Tag_ABI_FP_rounding),
27600 T (Tag_ABI_FP_denormal),
27601 T (Tag_ABI_FP_exceptions),
27602 T (Tag_ABI_FP_user_exceptions),
27603 T (Tag_ABI_FP_number_model),
27604 T (Tag_ABI_align_needed),
27605 T (Tag_ABI_align8_needed),
27606 T (Tag_ABI_align_preserved),
27607 T (Tag_ABI_align8_preserved),
27608 T (Tag_ABI_enum_size),
27609 T (Tag_ABI_HardFP_use),
27610 T (Tag_ABI_VFP_args),
27611 T (Tag_ABI_WMMX_args),
27612 T (Tag_ABI_optimization_goals),
27613 T (Tag_ABI_FP_optimization_goals),
27614 T (Tag_compatibility),
27615 T (Tag_CPU_unaligned_access),
27616 T (Tag_FP_HP_extension),
27617 T (Tag_VFP_HP_extension),
27618 T (Tag_ABI_FP_16bit_format),
27619 T (Tag_MPextension_use),
27620 T (Tag_DIV_use),
27621 T (Tag_nodefaults),
27622 T (Tag_also_compatible_with),
27623 T (Tag_conformance),
27624 T (Tag_T2EE_use),
27625 T (Tag_Virtualization_use),
27626 T (Tag_DSP_extension),
27627 /* We deliberately do not include Tag_MPextension_use_legacy. */
27628 #undef T
27629 };
27630 unsigned int i;
27631
27632 if (name == NULL)
27633 return -1;
27634
27635 for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
27636 if (streq (name, attribute_table[i].name))
27637 return attribute_table[i].tag;
27638
27639 return -1;
27640 }
27641
27642 /* Apply sym value for relocations only in the case that they are for
27643 local symbols in the same segment as the fixup and you have the
27644 respective architectural feature for blx and simple switches. */
27645
27646 int
27647 arm_apply_sym_value (struct fix * fixP, segT this_seg)
27648 {
27649 if (fixP->fx_addsy
27650 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
27651 /* PR 17444: If the local symbol is in a different section then a reloc
27652 will always be generated for it, so applying the symbol value now
27653 will result in a double offset being stored in the relocation. */
27654 && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
27655 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
27656 {
27657 switch (fixP->fx_r_type)
27658 {
27659 case BFD_RELOC_ARM_PCREL_BLX:
27660 case BFD_RELOC_THUMB_PCREL_BRANCH23:
27661 if (ARM_IS_FUNC (fixP->fx_addsy))
27662 return 1;
27663 break;
27664
27665 case BFD_RELOC_ARM_PCREL_CALL:
27666 case BFD_RELOC_THUMB_PCREL_BLX:
27667 if (THUMB_IS_FUNC (fixP->fx_addsy))
27668 return 1;
27669 break;
27670
27671 default:
27672 break;
27673 }
27674
27675 }
27676 return 0;
27677 }
27678 #endif /* OBJ_ELF */