d23181c01f3a73ff2257cc9f420a9c237ade3d2b
[binutils-gdb.git] / gas / config / tc-arm.c
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2015 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8
9 This file is part of GAS, the GNU Assembler.
10
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
14 any later version.
15
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24 02110-1301, USA. */
25
26 #include "as.h"
27 #include <limits.h>
28 #include <stdarg.h>
29 #define NO_RELOC 0
30 #include "safe-ctype.h"
31 #include "subsegs.h"
32 #include "obstack.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
35
36 #ifdef OBJ_ELF
37 #include "elf/arm.h"
38 #include "dw2gencfi.h"
39 #endif
40
41 #include "dwarf2dbg.h"
42
43 #ifdef OBJ_ELF
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
46
47 /* This structure holds the unwinding state. */
48
static struct
{
  /* Symbol marking the start of the function being unwound; presumably
     set at .fnstart -- the directive handlers are outside this chunk.  */
  symbolS * proc_start;
  /* Symbol for this function's unwind table entry, if one exists.  */
  symbolS * table_entry;
  /* Personality routine symbol, if one has been named.  */
  symbolS * personality_routine;
  int personality_index;
  /* The segment containing the function.  */
  segT saved_seg;
  subsegT saved_subseg;
  /* Opcodes generated from this function.  */
  unsigned char * opcodes;
  /* Bytes used and bytes allocated in OPCODES.  */
  int opcode_count;
  int opcode_alloc;
  /* The number of bytes pushed to the stack.  */
  offsetT frame_size;
  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.  */
  offsetT fp_offset;
  int fp_reg;
  /* Nonzero if an unwind_setfp directive has been seen.  */
  unsigned fp_used:1;
  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned sp_restored:1;
} unwind;
77
78 #endif /* OBJ_ELF */
79
80 /* Results from operand parsing worker functions. */
81
82 typedef enum
83 {
84 PARSE_OPERAND_SUCCESS,
85 PARSE_OPERAND_FAIL,
86 PARSE_OPERAND_FAIL_NO_BACKTRACK
87 } parse_operand_result;
88
89 enum arm_float_abi
90 {
91 ARM_FLOAT_ABI_HARD,
92 ARM_FLOAT_ABI_SOFTFP,
93 ARM_FLOAT_ABI_SOFT
94 };
95
96 /* Types of processor to assemble for. */
97 #ifndef CPU_DEFAULT
98 /* The code that was here used to select a default CPU depending on compiler
99 pre-defines which were only present when doing native builds, thus
100 changing gas' default behaviour depending upon the build host.
101
102 If you have a target that requires a default CPU option then the you
103 should define CPU_DEFAULT here. */
104 #endif
105
106 #ifndef FPU_DEFAULT
107 # ifdef TE_LINUX
108 # define FPU_DEFAULT FPU_ARCH_FPA
109 # elif defined (TE_NetBSD)
110 # ifdef OBJ_ELF
111 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
112 # else
113 /* Legacy a.out format. */
114 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
115 # endif
116 # elif defined (TE_VXWORKS)
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
118 # else
119 /* For backwards compatibility, default to FPA. */
120 # define FPU_DEFAULT FPU_ARCH_FPA
121 # endif
122 #endif /* ifndef FPU_DEFAULT */
123
124 #define streq(a, b) (strcmp (a, b) == 0)
125
126 static arm_feature_set cpu_variant;
127 static arm_feature_set arm_arch_used;
128 static arm_feature_set thumb_arch_used;
129
130 /* Flags stored in private area of BFD structure. */
131 static int uses_apcs_26 = FALSE;
132 static int atpcs = FALSE;
133 static int support_interwork = FALSE;
134 static int uses_apcs_float = FALSE;
135 static int pic_code = FALSE;
136 static int fix_v4bx = FALSE;
137 /* Warn on using deprecated features. */
138 static int warn_on_deprecated = TRUE;
139
140 /* Understand CodeComposer Studio assembly syntax. */
141 bfd_boolean codecomposer_syntax = FALSE;
142
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
145 assembly flags. */
146 static const arm_feature_set *legacy_cpu = NULL;
147 static const arm_feature_set *legacy_fpu = NULL;
148
149 static const arm_feature_set *mcpu_cpu_opt = NULL;
150 static const arm_feature_set *mcpu_fpu_opt = NULL;
151 static const arm_feature_set *march_cpu_opt = NULL;
152 static const arm_feature_set *march_fpu_opt = NULL;
153 static const arm_feature_set *mfpu_opt = NULL;
154 static const arm_feature_set *object_arch = NULL;
155
156 /* Constants for known architecture features. */
157 static const arm_feature_set fpu_default = FPU_DEFAULT;
158 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
159 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
160 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
161 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
162 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
163 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
164 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
165 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
166
167 #ifdef CPU_DEFAULT
168 static const arm_feature_set cpu_default = CPU_DEFAULT;
169 #endif
170
171 static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
172 static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
173 static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
174 static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
175 static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
176 static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
177 static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
178 static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
179 static const arm_feature_set arm_ext_v4t_5 =
180 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
181 static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
182 static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
183 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
184 static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
185 static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
186 static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
187 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
188 static const arm_feature_set arm_ext_v6m = ARM_FEATURE_CORE_LOW (ARM_EXT_V6M);
189 static const arm_feature_set arm_ext_v6_notm =
190 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
191 static const arm_feature_set arm_ext_v6_dsp =
192 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
193 static const arm_feature_set arm_ext_barrier =
194 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
195 static const arm_feature_set arm_ext_msr =
196 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
197 static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
198 static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
199 static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
200 static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
201 static const arm_feature_set arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
202 static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
203 static const arm_feature_set arm_ext_m =
204 ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_OS | ARM_EXT_V7M, ARM_EXT2_V8M);
205 static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
206 static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
207 static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
208 static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
209 static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
210 static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
211 static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
212 static const arm_feature_set arm_ext_v6t2_v8m =
213 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
214 /* Instructions shared between ARMv8-A and ARMv8-M. */
215 static const arm_feature_set arm_ext_atomics =
216 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
217
218 static const arm_feature_set arm_arch_any = ARM_ANY;
219 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1, -1);
220 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
221 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
222 static const arm_feature_set arm_arch_v6m_only = ARM_ARCH_V6M_ONLY;
223
224 static const arm_feature_set arm_cext_iwmmxt2 =
225 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
226 static const arm_feature_set arm_cext_iwmmxt =
227 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
228 static const arm_feature_set arm_cext_xscale =
229 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
230 static const arm_feature_set arm_cext_maverick =
231 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
232 static const arm_feature_set fpu_fpa_ext_v1 =
233 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
234 static const arm_feature_set fpu_fpa_ext_v2 =
235 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
236 static const arm_feature_set fpu_vfp_ext_v1xd =
237 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
238 static const arm_feature_set fpu_vfp_ext_v1 =
239 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
240 static const arm_feature_set fpu_vfp_ext_v2 =
241 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
242 static const arm_feature_set fpu_vfp_ext_v3xd =
243 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
244 static const arm_feature_set fpu_vfp_ext_v3 =
245 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
246 static const arm_feature_set fpu_vfp_ext_d32 =
247 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
248 static const arm_feature_set fpu_neon_ext_v1 =
249 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
250 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
251 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
252 static const arm_feature_set fpu_vfp_fp16 =
253 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
254 static const arm_feature_set fpu_neon_ext_fma =
255 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
256 static const arm_feature_set fpu_vfp_ext_fma =
257 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
258 static const arm_feature_set fpu_vfp_ext_armv8 =
259 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
260 static const arm_feature_set fpu_vfp_ext_armv8xd =
261 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
262 static const arm_feature_set fpu_neon_ext_armv8 =
263 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
264 static const arm_feature_set fpu_crypto_ext_armv8 =
265 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
266 static const arm_feature_set crc_ext_armv8 =
267 ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
268 static const arm_feature_set fpu_neon_ext_v8_1 =
269 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8 | FPU_NEON_EXT_RDMA);
270
271 static int mfloat_abi_opt = -1;
272 /* Record user cpu selection for object attributes. */
273 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
274 /* Must be long enough to hold any of the names in arm_cpus. */
275 static char selected_cpu_name[20];
276
277 extern FLONUM_TYPE generic_floating_point_number;
278
279 /* Return if no cpu was selected on command-line. */
280 static bfd_boolean
281 no_cpu_selected (void)
282 {
283 return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
284 }
285
286 #ifdef OBJ_ELF
287 # ifdef EABI_DEFAULT
288 static int meabi_flags = EABI_DEFAULT;
289 # else
290 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
291 # endif
292
293 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
294
295 bfd_boolean
296 arm_is_eabi (void)
297 {
298 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
299 }
300 #endif
301
302 #ifdef OBJ_ELF
303 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
304 symbolS * GOT_symbol;
305 #endif
306
307 /* 0: assemble for ARM,
308 1: assemble for Thumb,
309 2: assemble for Thumb even though target CPU does not support thumb
310 instructions. */
311 static int thumb_mode = 0;
312 /* A value distinct from the possible values for thumb_mode that we
313 can use to record whether thumb_mode has been copied into the
314 tc_frag_data field of a frag. */
315 #define MODE_RECORDED (1 << 4)
316
317 /* Specifies the intrinsic IT insn behavior mode. */
318 enum implicit_it_mode
319 {
320 IMPLICIT_IT_MODE_NEVER = 0x00,
321 IMPLICIT_IT_MODE_ARM = 0x01,
322 IMPLICIT_IT_MODE_THUMB = 0x02,
323 IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
324 };
325 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
326
327 /* If unified_syntax is true, we are processing the new unified
328 ARM/Thumb syntax. Important differences from the old ARM mode:
329
330 - Immediate operands do not require a # prefix.
331 - Conditional affixes always appear at the end of the
332 instruction. (For backward compatibility, those instructions
333 that formerly had them in the middle, continue to accept them
334 there.)
335 - The IT instruction may appear, and if it does is validated
336 against subsequent conditional affixes. It does not generate
337 machine code.
338
339 Important differences from the old Thumb mode:
340
341 - Immediate operands do not require a # prefix.
342 - Most of the V6T2 instructions are only available in unified mode.
343 - The .N and .W suffixes are recognized and honored (it is an error
344 if they cannot be honored).
345 - All instructions set the flags if and only if they have an 's' affix.
346 - Conditional affixes may be used. They are validated against
347 preceding IT instructions. Unlike ARM mode, you cannot use a
348 conditional affix except in the scope of an IT instruction. */
349
350 static bfd_boolean unified_syntax = FALSE;
351
352 /* An immediate operand can start with #, and ld*, st*, pld operands
353 can contain [ and ]. We need to tell APP not to elide whitespace
354 before a [, which can appear as the first operand for pld.
355 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
356 const char arm_symbol_chars[] = "#[]{}";
357
358 enum neon_el_type
359 {
360 NT_invtype,
361 NT_untyped,
362 NT_integer,
363 NT_float,
364 NT_poly,
365 NT_signed,
366 NT_unsigned
367 };
368
369 struct neon_type_el
370 {
371 enum neon_el_type type;
372 unsigned size;
373 };
374
375 #define NEON_MAX_TYPE_ELS 4
376
377 struct neon_type
378 {
379 struct neon_type_el el[NEON_MAX_TYPE_ELS];
380 unsigned elems;
381 };
382
383 enum it_instruction_type
384 {
385 OUTSIDE_IT_INSN,
386 INSIDE_IT_INSN,
387 INSIDE_IT_LAST_INSN,
388 IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
389 if inside, should be the last one. */
390 NEUTRAL_IT_INSN, /* This could be either inside or outside,
391 i.e. BKPT and NOP. */
392 IT_INSN /* The IT insn has been parsed. */
393 };
394
395 /* The maximum number of operands we need. */
396 #define ARM_IT_MAX_OPERANDS 6
397
/* The parsed state of the instruction currently being assembled.  A
   single global instance ("inst", declared just below) is filled in by
   operand parsing and consumed by the encoder functions.  */
struct arm_it
{
  /* Diagnostic to report, or NULL if nothing went wrong so far.  */
  const char *	error;
  /* The (partially) encoded instruction value.  */
  unsigned long instruction;
  int		size;
  int		size_req;
  int		cond;
  /* "uncond_value" is set to the value in place of the conditional field in
     unconditional versions of the instruction, or -1 if nothing is
     appropriate.  */
  int		uncond_value;
  struct neon_type vectype;
  /* This does not indicate an actual NEON instruction, only that
     the mnemonic accepts neon-style type suffixes.  */
  int		is_neon;
  /* Set to the opcode if the instruction needs relaxation.
     Zero if the instruction is not relaxed.  */
  unsigned long relax;
  /* Relocation to emit for this instruction, if any.  */
  struct
  {
    bfd_reloc_code_real_type type;
    expressionS exp;
    int pc_rel;
  } reloc;

  enum it_instruction_type it_insn_type;

  struct
  {
    unsigned reg;
    signed int imm;
    struct neon_type_el vectype;
    unsigned present	: 1;  /* Operand present.  */
    unsigned isreg	: 1;  /* Operand was a register.  */
    unsigned immisreg	: 1;  /* .imm field is a second register.  */
    unsigned isscalar   : 1;  /* Operand is a (Neon) scalar.  */
    unsigned immisalign : 1;  /* Immediate is an alignment specifier.  */
    unsigned immisfloat : 1;  /* Immediate was parsed as a float.  */
    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
       instructions.  This allows us to disambiguate ARM <-> vector insns.  */
    unsigned regisimm   : 1;  /* 64-bit immediate, reg forms high 32 bits.  */
    unsigned isvec      : 1;  /* Is a single, double or quad VFP/Neon reg.  */
    unsigned isquad     : 1;  /* Operand is Neon quad-precision register.  */
    unsigned issingle   : 1;  /* Operand is VFP single-precision register.  */
    unsigned hasreloc   : 1;  /* Operand has relocation suffix.  */
    unsigned writeback  : 1;  /* Operand has trailing !  */
    unsigned preind     : 1;  /* Preindexed address.  */
    unsigned postind    : 1;  /* Postindexed address.  */
    unsigned negative   : 1;  /* Index register was negated.  */
    unsigned shifted    : 1;  /* Shift applied to operation.  */
    unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */
  } operands[ARM_IT_MAX_OPERANDS];
};
451
452 static struct arm_it inst;
453
454 #define NUM_FLOAT_VALS 8
455
456 const char * fp_const[] =
457 {
458 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
459 };
460
461 /* Number of littlenums required to hold an extended precision number. */
462 #define MAX_LITTLENUMS 6
463
464 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
465
466 #define FAIL (-1)
467 #define SUCCESS (0)
468
469 #define SUFF_S 1
470 #define SUFF_D 2
471 #define SUFF_E 3
472 #define SUFF_P 4
473
474 #define CP_T_X 0x00008000
475 #define CP_T_Y 0x00400000
476
477 #define CONDS_BIT 0x00100000
478 #define LOAD_BIT 0x00100000
479
480 #define DOUBLE_LOAD_FLAG 0x00000001
481
482 struct asm_cond
483 {
484 const char * template_name;
485 unsigned long value;
486 };
487
488 #define COND_ALWAYS 0xE
489
490 struct asm_psr
491 {
492 const char * template_name;
493 unsigned long field;
494 };
495
496 struct asm_barrier_opt
497 {
498 const char * template_name;
499 unsigned long value;
500 const arm_feature_set arch;
501 };
502
503 /* The bit that distinguishes CPSR and SPSR. */
504 #define SPSR_BIT (1 << 22)
505
506 /* The individual PSR flag bits. */
507 #define PSR_c (1 << 16)
508 #define PSR_x (1 << 17)
509 #define PSR_s (1 << 18)
510 #define PSR_f (1 << 19)
511
512 struct reloc_entry
513 {
514 char * name;
515 bfd_reloc_code_real_type reloc;
516 };
517
518 enum vfp_reg_pos
519 {
520 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
521 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
522 };
523
524 enum vfp_ldstm_type
525 {
526 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
527 };
528
529 /* Bits for DEFINED field in neon_typed_alias. */
530 #define NTA_HASTYPE 1
531 #define NTA_HASINDEX 2
532
533 struct neon_typed_alias
534 {
535 unsigned char defined;
536 unsigned char index;
537 struct neon_type_el eltype;
538 };
539
/* ARM register categories.  This includes coprocessor numbers and various
   architecture extensions' registers.  The order matters: reg_expected_msgs
   below is indexed by these values and must stay in sync.  */
enum arm_reg_type
{
  REG_TYPE_RN,		/* Core (integer) register.  */
  REG_TYPE_CP,		/* Coprocessor number.  */
  REG_TYPE_CN,		/* Coprocessor register.  */
  REG_TYPE_FN,		/* FPA register.  */
  REG_TYPE_VFS,		/* VFP single-precision register.  */
  REG_TYPE_VFD,		/* VFP/Neon double-precision register.  */
  REG_TYPE_NQ,		/* Neon quad-precision register.  */
  REG_TYPE_VFSD,	/* VFP single or double precision register.  */
  REG_TYPE_NDQ,		/* Neon double or quad precision register.  */
  REG_TYPE_NSDQ,	/* VFP single, double or Neon quad precision.  */
  REG_TYPE_VFC,		/* VFP system register.  */
  REG_TYPE_MVF,		/* Maverick MVF register.  */
  REG_TYPE_MVD,		/* Maverick MVD register.  */
  REG_TYPE_MVFX,	/* Maverick MVFX register.  */
  REG_TYPE_MVDX,	/* Maverick MVDX register.  */
  REG_TYPE_MVAX,	/* Maverick MVAX register.  */
  REG_TYPE_DSPSC,	/* Maverick DSPSC register.  */
  REG_TYPE_MMXWR,	/* iWMMXt data register.  */
  REG_TYPE_MMXWC,	/* iWMMXt control register.  */
  REG_TYPE_MMXWCG,	/* iWMMXt scalar register.  */
  REG_TYPE_XSCALE,	/* XScale accumulator register.  */
  REG_TYPE_RNB		/* No entry in reg_expected_msgs -- presumably never
			   reported as a parse diagnostic; TODO confirm.  */
};
567
568 /* Structure for a hash table entry for a register.
569 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
570 information which states whether a vector type or index is specified (for a
571 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
572 struct reg_entry
573 {
574 const char * name;
575 unsigned int number;
576 unsigned char type;
577 unsigned char builtin;
578 struct neon_typed_alias * neon;
579 };
580
/* Diagnostics used when we don't get a register of the expected type.
   Indexed by enum arm_reg_type; the entries must be kept in the same
   order as that enum.  Note there is no entry for REG_TYPE_RNB, the
   final enumerator.  */
const char * const reg_expected_msgs[] =
{
  N_("ARM register expected"),
  N_("bad or missing co-processor number"),
  N_("co-processor register expected"),
  N_("FPA register expected"),
  N_("VFP single precision register expected"),
  N_("VFP/Neon double precision register expected"),
  N_("Neon quad precision register expected"),
  N_("VFP single or double precision register expected"),
  N_("Neon double or quad precision register expected"),
  N_("VFP single, double or Neon quad precision register expected"),
  N_("VFP system register expected"),
  N_("Maverick MVF register expected"),
  N_("Maverick MVD register expected"),
  N_("Maverick MVFX register expected"),
  N_("Maverick MVDX register expected"),
  N_("Maverick MVAX register expected"),
  N_("Maverick DSPSC register expected"),
  N_("iWMMXt data register expected"),
  N_("iWMMXt control register expected"),
  N_("iWMMXt scalar register expected"),
  N_("XScale accumulator register expected"),
};
606
607 /* Some well known registers that we refer to directly elsewhere. */
608 #define REG_R12 12
609 #define REG_SP 13
610 #define REG_LR 14
611 #define REG_PC 15
612
613 /* ARM instructions take 4bytes in the object file, Thumb instructions
614 take 2: */
615 #define INSN_SIZE 4
616
/* One entry in the opcode table: everything needed to match a mnemonic
   and encode it for ARM and/or Thumb.  */
struct asm_opcode
{
  /* Basic string to match.  */
  const char * template_name;

  /* Parameters to instruction.  */
  unsigned int operands[8];

  /* Conditional tag - see opcode_lookup.  */
  unsigned int tag : 4;

  /* Basic instruction code.  28 bits: the top four bits of an ARM
     instruction word hold the condition (see A_COND_MASK) and are
     filled in separately.  */
  unsigned int avalue : 28;

  /* Thumb-format instruction code.  */
  unsigned int tvalue;

  /* Which architecture variant provides this instruction.  */
  const arm_feature_set * avariant;
  const arm_feature_set * tvariant;

  /* Function to call to encode instruction in ARM format.  */
  void (* aencode) (void);

  /* Function to call to encode instruction in Thumb format.  */
  void (* tencode) (void);
};
644
645 /* Defines for various bits that we will want to toggle. */
646 #define INST_IMMEDIATE 0x02000000
647 #define OFFSET_REG 0x02000000
648 #define HWOFFSET_IMM 0x00400000
649 #define SHIFT_BY_REG 0x00000010
650 #define PRE_INDEX 0x01000000
651 #define INDEX_UP 0x00800000
652 #define WRITE_BACK 0x00200000
653 #define LDM_TYPE_2_OR_3 0x00400000
654 #define CPSI_MMOD 0x00020000
655
656 #define LITERAL_MASK 0xf000f000
657 #define OPCODE_MASK 0xfe1fffff
658 #define V4_STR_BIT 0x00000020
659 #define VLDR_VMOV_SAME 0x0040f000
660
661 #define T2_SUBS_PC_LR 0xf3de8f00
662
663 #define DATA_OP_SHIFT 21
664
665 #define T2_OPCODE_MASK 0xfe1fffff
666 #define T2_DATA_OP_SHIFT 21
667
668 #define A_COND_MASK 0xf0000000
669 #define A_PUSH_POP_OP_MASK 0x0fff0000
670
671 /* Opcodes for pushing/poping registers to/from the stack. */
672 #define A1_OPCODE_PUSH 0x092d0000
673 #define A2_OPCODE_PUSH 0x052d0004
674 #define A2_OPCODE_POP 0x049d0004
675
676 /* Codes to distinguish the arithmetic instructions. */
677 #define OPCODE_AND 0
678 #define OPCODE_EOR 1
679 #define OPCODE_SUB 2
680 #define OPCODE_RSB 3
681 #define OPCODE_ADD 4
682 #define OPCODE_ADC 5
683 #define OPCODE_SBC 6
684 #define OPCODE_RSC 7
685 #define OPCODE_TST 8
686 #define OPCODE_TEQ 9
687 #define OPCODE_CMP 10
688 #define OPCODE_CMN 11
689 #define OPCODE_ORR 12
690 #define OPCODE_MOV 13
691 #define OPCODE_BIC 14
692 #define OPCODE_MVN 15
693
694 #define T2_OPCODE_AND 0
695 #define T2_OPCODE_BIC 1
696 #define T2_OPCODE_ORR 2
697 #define T2_OPCODE_ORN 3
698 #define T2_OPCODE_EOR 4
699 #define T2_OPCODE_ADD 8
700 #define T2_OPCODE_ADC 10
701 #define T2_OPCODE_SBC 11
702 #define T2_OPCODE_SUB 13
703 #define T2_OPCODE_RSB 14
704
705 #define T_OPCODE_MUL 0x4340
706 #define T_OPCODE_TST 0x4200
707 #define T_OPCODE_CMN 0x42c0
708 #define T_OPCODE_NEG 0x4240
709 #define T_OPCODE_MVN 0x43c0
710
711 #define T_OPCODE_ADD_R3 0x1800
712 #define T_OPCODE_SUB_R3 0x1a00
713 #define T_OPCODE_ADD_HI 0x4400
714 #define T_OPCODE_ADD_ST 0xb000
715 #define T_OPCODE_SUB_ST 0xb080
716 #define T_OPCODE_ADD_SP 0xa800
717 #define T_OPCODE_ADD_PC 0xa000
718 #define T_OPCODE_ADD_I8 0x3000
719 #define T_OPCODE_SUB_I8 0x3800
720 #define T_OPCODE_ADD_I3 0x1c00
721 #define T_OPCODE_SUB_I3 0x1e00
722
723 #define T_OPCODE_ASR_R 0x4100
724 #define T_OPCODE_LSL_R 0x4080
725 #define T_OPCODE_LSR_R 0x40c0
726 #define T_OPCODE_ROR_R 0x41c0
727 #define T_OPCODE_ASR_I 0x1000
728 #define T_OPCODE_LSL_I 0x0000
729 #define T_OPCODE_LSR_I 0x0800
730
731 #define T_OPCODE_MOV_I8 0x2000
732 #define T_OPCODE_CMP_I8 0x2800
733 #define T_OPCODE_CMP_LR 0x4280
734 #define T_OPCODE_MOV_HR 0x4600
735 #define T_OPCODE_CMP_HR 0x4500
736
737 #define T_OPCODE_LDR_PC 0x4800
738 #define T_OPCODE_LDR_SP 0x9800
739 #define T_OPCODE_STR_SP 0x9000
740 #define T_OPCODE_LDR_IW 0x6800
741 #define T_OPCODE_STR_IW 0x6000
742 #define T_OPCODE_LDR_IH 0x8800
743 #define T_OPCODE_STR_IH 0x8000
744 #define T_OPCODE_LDR_IB 0x7800
745 #define T_OPCODE_STR_IB 0x7000
746 #define T_OPCODE_LDR_RW 0x5800
747 #define T_OPCODE_STR_RW 0x5000
748 #define T_OPCODE_LDR_RH 0x5a00
749 #define T_OPCODE_STR_RH 0x5200
750 #define T_OPCODE_LDR_RB 0x5c00
751 #define T_OPCODE_STR_RB 0x5400
752
753 #define T_OPCODE_PUSH 0xb400
754 #define T_OPCODE_POP 0xbc00
755
756 #define T_OPCODE_BRANCH 0xe000
757
758 #define THUMB_SIZE 2 /* Size of thumb instruction. */
759 #define THUMB_PP_PC_LR 0x0100
760 #define THUMB_LOAD_BIT 0x0800
761 #define THUMB2_LOAD_BIT 0x00100000
762
763 #define BAD_ARGS _("bad arguments to instruction")
764 #define BAD_SP _("r13 not allowed here")
765 #define BAD_PC _("r15 not allowed here")
766 #define BAD_COND _("instruction cannot be conditional")
767 #define BAD_OVERLAP _("registers may not be the same")
768 #define BAD_HIREG _("lo register required")
769 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
/* No trailing semicolon: the expansion is used as an expression
   (e.g. "inst.error = BAD_ADDR_MODE;"); a semicolon baked into the
   macro would break any use in a conditional expression.  */
#define BAD_ADDR_MODE   _("instruction does not accept this addressing mode")
771 #define BAD_BRANCH _("branch must be last instruction in IT block")
772 #define BAD_NOT_IT _("instruction not allowed in IT block")
773 #define BAD_FPU _("selected FPU does not support instruction")
774 #define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
775 #define BAD_IT_COND _("incorrect condition in IT block")
776 #define BAD_IT_IT _("IT falling in the range of a previous IT block")
777 #define MISSING_FNSTART _("missing .fnstart before unwinding directive")
778 #define BAD_PC_ADDRESSING \
779 _("cannot use register index with PC-relative addressing")
780 #define BAD_PC_WRITEBACK \
781 _("cannot use writeback with PC-relative addressing")
782 #define BAD_RANGE _("branch out of range")
783 #define UNPRED_REG(R) _("using " R " results in unpredictable behaviour")
784
785 static struct hash_control * arm_ops_hsh;
786 static struct hash_control * arm_cond_hsh;
787 static struct hash_control * arm_shift_hsh;
788 static struct hash_control * arm_psr_hsh;
789 static struct hash_control * arm_v7m_psr_hsh;
790 static struct hash_control * arm_reg_hsh;
791 static struct hash_control * arm_reloc_hsh;
792 static struct hash_control * arm_barrier_opt_hsh;
793
794 /* Stuff needed to resolve the label ambiguity
795 As:
796 ...
797 label: <insn>
798 may differ from:
799 ...
800 label:
801 <insn> */
802
803 symbolS * last_label_seen;
804 static int label_is_thumb_function_name = FALSE;
805
806 /* Literal pool structure. Held on a per-section
807 and per-sub-section basis. */
808
#define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_pool
{
  /* Constants waiting to be emitted for this pool.  */
  expressionS	         literals [MAX_LITERAL_POOL_SIZE];
  /* Index of the first unused slot in LITERALS.  */
  unsigned int	         next_free_entry;
  /* Numeric id of this pool -- presumably used to label it; the code
     that consumes it is outside this chunk.  */
  unsigned int	         id;
  symbolS *	         symbol;
  /* Section and sub-section the pool belongs to (pools are kept
     per-section and per-sub-section).  */
  segT		         section;
  subsegT	         sub_section;
#ifdef OBJ_ELF
  /* Source locations matching each entry in LITERALS, for debug info.  */
  struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
#endif
  /* Next pool in the linked list headed by list_of_pools.  */
  struct literal_pool *  next;
  unsigned int		 alignment;
} literal_pool;
824
825 /* Pointer to a linked list of literal pools. */
826 literal_pool * list_of_pools = NULL;
827
828 typedef enum asmfunc_states
829 {
830 OUTSIDE_ASMFUNC,
831 WAITING_ASMFUNC_NAME,
832 WAITING_ENDASMFUNC
833 } asmfunc_states;
834
835 static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
836
837 #ifdef OBJ_ELF
838 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
839 #else
840 static struct current_it now_it;
841 #endif
842
843 static inline int
844 now_it_compatible (int cond)
845 {
846 return (cond & ~1) == (now_it.cc & ~1);
847 }
848
849 static inline int
850 conditional_insn (void)
851 {
852 return inst.cond != COND_ALWAYS;
853 }
854
855 static int in_it_block (void);
856
857 static int handle_it_state (void);
858
859 static void force_automatic_it_block_close (void);
860
861 static void it_fsm_post_encode (void);
862
/* Record the IT-block classification of the current instruction and run
   the IT state machine; bail out of the calling (void) encoder function
   if the state machine reports an error.  */
#define set_it_insn_type(type)			\
  do						\
    {						\
      inst.it_insn_type = type;			\
      if (handle_it_state () == FAIL)		\
	return;					\
    }						\
  while (0)

/* As set_it_insn_type, but for callers that return a value: FAILRET is
   returned when the IT state machine reports an error.  */
#define set_it_insn_type_nonvoid(type, failret) \
  do						\
    {						\
      inst.it_insn_type = type;			\
      if (handle_it_state () == FAIL)		\
	return failret;				\
    }						\
  while(0)

/* Mark the current instruction as (potentially) the last one of an IT
   block: a conditional instruction must be last, an unconditional one
   need only be last if it turns out to be inside a block.  */
#define set_it_insn_type_last()			\
  do						\
    {						\
      if (inst.cond == COND_ALWAYS)		\
	set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
      else					\
	set_it_insn_type (INSIDE_IT_LAST_INSN);	\
    }						\
  while (0)
890
891 /* Pure syntax. */
892
893 /* This array holds the chars that always start a comment. If the
894 pre-processor is disabled, these aren't very useful. */
895 char arm_comment_chars[] = "@";
896
897 /* This array holds the chars that only start a comment at the beginning of
898 a line. If the line seems to have the form '# 123 filename'
899 .line and .file directives will appear in the pre-processed output. */
900 /* Note that input_file.c hand checks for '#' at the beginning of the
901 first line of the input file. This is because the compiler outputs
902 #NO_APP at the beginning of its output. */
903 /* Also note that comments like this one will always work. */
904 const char line_comment_chars[] = "#";
905
906 char arm_line_separator_chars[] = ";";
907
908 /* Chars that can be used to separate mant
909 from exp in floating point numbers. */
910 const char EXP_CHARS[] = "eE";
911
912 /* Chars that mean this number is a floating point constant. */
913 /* As in 0f12.456 */
914 /* or 0d1.2345e12 */
915
916 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
917
918 /* Prefix characters that indicate the start of an immediate
919 value. */
920 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
921
922 /* Separator character handling. */
923
924 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
925
926 static inline int
927 skip_past_char (char ** str, char c)
928 {
929 /* PR gas/14987: Allow for whitespace before the expected character. */
930 skip_whitespace (*str);
931
932 if (**str == c)
933 {
934 (*str)++;
935 return SUCCESS;
936 }
937 else
938 return FAIL;
939 }
940
941 #define skip_past_comma(str) skip_past_char (str, ',')
942
943 /* Arithmetic expressions (possibly involving symbols). */
944
945 /* Return TRUE if anything in the expression is a bignum. */
946
947 static int
948 walk_no_bignums (symbolS * sp)
949 {
950 if (symbol_get_value_expression (sp)->X_op == O_big)
951 return 1;
952
953 if (symbol_get_value_expression (sp)->X_add_symbol)
954 {
955 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
956 || (symbol_get_value_expression (sp)->X_op_symbol
957 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
958 }
959
960 return 0;
961 }
962
/* Non-zero while my_get_expression is running; md_operand uses it to tell
   operand-context expression errors apart from other contexts.  */
static int in_my_get_expression = 0;

/* Third argument to my_get_expression.	 */
#define GE_NO_PREFIX 0		/* No '#'/'$' prefix accepted.  */
#define GE_IMM_PREFIX 1		/* '#'/'$' prefix required.  */
#define GE_OPT_PREFIX 2		/* '#'/'$' prefix optional.  */
/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3
972
/* Parse an expression from *STR into EP, honouring the immediate-prefix
   rules selected by PREFIX_MODE (one of the GE_* values above).  On
   success, returns 0 and advances *STR past the expression.  On failure,
   sets inst.error and returns non-zero.  NOTE(review): failure paths mix
   `return FAIL' and `return 1' -- callers appear to test only for
   non-zero, but confirm before relying on the exact value.  Temporarily
   redirects input_line_pointer so the generic `expression' machinery can
   run over an operand string.  */
static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;
  segT seg;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
		  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default: abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Point the generic expression parser at the operand text, remembering
     where the real input line was so it can be restored afterwards.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = 1;
  seg = expression (ep);
  in_my_get_expression = 0;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }
#else
  (void) seg;
#endif

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.	 Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return 0;
}
1058
1059 /* Turn a string in input_line_pointer into a floating point constant
1060 of type TYPE, and store the appropriate bytes in *LITP. The number
1061 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1062 returned, or NULL on OK.
1063
1064 Note that fp constants aren't represent in the normal way on the ARM.
1065 In big endian mode, things are as expected. However, in little endian
1066 mode fp constants are big-endian word-wise, and little-endian byte-wise
1067 within the words. For example, (double) 1.1 in big endian mode is
1068 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1069 the byte sequence 99 99 f1 3f 9a 99 99 99.
1070
1071 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1072
1073 char *
1074 md_atof (int type, char * litP, int * sizeP)
1075 {
1076 int prec;
1077 LITTLENUM_TYPE words[MAX_LITTLENUMS];
1078 char *t;
1079 int i;
1080
1081 switch (type)
1082 {
1083 case 'f':
1084 case 'F':
1085 case 's':
1086 case 'S':
1087 prec = 2;
1088 break;
1089
1090 case 'd':
1091 case 'D':
1092 case 'r':
1093 case 'R':
1094 prec = 4;
1095 break;
1096
1097 case 'x':
1098 case 'X':
1099 prec = 5;
1100 break;
1101
1102 case 'p':
1103 case 'P':
1104 prec = 5;
1105 break;
1106
1107 default:
1108 *sizeP = 0;
1109 return _("Unrecognized or unsupported floating point constant");
1110 }
1111
1112 t = atof_ieee (input_line_pointer, type, words);
1113 if (t)
1114 input_line_pointer = t;
1115 *sizeP = prec * sizeof (LITTLENUM_TYPE);
1116
1117 if (target_big_endian)
1118 {
1119 for (i = 0; i < prec; i++)
1120 {
1121 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1122 litP += sizeof (LITTLENUM_TYPE);
1123 }
1124 }
1125 else
1126 {
1127 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
1128 for (i = prec - 1; i >= 0; i--)
1129 {
1130 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1131 litP += sizeof (LITTLENUM_TYPE);
1132 }
1133 else
1134 /* For a 4 byte float the order of elements in `words' is 1 0.
1135 For an 8 byte float the order is 1 0 3 2. */
1136 for (i = 0; i < prec; i += 2)
1137 {
1138 md_number_to_chars (litP, (valueT) words[i + 1],
1139 sizeof (LITTLENUM_TYPE));
1140 md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
1141 (valueT) words[i], sizeof (LITTLENUM_TYPE));
1142 litP += 2 * sizeof (LITTLENUM_TYPE);
1143 }
1144 }
1145
1146 return NULL;
1147 }
1148
1149 /* We handle all bad expressions here, so that we can report the faulty
1150 instruction in the error message. */
1151 void
1152 md_operand (expressionS * exp)
1153 {
1154 if (in_my_get_expression)
1155 exp->X_op = O_illegal;
1156 }
1157
1158 /* Immediate values. */
1159
1160 /* Generic immediate-value read function for use in directives.
1161 Accepts anything that 'expression' can fold to a constant.
1162 *val receives the number. */
1163 #ifdef OBJ_ELF
1164 static int
1165 immediate_for_directive (int *val)
1166 {
1167 expressionS exp;
1168 exp.X_op = O_illegal;
1169
1170 if (is_immediate_prefix (*input_line_pointer))
1171 {
1172 input_line_pointer++;
1173 expression (&exp);
1174 }
1175
1176 if (exp.X_op != O_constant)
1177 {
1178 as_bad (_("expected #constant"));
1179 ignore_rest_of_line ();
1180 return FAIL;
1181 }
1182 *val = exp.X_add_number;
1183 return SUCCESS;
1184 }
1185 #endif
1186
1187 /* Register parsing. */
1188
1189 /* Generic register parser. CCP points to what should be the
1190 beginning of a register name. If it is indeed a valid register
1191 name, advance CCP over it and return the reg_entry structure;
1192 otherwise return NULL. Does not issue diagnostics. */
1193
/* Generic register parser: if *CCP points at a valid register name
   (after optional whitespace and target-specific prefixes), advance *CCP
   over it and return its reg_entry; otherwise return NULL.  Does not
   issue diagnostics.  */
static struct reg_entry *
arm_reg_parse_multi (char **ccp)
{
  char *start = *ccp;
  char *p;
  struct reg_entry *reg;

  skip_whitespace (start);

#ifdef REGISTER_PREFIX
  /* A mandatory register prefix (if the target defines one).  */
  if (*start != REGISTER_PREFIX)
    return NULL;
  start++;
#endif
#ifdef OPTIONAL_REGISTER_PREFIX
  if (*start == OPTIONAL_REGISTER_PREFIX)
    start++;
#endif

  /* Register names begin with a letter and continue with letters,
     digits or underscores.  */
  p = start;
  if (!ISALPHA (*p) || !is_name_beginner (*p))
    return NULL;

  do
    p++;
  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');

  /* Look the candidate name up in the register hash table.  */
  reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);

  if (!reg)
    return NULL;

  *ccp = p;
  return reg;
}
1229
/* Accept alternative spellings for some register classes: generic
   coprocessor names for Maverick registers, a bare number for coprocessor
   selectors, and WCG names where WC registers are expected.  Returns the
   register number, or FAIL if no alternative syntax matches.  */
static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
		    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes.  */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)
	return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here.  */
      {
	unsigned long processor = strtoul (start, ccp, 10);
	if (*ccp != start && processor <= 15)
	  return processor;
      }
      /* NOTE(review): there is no `break' here -- when the bare-number
	 parse fails, control falls through into the REG_TYPE_MMXWC case
	 below.  This only matters if REG happens to be a WCG register;
	 confirm whether the fall-through is intentional.  */
      /* Fall through.  */

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
	 instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
	return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}
1267
1268 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1269 return value is the register number or FAIL. */
1270
1271 static int
1272 arm_reg_parse (char **ccp, enum arm_reg_type type)
1273 {
1274 char *start = *ccp;
1275 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1276 int ret;
1277
1278 /* Do not allow a scalar (reg+index) to parse as a register. */
1279 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1280 return FAIL;
1281
1282 if (reg && reg->type == type)
1283 return reg->number;
1284
1285 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1286 return ret;
1287
1288 *ccp = start;
1289 return FAIL;
1290 }
1291
1292 /* Parse a Neon type specifier. *STR should point at the leading '.'
1293 character. Does no verification at this stage that the type fits the opcode
1294 properly. E.g.,
1295
1296 .i32.i32.s16
1297 .s32.f32
1298 .u16
1299
1300 Can all be legally parsed by this function.
1301
1302 Fills in neon_type struct pointer with parsed information, and updates STR
1303 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1304 type, FAIL if not. */
1305
1306 static int
1307 parse_neon_type (struct neon_type *type, char **str)
1308 {
1309 char *ptr = *str;
1310
1311 if (type)
1312 type->elems = 0;
1313
1314 while (type->elems < NEON_MAX_TYPE_ELS)
1315 {
1316 enum neon_el_type thistype = NT_untyped;
1317 unsigned thissize = -1u;
1318
1319 if (*ptr != '.')
1320 break;
1321
1322 ptr++;
1323
1324 /* Just a size without an explicit type. */
1325 if (ISDIGIT (*ptr))
1326 goto parsesize;
1327
1328 switch (TOLOWER (*ptr))
1329 {
1330 case 'i': thistype = NT_integer; break;
1331 case 'f': thistype = NT_float; break;
1332 case 'p': thistype = NT_poly; break;
1333 case 's': thistype = NT_signed; break;
1334 case 'u': thistype = NT_unsigned; break;
1335 case 'd':
1336 thistype = NT_float;
1337 thissize = 64;
1338 ptr++;
1339 goto done;
1340 default:
1341 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1342 return FAIL;
1343 }
1344
1345 ptr++;
1346
1347 /* .f is an abbreviation for .f32. */
1348 if (thistype == NT_float && !ISDIGIT (*ptr))
1349 thissize = 32;
1350 else
1351 {
1352 parsesize:
1353 thissize = strtoul (ptr, &ptr, 10);
1354
1355 if (thissize != 8 && thissize != 16 && thissize != 32
1356 && thissize != 64)
1357 {
1358 as_bad (_("bad size %d in type specifier"), thissize);
1359 return FAIL;
1360 }
1361 }
1362
1363 done:
1364 if (type)
1365 {
1366 type->el[type->elems].type = thistype;
1367 type->el[type->elems].size = thissize;
1368 type->elems++;
1369 }
1370 }
1371
1372 /* Empty/missing type is not a successful parse. */
1373 if (type->elems == 0)
1374 return FAIL;
1375
1376 *str = ptr;
1377
1378 return SUCCESS;
1379 }
1380
1381 /* Errors may be set multiple times during parsing or bit encoding
1382 (particularly in the Neon bits), but usually the earliest error which is set
1383 will be the most meaningful. Avoid overwriting it with later (cascading)
1384 errors by calling this function. */
1385
1386 static void
1387 first_error (const char *err)
1388 {
1389 if (!inst.error)
1390 inst.error = err;
1391 }
1392
1393 /* Parse a single type, e.g. ".s32", leading period included. */
1394 static int
1395 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1396 {
1397 char *str = *ccp;
1398 struct neon_type optype;
1399
1400 if (*str == '.')
1401 {
1402 if (parse_neon_type (&optype, &str) == SUCCESS)
1403 {
1404 if (optype.elems == 1)
1405 *vectype = optype.el[0];
1406 else
1407 {
1408 first_error (_("only one type should be specified for operand"));
1409 return FAIL;
1410 }
1411 }
1412 else
1413 {
1414 first_error (_("vector type expected"));
1415 return FAIL;
1416 }
1417 }
1418 else
1419 return FAIL;
1420
1421 *ccp = str;
1422
1423 return SUCCESS;
1424 }
1425
1426 /* Special meanings for indices (which have a range of 0-7), which will fit into
1427 a 4-bit integer. */
1428
#define NEON_ALL_LANES 15		/* "[]" was given: all lanes.  */
#define NEON_INTERLEAVE_LANES 14	/* No index: structure interleave.  */
1431
1432 /* Parse either a register or a scalar, with an optional type. Return the
1433 register number, and optionally fill in the actual type of the register
1434 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1435 type/index information in *TYPEINFO. */
1436
/* Parse a register of class TYPE from *CCP, accepting the Neon syntax
   extensions: an element-type suffix (".s32" etc.) and/or a scalar index
   ("[2]" or "[]").  On success returns the register number and advances
   *CCP; fills *TYPEINFO (if non-NULL) with the type/index data, and for a
   polymorphic TYPE stores the concrete class found through RTYPE (if
   non-NULL).  Returns FAIL on no match.  */
static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Default: no type, no index.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  if (type != reg->type)
    return FAIL;

  /* A typed register alias (see insert_neon_reg_alias) carries its own
     type/index information; start from that.  */
  if (reg->neon)
    atype = *reg->neon;

  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD)
	{
	  first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      /* An empty "[]" selects all lanes.  */
      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1540
1541 /* Like arm_reg_parse, but allow allow the following extra features:
1542 - If RTYPE is non-zero, return the (possibly restricted) type of the
1543 register (e.g. Neon double or quad reg when either has been requested).
1544 - If this is a Neon vector type with additional type information, fill
1545 in the struct pointed to by VECTYPE (if non-NULL).
1546 This function will fault on encountering a scalar. */
1547
1548 static int
1549 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1550 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1551 {
1552 struct neon_typed_alias atype;
1553 char *str = *ccp;
1554 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1555
1556 if (reg == FAIL)
1557 return FAIL;
1558
1559 /* Do not allow regname(... to parse as a register. */
1560 if (*str == '(')
1561 return FAIL;
1562
1563 /* Do not allow a scalar (reg+index) to parse as a register. */
1564 if ((atype.defined & NTA_HASINDEX) != 0)
1565 {
1566 first_error (_("register operand expected, but got scalar"));
1567 return FAIL;
1568 }
1569
1570 if (vectype)
1571 *vectype = atype.eltype;
1572
1573 *ccp = str;
1574
1575 return reg;
1576 }
1577
/* Unpack the reg*16+index encoding produced by parse_scalar below.  */
#define NEON_SCALAR_REG(X)	((X) >> 4)
#define NEON_SCALAR_INDEX(X)	((X) & 15)
1580
1581 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1582 have enough information to be able to do a good job bounds-checking. So, we
1583 just do easy checks here, and do further checks later. */
1584
1585 static int
1586 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1587 {
1588 int reg;
1589 char *str = *ccp;
1590 struct neon_typed_alias atype;
1591
1592 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1593
1594 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1595 return FAIL;
1596
1597 if (atype.index == NEON_ALL_LANES)
1598 {
1599 first_error (_("scalar must have an index"));
1600 return FAIL;
1601 }
1602 else if (atype.index >= 64 / elsize)
1603 {
1604 first_error (_("scalar index out of range"));
1605 return FAIL;
1606 }
1607
1608 if (type)
1609 *type = atype.eltype;
1610
1611 *ccp = str;
1612
1613 return reg * 16 + atype.index;
1614 }
1615
1616 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1617
1618 static long
1619 parse_reg_list (char ** strp)
1620 {
1621 char * str = * strp;
1622 long range = 0;
1623 int another_range;
1624
1625 /* We come back here if we get ranges concatenated by '+' or '|'. */
1626 do
1627 {
1628 skip_whitespace (str);
1629
1630 another_range = 0;
1631
1632 if (*str == '{')
1633 {
1634 int in_range = 0;
1635 int cur_reg = -1;
1636
1637 str++;
1638 do
1639 {
1640 int reg;
1641
1642 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1643 {
1644 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1645 return FAIL;
1646 }
1647
1648 if (in_range)
1649 {
1650 int i;
1651
1652 if (reg <= cur_reg)
1653 {
1654 first_error (_("bad range in register list"));
1655 return FAIL;
1656 }
1657
1658 for (i = cur_reg + 1; i < reg; i++)
1659 {
1660 if (range & (1 << i))
1661 as_tsktsk
1662 (_("Warning: duplicated register (r%d) in register list"),
1663 i);
1664 else
1665 range |= 1 << i;
1666 }
1667 in_range = 0;
1668 }
1669
1670 if (range & (1 << reg))
1671 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1672 reg);
1673 else if (reg <= cur_reg)
1674 as_tsktsk (_("Warning: register range not in ascending order"));
1675
1676 range |= 1 << reg;
1677 cur_reg = reg;
1678 }
1679 while (skip_past_comma (&str) != FAIL
1680 || (in_range = 1, *str++ == '-'));
1681 str--;
1682
1683 if (skip_past_char (&str, '}') == FAIL)
1684 {
1685 first_error (_("missing `}'"));
1686 return FAIL;
1687 }
1688 }
1689 else
1690 {
1691 expressionS exp;
1692
1693 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1694 return FAIL;
1695
1696 if (exp.X_op == O_constant)
1697 {
1698 if (exp.X_add_number
1699 != (exp.X_add_number & 0x0000ffff))
1700 {
1701 inst.error = _("invalid register mask");
1702 return FAIL;
1703 }
1704
1705 if ((range & exp.X_add_number) != 0)
1706 {
1707 int regno = range & exp.X_add_number;
1708
1709 regno &= -regno;
1710 regno = (1 << regno) - 1;
1711 as_tsktsk
1712 (_("Warning: duplicated register (r%d) in register list"),
1713 regno);
1714 }
1715
1716 range |= exp.X_add_number;
1717 }
1718 else
1719 {
1720 if (inst.reloc.type != 0)
1721 {
1722 inst.error = _("expression too complex");
1723 return FAIL;
1724 }
1725
1726 memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
1727 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1728 inst.reloc.pc_rel = 0;
1729 }
1730 }
1731
1732 if (*str == '|' || *str == '+')
1733 {
1734 str++;
1735 another_range = 1;
1736 }
1737 }
1738 while (another_range);
1739
1740 *strp = str;
1741 return range;
1742 }
1743
1744 /* Types of registers in a list. */
1745
enum reg_list_els
{
  REGLIST_VFP_S,	/* VFP single-precision registers (max 32).  */
  REGLIST_VFP_D,	/* VFP double-precision registers.  */
  REGLIST_NEON_D	/* Neon D registers, with syntax extensions.  */
};
1752
1753 /* Parse a VFP register list. If the string is invalid return FAIL.
1754 Otherwise return the number of registers, and set PBASE to the first
1755 register. Parses registers of type ETYPE.
1756 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1757 - Q registers can be used to specify pairs of D registers
1758 - { } can be omitted from around a singleton register list
1759 FIXME: This is not implemented, as it would require backtracking in
1760 some cases, e.g.:
1761 vtbl.8 d3,d4,d5
1762 This could be done (the meaning isn't really ambiguous), but doesn't
1763 fit in well with the current parsing framework.
1764 - 32 D registers may be used (also true for VFPv3).
1765 FIXME: Types are ignored in these register lists, which is probably a
1766 bug. */
1767
static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
{
  char *str = *ccp;
  int base_reg;
  int new_base;
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
  int max_regs = 0;
  int count = 0;
  int warned = 0;
  unsigned long mask = 0;
  int i;

  if (skip_past_char (&str, '{') == FAIL)
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  /* Select the register class; max_regs for D lists is decided below
     based on the CPU's D32 capability.  */
  switch (etype)
    {
    case REGLIST_VFP_S:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;
    }

  if (etype != REGLIST_VFP_S)
    {
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  max_regs = 32;
	  /* Record that the D32 extension was actually used.  */
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	max_regs = 16;
    }

  base_reg = max_regs;

  do
    {
      int setmask = 1, addregs = 1;

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)
	{
	  setmask = 3;
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      if ((mask >> new_base) != 0 && ! warned)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = gettext (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  /* For a Q register range, the upper bound covers two D regs.  */
	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  /* NOTE(review): this unconditionally consumes the character after the
     list, assumed to be the closing '}', without verifying it -- confirm
     that malformed input cannot reach this point without a '}'.  */
  str++;

  /* Sanity check -- should have raised a parse error above.  */
  if (count == 0 || count > max_regs)
    abort ();

  *pbase = base_reg;

  /* Final test -- the registers must be consecutive.  */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }

  *ccp = str;

  return count;
}
1931
1932 /* True if two alias types are the same. */
1933
1934 static bfd_boolean
1935 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1936 {
1937 if (!a && !b)
1938 return TRUE;
1939
1940 if (!a || !b)
1941 return FALSE;
1942
1943 if (a->defined != b->defined)
1944 return FALSE;
1945
1946 if ((a->defined & NTA_HASTYPE) != 0
1947 && (a->eltype.type != b->eltype.type
1948 || a->eltype.size != b->eltype.size))
1949 return FALSE;
1950
1951 if ((a->defined & NTA_HASINDEX) != 0
1952 && (a->index != b->index))
1953 return FALSE;
1954
1955 return TRUE;
1956 }
1957
1958 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1959 The base register is put in *PBASE.
1960 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1961 the return value.
1962 The register stride (minus one) is put in bit 4 of the return value.
1963 Bits [6:5] encode the list length (minus one).
1964 The type of the list elements is put in *ELTYPE, if non-NULL. */
1965
/* Decode the packed value returned by parse_neon_el_struct_list.  */
#define NEON_LANE(X)		((X) & 0xf)
#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)
#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)
1969
static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;		/* First D register in the list.  */
  int reg_incr = -1;		/* Register stride (1 or 2); -1 = unknown.  */
  int count = 0;		/* Number of D registers seen so far.  */
  int lane = -1;		/* Lane index, or a NEON_*_LANES value.  */
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;

  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First register in the list; its type fixes FIRSTTYPE, which
	     every later element must match.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second register: its distance from the first fixes the
	     stride for the rest of the list.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  first_error (_(incr_error));
	  return FAIL;
	}

      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
	 modes.	 */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Count every D register covered by the range.  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  /* Each Q register counts as two D registers.  */
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* All indexed elements must use the same lane.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Pack lane, stride and length as documented above NEON_LANE.  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2122
2123 /* Parse an explicit relocation suffix on an expression. This is
2124 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2125 arm_reloc_hsh contains no entries, so this function can only
2126 succeed if there is no () after the word. Returns -1 on error,
2127 BFD_RELOC_UNUSED if there wasn't any suffix. */
2128
2129 static int
2130 parse_reloc (char **str)
2131 {
2132 struct reloc_entry *r;
2133 char *p, *q;
2134
2135 if (**str != '(')
2136 return BFD_RELOC_UNUSED;
2137
2138 p = *str + 1;
2139 q = p;
2140
2141 while (*q && *q != ')' && *q != ',')
2142 q++;
2143 if (*q != ')')
2144 return -1;
2145
2146 if ((r = (struct reloc_entry *)
2147 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2148 return -1;
2149
2150 *str = q + 1;
2151 return r->reloc;
2152 }
2153
2154 /* Directives: register aliases. */
2155
2156 static struct reg_entry *
2157 insert_reg_alias (char *str, unsigned number, int type)
2158 {
2159 struct reg_entry *new_reg;
2160 const char *name;
2161
2162 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2163 {
2164 if (new_reg->builtin)
2165 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2166
2167 /* Only warn about a redefinition if it's not defined as the
2168 same register. */
2169 else if (new_reg->number != number || new_reg->type != type)
2170 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2171
2172 return NULL;
2173 }
2174
2175 name = xstrdup (str);
2176 new_reg = (struct reg_entry *) xmalloc (sizeof (struct reg_entry));
2177
2178 new_reg->name = name;
2179 new_reg->number = number;
2180 new_reg->type = type;
2181 new_reg->builtin = FALSE;
2182 new_reg->neon = NULL;
2183
2184 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2185 abort ();
2186
2187 return new_reg;
2188 }
2189
2190 static void
2191 insert_neon_reg_alias (char *str, int number, int type,
2192 struct neon_typed_alias *atype)
2193 {
2194 struct reg_entry *reg = insert_reg_alias (str, number, type);
2195
2196 if (!reg)
2197 {
2198 first_error (_("attempt to redefine typed alias"));
2199 return;
2200 }
2201
2202 if (atype)
2203 {
2204 reg->neon = (struct neon_typed_alias *)
2205 xmalloc (sizeof (struct neon_typed_alias));
2206 *reg->neon = *atype;
2207 }
2208 }
2209
2210 /* Look for the .req directive. This is of the form:
2211
2212 new_register_name .req existing_register_name
2213
2214 If we find one, or if it looks sufficiently like one that we want to
2215 handle any error here, return TRUE. Otherwise return FALSE. */
2216
2217 static bfd_boolean
2218 create_register_alias (char * newname, char *p)
2219 {
2220 struct reg_entry *old;
2221 char *oldname, *nbuf;
2222 size_t nlen;
2223
2224 /* The input scrubber ensures that whitespace after the mnemonic is
2225 collapsed to single spaces. */
2226 oldname = p;
2227 if (strncmp (oldname, " .req ", 6) != 0)
2228 return FALSE;
2229
2230 oldname += 6;
2231 if (*oldname == '\0')
2232 return FALSE;
2233
2234 old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
2235 if (!old)
2236 {
2237 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2238 return TRUE;
2239 }
2240
2241 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2242 the desired alias name, and p points to its end. If not, then
2243 the desired alias name is in the global original_case_string. */
2244 #ifdef TC_CASE_SENSITIVE
2245 nlen = p - newname;
2246 #else
2247 newname = original_case_string;
2248 nlen = strlen (newname);
2249 #endif
2250
2251 nbuf = (char *) alloca (nlen + 1);
2252 memcpy (nbuf, newname, nlen);
2253 nbuf[nlen] = '\0';
2254
2255 /* Create aliases under the new name as stated; an all-lowercase
2256 version of the new name; and an all-uppercase version of the new
2257 name. */
2258 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
2259 {
2260 for (p = nbuf; *p; p++)
2261 *p = TOUPPER (*p);
2262
2263 if (strncmp (nbuf, newname, nlen))
2264 {
2265 /* If this attempt to create an additional alias fails, do not bother
2266 trying to create the all-lower case alias. We will fail and issue
2267 a second, duplicate error message. This situation arises when the
2268 programmer does something like:
2269 foo .req r0
2270 Foo .req r1
2271 The second .req creates the "Foo" alias but then fails to create
2272 the artificial FOO alias because it has already been created by the
2273 first .req. */
2274 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
2275 return TRUE;
2276 }
2277
2278 for (p = nbuf; *p; p++)
2279 *p = TOLOWER (*p);
2280
2281 if (strncmp (nbuf, newname, nlen))
2282 insert_reg_alias (nbuf, old->number, old->type);
2283 }
2284
2285 return TRUE;
2286 }
2287
2288 /* Create a Neon typed/indexed register alias using directives, e.g.:
2289 X .dn d5.s32[1]
2290 Y .qn 6.s16
2291 Z .dn d7
2292 T .dn Z[0]
2293 These typed registers can be used instead of the types specified after the
2294 Neon mnemonic, so long as all operands given have types. Types can also be
2295 specified directly, e.g.:
2296 vadd d0.s32, d1.s32, d2.s32 */
2297
2298 static bfd_boolean
2299 create_neon_reg_alias (char *newname, char *p)
2300 {
2301 enum arm_reg_type basetype;
2302 struct reg_entry *basereg;
2303 struct reg_entry mybasereg;
2304 struct neon_type ntype;
2305 struct neon_typed_alias typeinfo;
2306 char *namebuf, *nameend ATTRIBUTE_UNUSED;
2307 int namelen;
2308
2309 typeinfo.defined = 0;
2310 typeinfo.eltype.type = NT_invtype;
2311 typeinfo.eltype.size = -1;
2312 typeinfo.index = -1;
2313
2314 nameend = p;
2315
2316 if (strncmp (p, " .dn ", 5) == 0)
2317 basetype = REG_TYPE_VFD;
2318 else if (strncmp (p, " .qn ", 5) == 0)
2319 basetype = REG_TYPE_NQ;
2320 else
2321 return FALSE;
2322
2323 p += 5;
2324
2325 if (*p == '\0')
2326 return FALSE;
2327
2328 basereg = arm_reg_parse_multi (&p);
2329
2330 if (basereg && basereg->type != basetype)
2331 {
2332 as_bad (_("bad type for register"));
2333 return FALSE;
2334 }
2335
2336 if (basereg == NULL)
2337 {
2338 expressionS exp;
2339 /* Try parsing as an integer. */
2340 my_get_expression (&exp, &p, GE_NO_PREFIX);
2341 if (exp.X_op != O_constant)
2342 {
2343 as_bad (_("expression must be constant"));
2344 return FALSE;
2345 }
2346 basereg = &mybasereg;
2347 basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2348 : exp.X_add_number;
2349 basereg->neon = 0;
2350 }
2351
2352 if (basereg->neon)
2353 typeinfo = *basereg->neon;
2354
2355 if (parse_neon_type (&ntype, &p) == SUCCESS)
2356 {
2357 /* We got a type. */
2358 if (typeinfo.defined & NTA_HASTYPE)
2359 {
2360 as_bad (_("can't redefine the type of a register alias"));
2361 return FALSE;
2362 }
2363
2364 typeinfo.defined |= NTA_HASTYPE;
2365 if (ntype.elems != 1)
2366 {
2367 as_bad (_("you must specify a single type only"));
2368 return FALSE;
2369 }
2370 typeinfo.eltype = ntype.el[0];
2371 }
2372
2373 if (skip_past_char (&p, '[') == SUCCESS)
2374 {
2375 expressionS exp;
2376 /* We got a scalar index. */
2377
2378 if (typeinfo.defined & NTA_HASINDEX)
2379 {
2380 as_bad (_("can't redefine the index of a scalar alias"));
2381 return FALSE;
2382 }
2383
2384 my_get_expression (&exp, &p, GE_NO_PREFIX);
2385
2386 if (exp.X_op != O_constant)
2387 {
2388 as_bad (_("scalar index must be constant"));
2389 return FALSE;
2390 }
2391
2392 typeinfo.defined |= NTA_HASINDEX;
2393 typeinfo.index = exp.X_add_number;
2394
2395 if (skip_past_char (&p, ']') == FAIL)
2396 {
2397 as_bad (_("expecting ]"));
2398 return FALSE;
2399 }
2400 }
2401
2402 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2403 the desired alias name, and p points to its end. If not, then
2404 the desired alias name is in the global original_case_string. */
2405 #ifdef TC_CASE_SENSITIVE
2406 namelen = nameend - newname;
2407 #else
2408 newname = original_case_string;
2409 namelen = strlen (newname);
2410 #endif
2411
2412 namebuf = (char *) alloca (namelen + 1);
2413 strncpy (namebuf, newname, namelen);
2414 namebuf[namelen] = '\0';
2415
2416 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2417 typeinfo.defined != 0 ? &typeinfo : NULL);
2418
2419 /* Insert name in all uppercase. */
2420 for (p = namebuf; *p; p++)
2421 *p = TOUPPER (*p);
2422
2423 if (strncmp (namebuf, newname, namelen))
2424 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2425 typeinfo.defined != 0 ? &typeinfo : NULL);
2426
2427 /* Insert name in all lowercase. */
2428 for (p = namebuf; *p; p++)
2429 *p = TOLOWER (*p);
2430
2431 if (strncmp (namebuf, newname, namelen))
2432 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2433 typeinfo.defined != 0 ? &typeinfo : NULL);
2434
2435 return TRUE;
2436 }
2437
2438 /* Should never be called, as .req goes between the alias and the
2439 register name, not at the beginning of the line. */
2440
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* The valid form ("alias .req reg") is recognized earlier by
     create_register_alias; if dispatch reaches this handler the
     directive appeared in the wrong position.  */
  as_bad (_("invalid syntax for .req directive"));
}
2446
static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  /* The valid form ("alias .dn ...") is recognized earlier by
     create_neon_reg_alias; reaching this handler means the directive
     appeared in the wrong position.  */
  as_bad (_("invalid syntax for .dn directive"));
}
2452
static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  /* The valid form ("alias .qn ...") is recognized earlier by
     create_neon_reg_alias; reaching this handler means the directive
     appeared in the wrong position.  */
  as_bad (_("invalid syntax for .qn directive"));
}
2458
2459 /* The .unreq directive deletes an alias which was previously defined
2460 by .req. For example:
2461
2462 my_alias .req r11
2463 .unreq my_alias */
2464
2465 static void
2466 s_unreq (int a ATTRIBUTE_UNUSED)
2467 {
2468 char * name;
2469 char saved_char;
2470
2471 name = input_line_pointer;
2472
2473 while (*input_line_pointer != 0
2474 && *input_line_pointer != ' '
2475 && *input_line_pointer != '\n')
2476 ++input_line_pointer;
2477
2478 saved_char = *input_line_pointer;
2479 *input_line_pointer = 0;
2480
2481 if (!*name)
2482 as_bad (_("invalid syntax for .unreq directive"));
2483 else
2484 {
2485 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2486 name);
2487
2488 if (!reg)
2489 as_bad (_("unknown register alias '%s'"), name);
2490 else if (reg->builtin)
2491 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2492 name);
2493 else
2494 {
2495 char * p;
2496 char * nbuf;
2497
2498 hash_delete (arm_reg_hsh, name, FALSE);
2499 free ((char *) reg->name);
2500 if (reg->neon)
2501 free (reg->neon);
2502 free (reg);
2503
2504 /* Also locate the all upper case and all lower case versions.
2505 Do not complain if we cannot find one or the other as it
2506 was probably deleted above. */
2507
2508 nbuf = strdup (name);
2509 for (p = nbuf; *p; p++)
2510 *p = TOUPPER (*p);
2511 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2512 if (reg)
2513 {
2514 hash_delete (arm_reg_hsh, nbuf, FALSE);
2515 free ((char *) reg->name);
2516 if (reg->neon)
2517 free (reg->neon);
2518 free (reg);
2519 }
2520
2521 for (p = nbuf; *p; p++)
2522 *p = TOLOWER (*p);
2523 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2524 if (reg)
2525 {
2526 hash_delete (arm_reg_hsh, nbuf, FALSE);
2527 free ((char *) reg->name);
2528 if (reg->neon)
2529 free (reg->neon);
2530 free (reg);
2531 }
2532
2533 free (nbuf);
2534 }
2535 }
2536
2537 *input_line_pointer = saved_char;
2538 demand_empty_rest_of_line ();
2539 }
2540
2541 /* Directives: Instruction set selection. */
2542
2543 #ifdef OBJ_ELF
2544 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2545 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
   Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
2547 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2548
2549 /* Create a new mapping symbol for the transition to STATE. */
2550
static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  /* Pick the conventional mapping-symbol name; all three kinds are
     created untyped (BSF_NO_FLAGS).  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Mark the symbol's ARM/Thumb-ness to match the state it announces.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  /* Remove the superseded symbol from the global symbol chain.  */
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
2624
2625 /* We must sometimes convert a region marked as code to data during
2626 code alignment, if an odd number of bytes have to be padded. The
2627 code mapping symbol is pushed to an aligned address. */
2628
2629 static void
2630 insert_data_mapping_symbol (enum mstate state,
2631 valueT value, fragS *frag, offsetT bytes)
2632 {
2633 /* If there was already a mapping symbol, remove it. */
2634 if (frag->tc_frag_data.last_map != NULL
2635 && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
2636 {
2637 symbolS *symp = frag->tc_frag_data.last_map;
2638
2639 if (value == 0)
2640 {
2641 know (frag->tc_frag_data.first_map == symp);
2642 frag->tc_frag_data.first_map = NULL;
2643 }
2644 frag->tc_frag_data.last_map = NULL;
2645 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
2646 }
2647
2648 make_mapping_symbol (MAP_DATA, value, frag);
2649 make_mapping_symbol (state, value + bytes, frag);
2650 }
2651
2652 static void mapping_state_2 (enum mstate state, int max_chars);
2653
2654 /* Set the mapping state to STATE. Only call this when about to
2655 emit some STATE bytes to the file. */
2656
2657 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (state == MAP_ARM || state == MAP_THUMB)
    /* PR gas/12931
       All ARM instructions require 4-byte alignment.
       (Almost) all Thumb instructions require 2-byte alignment.

       When emitting instructions into any section, mark the section
       appropriately.

       Some Thumb instructions are alignment-sensitive modulo 4 bytes,
       but themselves require 2-byte alignment; this applies to some
       PC- relative forms.  However, these cases will involve implicit
       literal pool generation or an explicit .align >=2, both of
       which will cause the section to be marked with sufficient
       alignment.  Thus, we don't handle those cases here.  */
    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);

  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later.  */
    return;

  mapping_state_2 (state, 0);
}
2690
2691 /* Same as mapping_state, but MAX_CHARS bytes have already been
2692 allocated. Put the mapping symbol that far back. */
2693
2694 static void
2695 mapping_state_2 (enum mstate state, int max_chars)
2696 {
2697 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2698
2699 if (!SEG_NORMAL (now_seg))
2700 return;
2701
2702 if (mapstate == state)
2703 /* The mapping symbol has already been emitted.
2704 There is nothing else to do. */
2705 return;
2706
2707 if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
2708 || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
2709 {
2710 struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
2711 const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);
2712
2713 if (add_symbol)
2714 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
2715 }
2716
2717 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2718 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
2719 }
2720 #undef TRANSITION
2721 #else
2722 #define mapping_state(x) ((void)0)
2723 #define mapping_state_2(x, y) ((void)0)
2724 #endif
2725
2726 /* Find the real, Thumb encoded start of a Thumb function. */
2727
2728 #ifdef OBJ_COFF
2729 static symbolS *
2730 find_real_start (symbolS * symbolP)
2731 {
2732 char * real_start;
2733 const char * name = S_GET_NAME (symbolP);
2734 symbolS * new_target;
2735
2736 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2737 #define STUB_NAME ".real_start_of"
2738
2739 if (name == NULL)
2740 abort ();
2741
2742 /* The compiler may generate BL instructions to local labels because
2743 it needs to perform a branch to a far away location. These labels
2744 do not have a corresponding ".real_start_of" label. We check
2745 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2746 the ".real_start_of" convention for nonlocal branches. */
2747 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2748 return symbolP;
2749
2750 real_start = ACONCAT ((STUB_NAME, name, NULL));
2751 new_target = symbol_find (real_start);
2752
2753 if (new_target == NULL)
2754 {
2755 as_warn (_("Failed to find real start of function: %s\n"), name);
2756 new_target = symbolP;
2757 }
2758
2759 return new_target;
2760 }
2761 #endif
2762
2763 static void
2764 opcode_select (int width)
2765 {
2766 switch (width)
2767 {
2768 case 16:
2769 if (! thumb_mode)
2770 {
2771 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2772 as_bad (_("selected processor does not support THUMB opcodes"));
2773
2774 thumb_mode = 1;
2775 /* No need to force the alignment, since we will have been
2776 coming from ARM mode, which is word-aligned. */
2777 record_alignment (now_seg, 1);
2778 }
2779 break;
2780
2781 case 32:
2782 if (thumb_mode)
2783 {
2784 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2785 as_bad (_("selected processor does not support ARM opcodes"));
2786
2787 thumb_mode = 0;
2788
2789 if (!need_pass_2)
2790 frag_align (2, 0, 0);
2791
2792 record_alignment (now_seg, 1);
2793 }
2794 break;
2795
2796 default:
2797 as_bad (_("invalid instruction size selected (%d)"), width);
2798 }
2799 }
2800
static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  /* Switch to 32-bit (ARM) instruction encoding.  */
  opcode_select (32);
  demand_empty_rest_of_line ();
}
2807
static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* Switch to 16-bit (Thumb) instruction encoding.  */
  opcode_select (16);
  demand_empty_rest_of_line ();
}
2814
2815 static void
2816 s_code (int unused ATTRIBUTE_UNUSED)
2817 {
2818 int temp;
2819
2820 temp = get_absolute_expression ();
2821 switch (temp)
2822 {
2823 case 16:
2824 case 32:
2825 opcode_select (temp);
2826 break;
2827
2828 default:
2829 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2830 }
2831 }
2832
static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.  */
  if (! thumb_mode)
    {
      /* 2 rather than 1 -- presumably marks the switch as forced;
	 NOTE(review): confirm what distinguishes value 2 for readers
	 of thumb_mode elsewhere.  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
2849
static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  /* .thumb_func implies .thumb: switch to 16-bit encoding first.  */
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
}
2859
2860 /* Perform a .set directive, but also mark the alias as
2861 being a thumb function. */
2862
static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  delim = get_symbol_name (& name);
  end_name = input_line_pointer;
  (void) restore_line_pointer (delim);

  if (*input_line_pointer != ',')
    {
      /* Temporarily terminate the name so it prints cleanly, then
	 put the delimiter back before abandoning the line.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  * end_name = delim;

  /* When EQUIV (.thumb_set acting like .equiv), redefinition of an
     already-defined symbol is an error.  */
  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
2948
2949 /* Directives: Mode selection. */
2950
2951 /* .syntax [unified|divided] - choose the new unified syntax
2952 (same for Arm and Thumb encoding, modulo slight differences in what
2953 can be represented) or the old divergent syntax for each mode. */
2954 static void
2955 s_syntax (int unused ATTRIBUTE_UNUSED)
2956 {
2957 char *name, delim;
2958
2959 delim = get_symbol_name (& name);
2960
2961 if (!strcasecmp (name, "unified"))
2962 unified_syntax = TRUE;
2963 else if (!strcasecmp (name, "divided"))
2964 unified_syntax = FALSE;
2965 else
2966 {
2967 as_bad (_("unrecognized syntax mode \"%s\""), name);
2968 return;
2969 }
2970 (void) restore_line_pointer (delim);
2971 demand_empty_rest_of_line ();
2972 }
2973
2974 /* Directives: sectioning and alignment. */
2975
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  /* Let the ELF backend react to the section switch (e.g. mapping
     symbol state).  */
  md_elf_section_change_hook ();
#endif
}
2988
2989 static void
2990 s_even (int ignore ATTRIBUTE_UNUSED)
2991 {
2992 /* Never make frag if expect extra pass. */
2993 if (!need_pass_2)
2994 frag_align (1, 0, 0);
2995
2996 record_alignment (now_seg, 1);
2997
2998 demand_empty_rest_of_line ();
2999 }
3000
3001 /* Directives: CodeComposer Studio. */
3002
3003 /* .ref (for CodeComposer Studio syntax only). */
3004 static void
3005 s_ccs_ref (int unused ATTRIBUTE_UNUSED)
3006 {
3007 if (codecomposer_syntax)
3008 ignore_rest_of_line ();
3009 else
3010 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3011 }
3012
/* If name is not NULL, then it is used for marking the beginning of a
   function, whereas if it is NULL then it means the function end.  */
3015 static void
3016 asmfunc_debug (const char * name)
3017 {
3018 static const char * last_name = NULL;
3019
3020 if (name != NULL)
3021 {
3022 gas_assert (last_name == NULL);
3023 last_name = name;
3024
3025 if (debug_type == DEBUG_STABS)
3026 stabs_generate_asm_func (name, name);
3027 }
3028 else
3029 {
3030 gas_assert (last_name != NULL);
3031
3032 if (debug_type == DEBUG_STABS)
3033 stabs_generate_asm_endfunc (last_name, last_name);
3034
3035 last_name = NULL;
3036 }
3037 }
3038
3039 static void
3040 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
3041 {
3042 if (codecomposer_syntax)
3043 {
3044 switch (asmfunc_state)
3045 {
3046 case OUTSIDE_ASMFUNC:
3047 asmfunc_state = WAITING_ASMFUNC_NAME;
3048 break;
3049
3050 case WAITING_ASMFUNC_NAME:
3051 as_bad (_(".asmfunc repeated."));
3052 break;
3053
3054 case WAITING_ENDASMFUNC:
3055 as_bad (_(".asmfunc without function."));
3056 break;
3057 }
3058 demand_empty_rest_of_line ();
3059 }
3060 else
3061 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3062 }
3063
3064 static void
3065 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
3066 {
3067 if (codecomposer_syntax)
3068 {
3069 switch (asmfunc_state)
3070 {
3071 case OUTSIDE_ASMFUNC:
3072 as_bad (_(".endasmfunc without a .asmfunc."));
3073 break;
3074
3075 case WAITING_ASMFUNC_NAME:
3076 as_bad (_(".endasmfunc without function."));
3077 break;
3078
3079 case WAITING_ENDASMFUNC:
3080 asmfunc_state = OUTSIDE_ASMFUNC;
3081 asmfunc_debug (NULL);
3082 break;
3083 }
3084 demand_empty_rest_of_line ();
3085 }
3086 else
3087 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3088 }
3089
3090 static void
3091 s_ccs_def (int name)
3092 {
3093 if (codecomposer_syntax)
3094 s_globl (name);
3095 else
3096 as_bad (_(".def pseudo-op only available with -mccs flag."));
3097 }
3098
3099 /* Directives: Literal pools. */
3100
3101 static literal_pool *
3102 find_literal_pool (void)
3103 {
3104 literal_pool * pool;
3105
3106 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3107 {
3108 if (pool->section == now_seg
3109 && pool->sub_section == now_subseg)
3110 break;
3111 }
3112
3113 return pool;
3114 }
3115
3116 static literal_pool *
3117 find_or_make_literal_pool (void)
3118 {
3119 /* Next literal pool ID number. */
3120 static unsigned int latest_pool_num = 1;
3121 literal_pool * pool;
3122
3123 pool = find_literal_pool ();
3124
3125 if (pool == NULL)
3126 {
3127 /* Create a new pool. */
3128 pool = (literal_pool *) xmalloc (sizeof (* pool));
3129 if (! pool)
3130 return NULL;
3131
3132 pool->next_free_entry = 0;
3133 pool->section = now_seg;
3134 pool->sub_section = now_subseg;
3135 pool->next = list_of_pools;
3136 pool->symbol = NULL;
3137 pool->alignment = 2;
3138
3139 /* Add it to the list. */
3140 list_of_pools = pool;
3141 }
3142
3143 /* New pools, and emptied pools, will have a NULL symbol. */
3144 if (pool->symbol == NULL)
3145 {
3146 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3147 (valueT) 0, &zero_address_frag);
3148 pool->id = latest_pool_num ++;
3149 }
3150
3151 /* Done. */
3152 return pool;
3153 }
3154
3155 /* Add the literal in the global 'inst'
3156 structure to the relevant literal pool. */
3157
static int
add_to_lit_pool (unsigned int nbytes)
{
#define PADDING_SLOT 0x1
#define LIT_ENTRY_SIZE_MASK 0xFF
  literal_pool * pool;
  unsigned int entry, pool_size = 0;
  bfd_boolean padding_slot_p = FALSE;
  unsigned imm1 = 0;
  unsigned imm2 = 0;

  /* An 8-byte literal is stored as two 4-byte halves; pick them up in
     target byte order.  */
  if (nbytes == 8)
    {
      imm1 = inst.operands[1].imm;
      imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
	       : inst.reloc.exp.X_unsigned ? 0
	       : ((bfd_int64_t) inst.operands[1].imm) >> 32);
      if (target_big_endian)
	{
	  imm1 = imm2;
	  imm2 = inst.operands[1].imm;
	}
    }

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if (nbytes == 4)
	{
	  /* Match an existing 4-byte constant entry...  */
	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	      && (inst.reloc.exp.X_op == O_constant)
	      && (pool->literals[entry].X_add_number
		  == inst.reloc.exp.X_add_number)
	      && (pool->literals[entry].X_md == nbytes)
	      && (pool->literals[entry].X_unsigned
		  == inst.reloc.exp.X_unsigned))
	    break;

	  /* ...or an existing symbolic entry.  */
	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	      && (inst.reloc.exp.X_op == O_symbol)
	      && (pool->literals[entry].X_add_number
		  == inst.reloc.exp.X_add_number)
	      && (pool->literals[entry].X_add_symbol
		  == inst.reloc.exp.X_add_symbol)
	      && (pool->literals[entry].X_op_symbol
		  == inst.reloc.exp.X_op_symbol)
	      && (pool->literals[entry].X_md == nbytes))
	    break;
	}
      /* 8-byte values only match an 8-byte-aligned pair of entries.  */
      else if ((nbytes == 8)
	       && !(pool_size & 0x7)
	       && ((entry + 1) != pool->next_free_entry)
	       && (pool->literals[entry].X_op == O_constant)
	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
	       && (pool->literals[entry].X_unsigned
		   == inst.reloc.exp.X_unsigned)
	       && (pool->literals[entry + 1].X_op == O_constant)
	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
	       && (pool->literals[entry + 1].X_unsigned
		   == inst.reloc.exp.X_unsigned))
	break;

      /* A padding slot (inserted for 8-byte alignment) can be reused
	 by a 4-byte literal.  */
      padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
      if (padding_slot_p && (nbytes == 4))
	break;

      pool_size += 4;
    }

  /* Do we need to create a new entry?	*/
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      if (nbytes == 8)
	{
	  /* For 8-byte entries, we align to an 8-byte boundary,
	     and split it into two 4-byte entries, because on 32-bit
	     host, 8-byte constants are treated as big num, thus
	     saved in "generic_bignum" which will be overwritten
	     by later assignments.

	     We also need to make sure there is enough space for
	     the split.

	     We also check to make sure the literal operand is a
	     constant number.  */
	  if (!(inst.reloc.exp.X_op == O_constant
	        || inst.reloc.exp.X_op == O_big))
	    {
	      inst.error = _("invalid type for literal pool");
	      return FAIL;
	    }
	  else if (pool_size & 0x7)
	    {
	      /* Misaligned: burn one 4-byte padding slot first.  */
	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
		{
		  inst.error = _("literal pool overflow");
		  return FAIL;
		}

	      pool->literals[entry] = inst.reloc.exp;
	      pool->literals[entry].X_add_number = 0;
	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
	      pool->next_free_entry += 1;
	      pool_size += 4;
	    }
	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
	    {
	      inst.error = _("literal pool overflow");
	      return FAIL;
	    }

	  /* Store the two 4-byte halves as consecutive entries.  */
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm1;
	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
	  pool->literals[entry++].X_md = 4;
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm2;
	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
	  pool->literals[entry].X_md = 4;
	  pool->alignment = 3;
	  pool->next_free_entry += 1;
	}
      else
	{
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_md = 4;
	}

#ifdef OBJ_ELF
      /* PR ld/12974: Record the location of the first source line to reference
	 this entry in the literal pool.  If it turns out during linking that the
	 symbol does not exist we will be able to give an accurate line number for
	 the (first use of the) missing reference.  */
      if (debug_type == DEBUG_DWARF2)
	dwarf2_where (pool->locs + entry);
#endif
      pool->next_free_entry += 1;
    }
  else if (padding_slot_p)
    {
      /* Reuse the padding slot for this 4-byte literal.  */
      pool->literals[entry] = inst.reloc.exp;
      pool->literals[entry].X_md = nbytes;
    }

  /* Rewrite the instruction's operand as a reference to the pool
     symbol at the entry's byte offset.  */
  inst.reloc.exp.X_op = O_symbol;
  inst.reloc.exp.X_add_number = pool_size;
  inst.reloc.exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
3318
3319 bfd_boolean
3320 tc_start_label_without_colon (void)
3321 {
3322 bfd_boolean ret = TRUE;
3323
3324 if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
3325 {
3326 const char *label = input_line_pointer;
3327
3328 while (!is_end_of_line[(int) label[-1]])
3329 --label;
3330
3331 if (*label == '.')
3332 {
3333 as_bad (_("Invalid label '%s'"), label);
3334 ret = FALSE;
3335 }
3336
3337 asmfunc_debug (label);
3338
3339 asmfunc_state = WAITING_ENDASMFUNC;
3340 }
3341
3342 return ret;
3343 }
3344
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.  */
3347
static void
symbol_locate (symbolS * symbolP,
	       const char * name,	/* It is copied, the caller can modify.	 */
	       segT segment,		/* Segment identifier (SEG_<something>).  */
	       valueT valu,		/* Symbol value.  */
	       fragS * frag)		/* Associated fragment.	 */
{
  size_t name_length;
  char * preserved_copy_of_name;

  /* Copy NAME onto the notes obstack so the symbol owns stable
     storage independent of the caller's buffer.  */
  name_length = strlen (name) + 1;   /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = (char *) obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending after the table is frozen would corrupt it.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  /* Give the object format and target a chance to decorate the
     freshly placed symbol.  */
  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
3395
/* Implement the .ltorg/.pool directives: dump the pending literal pool
   at the current location, then mark the pool empty.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;	/* Nothing pending; emit nothing.  */

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (pool->alignment, 0, 0);

  record_alignment (now_seg, 2);

#ifdef OBJ_ELF
  /* The pool is data, so switch the mapping state to $d.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
#endif
  /* The embedded \002 byte makes the name unwritable in source,
     presumably so user symbols cannot clash with it.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  /* Attach the pool's (pre-created) symbol to this spot.  */
  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
#ifdef OBJ_ELF
      if (debug_type == DEBUG_DWARF2)
	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
#endif
      /* First output the expression in the instruction to the pool.  */
      emit_expr (&(pool->literals[entry]),
		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
    }

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3447
3448 #ifdef OBJ_ELF
3449 /* Forward declarations for functions below, in the MD interface
3450 section. */
3451 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3452 static valueT create_unwind_entry (int);
3453 static void start_unwind_section (const segT, int);
3454 static void add_unwind_opcode (valueT, int);
3455 static void flush_pending_unwind (void);
3456
3457 /* Directives: Data. */
3458
3459 static void
3460 s_arm_elf_cons (int nbytes)
3461 {
3462 expressionS exp;
3463
3464 #ifdef md_flush_pending_output
3465 md_flush_pending_output ();
3466 #endif
3467
3468 if (is_it_end_of_statement ())
3469 {
3470 demand_empty_rest_of_line ();
3471 return;
3472 }
3473
3474 #ifdef md_cons_align
3475 md_cons_align (nbytes);
3476 #endif
3477
3478 mapping_state (MAP_DATA);
3479 do
3480 {
3481 int reloc;
3482 char *base = input_line_pointer;
3483
3484 expression (& exp);
3485
3486 if (exp.X_op != O_symbol)
3487 emit_expr (&exp, (unsigned int) nbytes);
3488 else
3489 {
3490 char *before_reloc = input_line_pointer;
3491 reloc = parse_reloc (&input_line_pointer);
3492 if (reloc == -1)
3493 {
3494 as_bad (_("unrecognized relocation suffix"));
3495 ignore_rest_of_line ();
3496 return;
3497 }
3498 else if (reloc == BFD_RELOC_UNUSED)
3499 emit_expr (&exp, (unsigned int) nbytes);
3500 else
3501 {
3502 reloc_howto_type *howto = (reloc_howto_type *)
3503 bfd_reloc_type_lookup (stdoutput,
3504 (bfd_reloc_code_real_type) reloc);
3505 int size = bfd_get_reloc_size (howto);
3506
3507 if (reloc == BFD_RELOC_ARM_PLT32)
3508 {
3509 as_bad (_("(plt) is only valid on branch targets"));
3510 reloc = BFD_RELOC_UNUSED;
3511 size = 0;
3512 }
3513
3514 if (size > nbytes)
3515 as_bad (_("%s relocations do not fit in %d bytes"),
3516 howto->name, nbytes);
3517 else
3518 {
3519 /* We've parsed an expression stopping at O_symbol.
3520 But there may be more expression left now that we
3521 have parsed the relocation marker. Parse it again.
3522 XXX Surely there is a cleaner way to do this. */
3523 char *p = input_line_pointer;
3524 int offset;
3525 char *save_buf = (char *) alloca (input_line_pointer - base);
3526 memcpy (save_buf, base, input_line_pointer - base);
3527 memmove (base + (input_line_pointer - before_reloc),
3528 base, before_reloc - base);
3529
3530 input_line_pointer = base + (input_line_pointer-before_reloc);
3531 expression (&exp);
3532 memcpy (base, save_buf, p - base);
3533
3534 offset = nbytes - size;
3535 p = frag_more (nbytes);
3536 memset (p, 0, nbytes);
3537 fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
3538 size, &exp, 0, (enum bfd_reloc_code_real) reloc);
3539 }
3540 }
3541 }
3542 }
3543 while (*input_line_pointer++ == ',');
3544
3545 /* Put terminator back into stream. */
3546 input_line_pointer --;
3547 demand_empty_rest_of_line ();
3548 }
3549
3550 /* Emit an expression containing a 32-bit thumb instruction.
3551 Implementation based on put_thumb32_insn. */
3552
3553 static void
3554 emit_thumb32_expr (expressionS * exp)
3555 {
3556 expressionS exp_high = *exp;
3557
3558 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3559 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3560 exp->X_add_number &= 0xffff;
3561 emit_expr (exp, (unsigned int) THUMB_SIZE);
3562 }
3563
/* Guess the instruction size based on the opcode.
   Returns 2 for a 16-bit encoding, 4 for a 32-bit encoding, or 0 when
   the width cannot be deduced from the value alone.  */

static int
thumb_insn_size (int opcode)
{
  unsigned int code = (unsigned int) opcode;

  if (code < 0xe800u)
    return 2;
  if (code >= 0xe8000000u)
    return 4;
  return 0;
}
3576
/* Emit one constant-expression instruction for the .inst family of
   directives.  NBYTES is the forced width (2 or 4) or 0 to deduce the
   width from the opcode value.  Returns TRUE iff something was
   emitted.  */

static bfd_boolean
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      /* No explicit width given: guess it from the opcode.  */
      if (size == 0)
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "\
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      /* A .inst participates in IT-block tracking like a real
		 instruction would.  */
	      if (now_it.state == AUTOMATIC_IT_BLOCK)
		set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
	      else
		set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      /* On little-endian targets a 32-bit Thumb opcode is laid
		 out as two halfwords, high halfword first.  */
	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. " \
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
3621
/* Like s_arm_elf_cons but do not use md_cons_align and
   set the mapping state to MAP_ARM/MAP_THUMB.  */

/* Implement .inst/.inst.n/.inst.w.  NBYTES is 2, 4 or 0 (deduce).  */

static void
s_arm_elf_inst (int nbytes)
{
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Calling mapping_state () here will not change ARM/THUMB,
     but will ensure not to be in DATA state.  */

  if (thumb_mode)
    mapping_state (MAP_THUMB);
  else
    {
      /* ARM instructions are always 4 bytes, so .inst.n/.inst.w make
	 no sense here.  */
      if (nbytes != 0)
	{
	  as_bad (_("width suffixes are invalid in ARM mode"));
	  ignore_rest_of_line ();
	  return;
	}

      nbytes = 4;

      mapping_state (MAP_ARM);
    }

  /* Emit each comma-separated opcode expression.  */
  do
    {
      expressionS exp;

      expression (& exp);

      if (! emit_insn (& exp, nbytes))
	{
	  ignore_rest_of_line ();
	  return;
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3671
/* Parse a .rel31 directive: ".rel31 <highbit>, <expression>".  Emits a
   32-bit word whose top bit is HIGHBIT and whose low 31 bits get a
   R_ARM_PREL31 relocation against EXPRESSION.  */

static void
s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  char *p;
  valueT highbit;

  /* First operand selects the value of bit 31.  */
  highbit = 0;
  if (*input_line_pointer == '1')
    highbit = 0x80000000;
  else if (*input_line_pointer != '0')
    as_bad (_("expected 0 or 1"));

  input_line_pointer++;
  if (*input_line_pointer != ',')
    as_bad (_("missing comma"));
  input_line_pointer++;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  mapping_state (MAP_DATA);

  expression (&exp);

  p = frag_more (4);
  md_number_to_chars (p, highbit, 4);
  /* pc-relative fixup; the reloc fills in the low 31 bits.  */
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
	       BFD_RELOC_ARM_PREL31);

  demand_empty_rest_of_line ();
}
3711
3712 /* Directives: AEABI stack-unwind tables. */
3713
3714 /* Parse an unwind_fnstart directive. Simply records the current location. */
3715
3716 static void
3717 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3718 {
3719 demand_empty_rest_of_line ();
3720 if (unwind.proc_start)
3721 {
3722 as_bad (_("duplicate .fnstart directive"));
3723 return;
3724 }
3725
3726 /* Mark the start of the function. */
3727 unwind.proc_start = expr_build_dot ();
3728
3729 /* Reset the rest of the unwind info. */
3730 unwind.opcode_count = 0;
3731 unwind.table_entry = NULL;
3732 unwind.personality_routine = NULL;
3733 unwind.personality_index = -1;
3734 unwind.frame_size = 0;
3735 unwind.fp_offset = 0;
3736 unwind.fp_reg = REG_SP;
3737 unwind.fp_used = 0;
3738 unwind.sp_restored = 0;
3739 }
3740
3741
3742 /* Parse a handlerdata directive. Creates the exception handling table entry
3743 for the function. */
3744
3745 static void
3746 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3747 {
3748 demand_empty_rest_of_line ();
3749 if (!unwind.proc_start)
3750 as_bad (MISSING_FNSTART);
3751
3752 if (unwind.table_entry)
3753 as_bad (_("duplicate .handlerdata directive"));
3754
3755 create_unwind_entry (1);
3756 }
3757
/* Parse an unwind_fnend directive.  Generates the index table entry.  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;	/* .handlerdata already created the entry.  */

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  memset (ptr, 0, 8);
  /* WHERE is the offset of the two-word entry just reserved.  */
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      /* A zero-size BFD_RELOC_NONE fix just records the reference.  */
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  unwind.proc_start = NULL;
}
3827
3828
3829 /* Parse an unwind_cantunwind directive. */
3830
3831 static void
3832 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3833 {
3834 demand_empty_rest_of_line ();
3835 if (!unwind.proc_start)
3836 as_bad (MISSING_FNSTART);
3837
3838 if (unwind.personality_routine || unwind.personality_index != -1)
3839 as_bad (_("personality routine specified for cantunwind frame"));
3840
3841 unwind.personality_index = -2;
3842 }
3843
3844
/* Parse a personalityindex directive.  Selects one of the predefined
   EHABI personality routines (0-15) for the current function.  */

static void
s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* Only one of .personality / .personalityindex may be given.  */
  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personalityindex directive"));

  expression (&exp);

  if (exp.X_op != O_constant
      || exp.X_add_number < 0 || exp.X_add_number > 15)
    {
      as_bad (_("bad personality routine number"));
      ignore_rest_of_line ();
      return;
    }

  unwind.personality_index = exp.X_add_number;

  demand_empty_rest_of_line ();
}
3872
3873
/* Parse a personality directive.  Names a custom personality routine
   for the current function.  */

static void
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
{
  char *name, *p, c;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* Only one of .personality / .personalityindex may be given.  */
  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personality directive"));

  /* get_symbol_name NUL-terminates the name in place; C holds the
     character it overwrote, which is restored below.  */
  c = get_symbol_name (& name);
  p = input_line_pointer;
  if (c == '"')
    ++ input_line_pointer;
  unwind.personality_routine = symbol_find_or_make (name);
  *p = c;
  demand_empty_rest_of_line ();
}
3895
3896
/* Parse a directive saving core registers.  Emits unwind opcodes for a
   pushed register list and accounts for the frame size.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;
  int n;

  range = parse_reg_list (&input_line_pointer);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      /* Drop the movsp opcode and substitute sp for ip in the list.  */
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.	 */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.	*/
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.	 */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.	 */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
3972
3973
/* Parse a directive saving FPA registers.  REG is the first register;
   the directive's operand gives the count (1-4).  */

static void
s_arm_unwind_save_fpa (int reg)
{
  expressionS exp;
  int num_regs;
  valueT op;

  /* Get Number of registers to transfer.  */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    expression (&exp);
  else
    exp.X_op = O_illegal;

  if (exp.X_op != O_constant)
    {
      as_bad (_("expected , <constant>"));
      ignore_rest_of_line ();
      return;
    }

  num_regs = exp.X_add_number;

  if (num_regs < 1 || num_regs > 4)
    {
      as_bad (_("number of registers must be in the range [1:4]"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  if (reg == 4)
    {
      /* Short form.  */
      op = 0xb4 | (num_regs - 1);
      add_unwind_opcode (op, 1);
    }
  else
    {
      /* Long form.  */
      op = 0xc800 | (reg << 4) | (num_regs - 1);
      add_unwind_opcode (op, 2);
    }
  /* FPA registers are 12 bytes each.  */
  unwind.frame_size += num_regs * 12;
}
4021
4022
/* Parse a directive saving VFP registers for ARMv6 and above.  Handles
   D registers 0-31, splitting the list into a VFPv3 (d16+) opcode and a
   base (d0-d15) opcode as needed.  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      /* The opcode encodes the start register relative to d16.  */
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* D registers are 8 bytes each.  */
  unwind.frame_size += count * 8;
}
4071
4072
/* Parse a directive saving VFP registers for pre-ARMv6 (FSTMX-style
   save: each D register takes 8 bytes plus 4 bytes of format word).  */

static void
s_arm_unwind_save_vfp (void)
{
  int count;
  unsigned int reg;
  valueT op;

  count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  if (reg == 8)
    {
      /* Short form.  */
      op = 0xb8 | (count - 1);
      add_unwind_opcode (op, 1);
    }
  else
    {
      /* Long form.  */
      op = 0xb300 | (reg << 4) | (count - 1);
      add_unwind_opcode (op, 2);
    }
  /* 8 bytes per register plus the FSTMX format word.  */
  unwind.frame_size += count * 8 + 4;
}
4106
4107
4108 /* Parse a directive saving iWMMXt data registers. */
4109
4110 static void
4111 s_arm_unwind_save_mmxwr (void)
4112 {
4113 int reg;
4114 int hi_reg;
4115 int i;
4116 unsigned mask = 0;
4117 valueT op;
4118
4119 if (*input_line_pointer == '{')
4120 input_line_pointer++;
4121
4122 do
4123 {
4124 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4125
4126 if (reg == FAIL)
4127 {
4128 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4129 goto error;
4130 }
4131
4132 if (mask >> reg)
4133 as_tsktsk (_("register list not in ascending order"));
4134 mask |= 1 << reg;
4135
4136 if (*input_line_pointer == '-')
4137 {
4138 input_line_pointer++;
4139 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4140 if (hi_reg == FAIL)
4141 {
4142 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4143 goto error;
4144 }
4145 else if (reg >= hi_reg)
4146 {
4147 as_bad (_("bad register range"));
4148 goto error;
4149 }
4150 for (; reg < hi_reg; reg++)
4151 mask |= 1 << reg;
4152 }
4153 }
4154 while (skip_past_comma (&input_line_pointer) != FAIL);
4155
4156 skip_past_char (&input_line_pointer, '}');
4157
4158 demand_empty_rest_of_line ();
4159
4160 /* Generate any deferred opcodes because we're going to be looking at
4161 the list. */
4162 flush_pending_unwind ();
4163
4164 for (i = 0; i < 16; i++)
4165 {
4166 if (mask & (1 << i))
4167 unwind.frame_size += 8;
4168 }
4169
4170 /* Attempt to combine with a previous opcode. We do this because gcc
4171 likes to output separate unwind directives for a single block of
4172 registers. */
4173 if (unwind.opcode_count > 0)
4174 {
4175 i = unwind.opcodes[unwind.opcode_count - 1];
4176 if ((i & 0xf8) == 0xc0)
4177 {
4178 i &= 7;
4179 /* Only merge if the blocks are contiguous. */
4180 if (i < 6)
4181 {
4182 if ((mask & 0xfe00) == (1 << 9))
4183 {
4184 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
4185 unwind.opcode_count--;
4186 }
4187 }
4188 else if (i == 6 && unwind.opcode_count >= 2)
4189 {
4190 i = unwind.opcodes[unwind.opcode_count - 2];
4191 reg = i >> 4;
4192 i &= 0xf;
4193
4194 op = 0xffff << (reg - 1);
4195 if (reg > 0
4196 && ((mask & op) == (1u << (reg - 1))))
4197 {
4198 op = (1 << (reg + i + 1)) - 1;
4199 op &= ~((1 << reg) - 1);
4200 mask |= op;
4201 unwind.opcode_count -= 2;
4202 }
4203 }
4204 }
4205 }
4206
4207 hi_reg = 15;
4208 /* We want to generate opcodes in the order the registers have been
4209 saved, ie. descending order. */
4210 for (reg = 15; reg >= -1; reg--)
4211 {
4212 /* Save registers in blocks. */
4213 if (reg < 0
4214 || !(mask & (1 << reg)))
4215 {
4216 /* We found an unsaved reg. Generate opcodes to save the
4217 preceding block. */
4218 if (reg != hi_reg)
4219 {
4220 if (reg == 9)
4221 {
4222 /* Short form. */
4223 op = 0xc0 | (hi_reg - 10);
4224 add_unwind_opcode (op, 1);
4225 }
4226 else
4227 {
4228 /* Long form. */
4229 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
4230 add_unwind_opcode (op, 2);
4231 }
4232 }
4233 hi_reg = reg - 1;
4234 }
4235 }
4236
4237 return;
4238 error:
4239 ignore_rest_of_line ();
4240 }
4241
4242 static void
4243 s_arm_unwind_save_mmxwcg (void)
4244 {
4245 int reg;
4246 int hi_reg;
4247 unsigned mask = 0;
4248 valueT op;
4249
4250 if (*input_line_pointer == '{')
4251 input_line_pointer++;
4252
4253 skip_whitespace (input_line_pointer);
4254
4255 do
4256 {
4257 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4258
4259 if (reg == FAIL)
4260 {
4261 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4262 goto error;
4263 }
4264
4265 reg -= 8;
4266 if (mask >> reg)
4267 as_tsktsk (_("register list not in ascending order"));
4268 mask |= 1 << reg;
4269
4270 if (*input_line_pointer == '-')
4271 {
4272 input_line_pointer++;
4273 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4274 if (hi_reg == FAIL)
4275 {
4276 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4277 goto error;
4278 }
4279 else if (reg >= hi_reg)
4280 {
4281 as_bad (_("bad register range"));
4282 goto error;
4283 }
4284 for (; reg < hi_reg; reg++)
4285 mask |= 1 << reg;
4286 }
4287 }
4288 while (skip_past_comma (&input_line_pointer) != FAIL);
4289
4290 skip_past_char (&input_line_pointer, '}');
4291
4292 demand_empty_rest_of_line ();
4293
4294 /* Generate any deferred opcodes because we're going to be looking at
4295 the list. */
4296 flush_pending_unwind ();
4297
4298 for (reg = 0; reg < 16; reg++)
4299 {
4300 if (mask & (1 << reg))
4301 unwind.frame_size += 4;
4302 }
4303 op = 0xc700 | mask;
4304 add_unwind_opcode (op, 2);
4305 return;
4306 error:
4307 ignore_rest_of_line ();
4308 }
4309
4310
/* Parse an unwind_save directive.
   If the argument is non-zero, this is a .vsave directive.  */

static void
s_arm_unwind_save (int arch_v6)
{
  char *peek;
  struct reg_entry *reg;
  bfd_boolean had_brace = FALSE;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* Figure out what sort of save we have.  */
  peek = input_line_pointer;

  if (*peek == '{')
    {
      had_brace = TRUE;
      peek++;
    }

  /* Peek at the first register only; the chosen handler re-parses the
     full operand from input_line_pointer itself.  */
  reg = arm_reg_parse_multi (&peek);

  if (!reg)
    {
      as_bad (_("register expected"));
      ignore_rest_of_line ();
      return;
    }

  switch (reg->type)
    {
    case REG_TYPE_FN:
      if (had_brace)
	{
	  as_bad (_("FPA .unwind_save does not take a register list"));
	  ignore_rest_of_line ();
	  return;
	}
      /* FPA form consumed the register name here; advance past it.  */
      input_line_pointer = peek;
      s_arm_unwind_save_fpa (reg->number);
      return;

    case REG_TYPE_RN:
      s_arm_unwind_save_core ();
      return;

    case REG_TYPE_VFD:
      if (arch_v6)
	s_arm_unwind_save_vfp_armv6 ();
      else
	s_arm_unwind_save_vfp ();
      return;

    case REG_TYPE_MMXWR:
      s_arm_unwind_save_mmxwr ();
      return;

    case REG_TYPE_MMXWCG:
      s_arm_unwind_save_mmxwcg ();
      return;

    default:
      as_bad (_(".unwind_save does not support this kind of register"));
      ignore_rest_of_line ();
    }
}
4379
4380
/* Parse an unwind_movsp directive: ".movsp <reg> [, #<offset>]".
   Records that sp has been restored from REG.  */

static void
s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
{
  int reg;
  valueT op;
  int offset;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
  if (reg == FAIL)
    {
      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
      ignore_rest_of_line ();
      return;
    }

  /* Optional constant.	 */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    {
      if (immediate_for_directive (&offset) == FAIL)
	return;
    }
  else
    offset = 0;

  demand_empty_rest_of_line ();

  if (reg == REG_SP || reg == REG_PC)
    {
      as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
      return;
    }

  /* A previous movsp/setfp would already have redirected the frame.  */
  if (unwind.fp_reg != REG_SP)
    as_bad (_("unexpected .unwind_movsp directive"));

  /* Generate opcode to restore the value.  */
  op = 0x90 | reg;
  add_unwind_opcode (op, 1);

  /* Record the information for later.	*/
  unwind.fp_reg = reg;
  unwind.fp_offset = unwind.frame_size - offset;
  unwind.sp_restored = 1;
}
4430
4431 /* Parse an unwind_pad directive. */
4432
4433 static void
4434 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4435 {
4436 int offset;
4437
4438 if (!unwind.proc_start)
4439 as_bad (MISSING_FNSTART);
4440
4441 if (immediate_for_directive (&offset) == FAIL)
4442 return;
4443
4444 if (offset & 3)
4445 {
4446 as_bad (_("stack increment must be multiple of 4"));
4447 ignore_rest_of_line ();
4448 return;
4449 }
4450
4451 /* Don't generate any opcodes, just record the details for later. */
4452 unwind.frame_size += offset;
4453 unwind.pending_offset += offset;
4454
4455 demand_empty_rest_of_line ();
4456 }
4457
4458 /* Parse an unwind_setfp directive. */
4459
4460 static void
4461 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4462 {
4463 int sp_reg;
4464 int fp_reg;
4465 int offset;
4466
4467 if (!unwind.proc_start)
4468 as_bad (MISSING_FNSTART);
4469
4470 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4471 if (skip_past_comma (&input_line_pointer) == FAIL)
4472 sp_reg = FAIL;
4473 else
4474 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4475
4476 if (fp_reg == FAIL || sp_reg == FAIL)
4477 {
4478 as_bad (_("expected <reg>, <reg>"));
4479 ignore_rest_of_line ();
4480 return;
4481 }
4482
4483 /* Optional constant. */
4484 if (skip_past_comma (&input_line_pointer) != FAIL)
4485 {
4486 if (immediate_for_directive (&offset) == FAIL)
4487 return;
4488 }
4489 else
4490 offset = 0;
4491
4492 demand_empty_rest_of_line ();
4493
4494 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4495 {
4496 as_bad (_("register must be either sp or set by a previous"
4497 "unwind_movsp directive"));
4498 return;
4499 }
4500
4501 /* Don't generate any opcodes, just record the information for later. */
4502 unwind.fp_reg = fp_reg;
4503 unwind.fp_used = 1;
4504 if (sp_reg == REG_SP)
4505 unwind.fp_offset = unwind.frame_size - offset;
4506 else
4507 unwind.fp_offset -= offset;
4508 }
4509
4510 /* Parse an unwind_raw directive. */
4511
4512 static void
4513 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4514 {
4515 expressionS exp;
4516 /* This is an arbitrary limit. */
4517 unsigned char op[16];
4518 int count;
4519
4520 if (!unwind.proc_start)
4521 as_bad (MISSING_FNSTART);
4522
4523 expression (&exp);
4524 if (exp.X_op == O_constant
4525 && skip_past_comma (&input_line_pointer) != FAIL)
4526 {
4527 unwind.frame_size += exp.X_add_number;
4528 expression (&exp);
4529 }
4530 else
4531 exp.X_op = O_illegal;
4532
4533 if (exp.X_op != O_constant)
4534 {
4535 as_bad (_("expected <offset>, <opcode>"));
4536 ignore_rest_of_line ();
4537 return;
4538 }
4539
4540 count = 0;
4541
4542 /* Parse the opcode. */
4543 for (;;)
4544 {
4545 if (count >= 16)
4546 {
4547 as_bad (_("unwind opcode too long"));
4548 ignore_rest_of_line ();
4549 }
4550 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4551 {
4552 as_bad (_("invalid unwind opcode"));
4553 ignore_rest_of_line ();
4554 return;
4555 }
4556 op[count++] = exp.X_add_number;
4557
4558 /* Parse the next byte. */
4559 if (skip_past_comma (&input_line_pointer) == FAIL)
4560 break;
4561
4562 expression (&exp);
4563 }
4564
4565 /* Add the opcode bytes in reverse order. */
4566 while (count--)
4567 add_unwind_opcode (op[count], 1);
4568
4569 demand_empty_rest_of_line ();
4570 }
4571
4572
4573 /* Parse a .eabi_attribute directive. */
4574
4575 static void
4576 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4577 {
4578 int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4579
4580 if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4581 attributes_set_explicitly[tag] = 1;
4582 }
4583
/* Emit a tls fix for the symbol.  Implements the .tlsdescseq directive,
   which labels the current location with a TLS descriptor-sequence
   relocation against the given symbol.  */

static void
s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
{
  char *p;
  expressionS exp;
#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Fix is applied at the current output position; no bytes are
     emitted here.  */
  p = obstack_next_free (&frchain_now->frch_obstack);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
	       : BFD_RELOC_ARM_TLS_DESCSEQ);
}
4607 #endif /* OBJ_ELF */
4608
4609 static void s_arm_arch (int);
4610 static void s_arm_object_arch (int);
4611 static void s_arm_cpu (int);
4612 static void s_arm_fpu (int);
4613 static void s_arm_arch_extension (int);
4614
4615 #ifdef TE_PE
4616
4617 static void
4618 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4619 {
4620 expressionS exp;
4621
4622 do
4623 {
4624 expression (&exp);
4625 if (exp.X_op == O_symbol)
4626 exp.X_op = O_secrel;
4627
4628 emit_expr (&exp, 4);
4629 }
4630 while (*input_line_pointer++ == ',');
4631
4632 input_line_pointer--;
4633 demand_empty_rest_of_line ();
4634 }
4635 #endif /* TE_PE */
4636
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.	 */
  { "req",	   s_req,	  0 },
  /* Following two are likewise never called.  */
  { "dn",	   s_dn,          0 },
  { "qn",          s_qn,          0 },
  { "unreq",	   s_unreq,	  0 },
  { "bss",	   s_bss,	  0 },
  { "align",	   s_align_ptwo,  2 },
  { "arm",	   s_arm,	  0 },
  { "thumb",	   s_thumb,	  0 },
  { "code",	   s_code,	  0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func",  s_thumb_func,  0 },
  { "thumb_set",   s_thumb_set,	  0 },
  { "even",	   s_even,	  0 },
  /* Literal pool placement.  */
  { "ltorg",	   s_ltorg,	  0 },
  { "pool",	   s_ltorg,	  0 },
  { "syntax",	   s_syntax,	  0 },
  { "cpu",	   s_arm_cpu,	  0 },
  { "arch",	   s_arm_arch,	  0 },
  { "object_arch", s_arm_object_arch,	0 },
  { "fpu",	   s_arm_fpu,	  0 },
  { "arch_extension", s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  { "word",	        s_arm_elf_cons, 4 },
  { "long",	        s_arm_elf_cons, 4 },
  { "inst.n",           s_arm_elf_inst, 2 },
  { "inst.w",           s_arm_elf_inst, 4 },
  { "inst",             s_arm_elf_inst, 0 },
  { "rel31",	        s_arm_rel31,	0 },
  /* AEABI stack-unwind table directives.  */
  { "fnstart",		s_arm_unwind_fnstart,	0 },
  { "fnend",		s_arm_unwind_fnend,	0 },
  { "cantunwind",	s_arm_unwind_cantunwind, 0 },
  { "personality",	s_arm_unwind_personality, 0 },
  { "personalityindex",	s_arm_unwind_personalityindex, 0 },
  { "handlerdata",	s_arm_unwind_handlerdata, 0 },
  { "save",		s_arm_unwind_save,	0 },
  { "vsave",		s_arm_unwind_save,	1 },
  { "movsp",		s_arm_unwind_movsp,	0 },
  { "pad",		s_arm_unwind_pad,	0 },
  { "setfp",		s_arm_unwind_setfp,	0 },
  { "unwind_raw",	s_arm_unwind_raw,	0 },
  { "eabi_attribute",	s_arm_eabi_attribute,	0 },
  { "tlsdescseq",	s_arm_tls_descseq,      0 },
#else
  { "word",	   cons, 4},

  /* These are used for dwarf.	*/
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.	 */
  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend",	   float_cons, 'x' },
  { "ldouble",	   float_cons, 'x' },
  { "packed",	   float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif

  /* These are for compatibility with CodeComposer Studio.  */
  {"ref",          s_ccs_ref,        0},
  {"def",          s_ccs_def,        0},
  {"asmfunc",      s_ccs_asmfunc,    0},
  {"endasmfunc",   s_ccs_endasmfunc, 0},

  { 0, 0, 0 }
};
4716 \f
4717 /* Parser functions used exclusively in instruction operands. */
4718
4719 /* Generic immediate-value read function for use in insn parsing.
4720 STR points to the beginning of the immediate (the leading #);
4721 VAL receives the value; if the value is outside [MIN, MAX]
4722 issue an error. PREFIX_OPT is true if the immediate prefix is
4723 optional. */
4724
4725 static int
4726 parse_immediate (char **str, int *val, int min, int max,
4727 bfd_boolean prefix_opt)
4728 {
4729 expressionS exp;
4730 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4731 if (exp.X_op != O_constant)
4732 {
4733 inst.error = _("constant expression required");
4734 return FAIL;
4735 }
4736
4737 if (exp.X_add_number < min || exp.X_add_number > max)
4738 {
4739 inst.error = _("immediate value out of range");
4740 return FAIL;
4741 }
4742
4743 *val = exp.X_add_number;
4744 return SUCCESS;
4745 }
4746
4747 /* Less-generic immediate-value read function with the possibility of loading a
4748 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4749 instructions. Puts the result directly in inst.operands[i]. */
4750
static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bfd_boolean allow_symbol_p)
{
  /* Parse into the caller-supplied expression when one is given;
     otherwise use a local scratch expression.  */
  expressionS exp;
  expressionS *exp_p = in_exp ? in_exp : &exp;
  char *ptr = *str;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
    {
      /* Low 32 bits of the value go in .imm.  */
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
				  & 0xffffffff);
	  /* Mark that .reg carries the high half of a 64-bit immediate,
	     not a register number.  */
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
    {
      /* Number of littlenums needed to hold 32 bits.  */
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg,  in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  /* All littlenums above the low 64 bits must repeat the
	     sign-extension pattern.  */
	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Assemble the low 32 bits into .imm ...  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      /* ... and the next 32 bits into .reg.  */
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
4819
4820 /* Returns the pseudo-register number of an FPA immediate constant,
4821 or FAIL if there isn't a valid constant here. */
4822
static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *	 save_in;
  expressionS	 exp;
  int		 i;
  int		 j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    /* Pseudo-register numbering: constant I is register I + 8.
	       NOTE(review): presumably matching the FPA's predefined
	       constant registers -- confirm against the FPA encoding.  */
	    return i + 8;
	  /* Partial match only; restore and keep looking.  */
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Compare the parsed littlenums against each predefined value.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").
     expression () works on input_line_pointer, so temporarily point it
     at our string and restore it afterwards.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
#define X_PRECISION 5
#define E_PRECISION 15L
      if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  /* No match: restore input_line_pointer and report failure.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
4912
4913 /* Returns 1 if a number has "quarter-precision" float format
4914 0baBbbbbbc defgh000 00000000 00000000. */
4915
/* Return non-zero if IMM has the "quarter-precision" float layout
   0baBbbbbbc defgh000 00000000 00000000, where B must be the
   complement of b.  */

static int
is_quarter_float (unsigned imm)
{
  /* The low 19 bits must all be clear.  */
  if ((imm & 0x7ffff) != 0)
    return 0;

  /* Bits 30..25 must be 011111 when b (bit 29) is set, or 100000
     when it is clear.  */
  unsigned expected = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
  return (imm & 0x7e000000) == expected;
}
4922
4923
4924 /* Detect the presence of a floating point or integer zero constant,
4925 i.e. #0.0 or #0. */
4926
static bfd_boolean
parse_ifimm_zero (char **in)
{
  int error_code;

  if (!is_immediate_prefix (**in))
    return FALSE;

  /* NOTE(review): from here on *IN is advanced even when we end up
     returning FALSE -- confirm callers do not rely on it being
     restored on failure.  */
  ++*in;

  /* Accept #0x0 as a synonym for #0.  */
  if (strncmp (*in, "0x", 2) == 0)
    {
      int val;
      /* min == max == 0, so only the value zero is accepted.  */
      if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
        return FALSE;
      return TRUE;
    }

  error_code = atof_generic (in, ".", EXP_CHARS,
                             &generic_floating_point_number);

  /* low > leader presumably means every littlenum is zero, i.e. the
     parsed value is +0.0; the '+' sign test rejects -0.0.
     NOTE(review): relies on atof_generic leaving leader below low for
     a zero result -- confirm against atof_generic's contract.  */
  if (!error_code
      && generic_floating_point_number.sign == '+'
      && (generic_floating_point_number.low
          > generic_floating_point_number.leader))
    return TRUE;

  return FALSE;
}
4957
4958 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4959 0baBbbbbbc defgh000 00000000 00000000.
4960 The zero and minus-zero cases need special handling, since they can't be
4961 encoded in the "quarter-precision" float format, but can nonetheless be
4962 loaded as integer constants. */
4963
static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  /* Hex literals are always integers here.  */
  if (strncmp (fpnum, "0x", 2) == 0)
    return FAIL;
  else
    {
      /* Scan up to the end of the token for a character that marks a
	 floating-point literal.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Accept a valid quarter-precision pattern, or +/-0.0 (sign bit
	 ignored by the mask), which is loaded as an integer constant.  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}
5021
/* Shift operands.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

/* Maps a shift mnemonic, as written in source, to its kind.  */
struct asm_shift_name
{
  const char *name;		/* Mnemonic, e.g. "lsl".  */
  enum shift_kind kind;		/* Canonical shift operation.  */
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.  */
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.  */
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.  */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.  */
};
5043
5044 /* Parse a <shift> specifier on an ARM data processing instruction.
5045 This has three forms:
5046
5047 (LSL|LSR|ASL|ASR|ROR) Rs
5048 (LSL|LSR|ASL|ASR|ROR) #imm
5049 RRX
5050
5051 Note that ASL is assimilated to LSL in the instruction encoding, and
5052 RRX to ROR #0 (which cannot be written as such). */
5053
5054 static int
5055 parse_shift (char **str, int i, enum parse_shift_mode mode)
5056 {
5057 const struct asm_shift_name *shift_name;
5058 enum shift_kind shift;
5059 char *s = *str;
5060 char *p = s;
5061 int reg;
5062
5063 for (p = *str; ISALPHA (*p); p++)
5064 ;
5065
5066 if (p == *str)
5067 {
5068 inst.error = _("shift expression expected");
5069 return FAIL;
5070 }
5071
5072 shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
5073 p - *str);
5074
5075 if (shift_name == NULL)
5076 {
5077 inst.error = _("shift expression expected");
5078 return FAIL;
5079 }
5080
5081 shift = shift_name->kind;
5082
5083 switch (mode)
5084 {
5085 case NO_SHIFT_RESTRICT:
5086 case SHIFT_IMMEDIATE: break;
5087
5088 case SHIFT_LSL_OR_ASR_IMMEDIATE:
5089 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
5090 {
5091 inst.error = _("'LSL' or 'ASR' required");
5092 return FAIL;
5093 }
5094 break;
5095
5096 case SHIFT_LSL_IMMEDIATE:
5097 if (shift != SHIFT_LSL)
5098 {
5099 inst.error = _("'LSL' required");
5100 return FAIL;
5101 }
5102 break;
5103
5104 case SHIFT_ASR_IMMEDIATE:
5105 if (shift != SHIFT_ASR)
5106 {
5107 inst.error = _("'ASR' required");
5108 return FAIL;
5109 }
5110 break;
5111
5112 default: abort ();
5113 }
5114
5115 if (shift != SHIFT_RRX)
5116 {
5117 /* Whitespace can appear here if the next thing is a bare digit. */
5118 skip_whitespace (p);
5119
5120 if (mode == NO_SHIFT_RESTRICT
5121 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5122 {
5123 inst.operands[i].imm = reg;
5124 inst.operands[i].immisreg = 1;
5125 }
5126 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5127 return FAIL;
5128 }
5129 inst.operands[i].shift_kind = shift;
5130 inst.operands[i].shifted = 1;
5131 *str = p;
5132 return SUCCESS;
5133 }
5134
5135 /* Parse a <shifter_operand> for an ARM data processing instruction:
5136
5137 #<immediate>
5138 #<immediate>, <rotate>
5139 <Rm>
5140 <Rm>, <shift>
5141
5142 where <shift> is defined by parse_shift above, and <rotate> is a
5143 multiple of 2 between 0 and 30. Validation of immediate operands
5144 is deferred to md_apply_fix. */
5145
static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS exp;

  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  /* Not a register: must be an immediate expression.  */
  if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&exp, str, GE_NO_PREFIX))
	return FAIL;

      if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      /* The rotation must be an even amount in [0, 30], and the base
	 constant must fit in 8 bits.  */
      value = exp.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Encode as specified: 8-bit constant in the low bits, with the
	 even rotation shifted left 7 (i.e. rotation/2 above bit 7).  */
      inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
      return SUCCESS;
    }

  /* Plain immediate with no rotation: validation of the value is
     deferred to md_apply_fix via this relocation type.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 0;
  return SUCCESS;
}
5204
5205 /* Group relocation information. Each entry in the table contains the
5206 textual name of the relocation as may appear in assembler source
5207 and must end with a colon.
5208 Along with this textual name are the relocation codes to be used if
5209 the corresponding instruction is an ALU instruction (ADD or SUB only),
5210 an LDR, an LDRS, or an LDC. */
5211
struct group_reloc_table_entry
{
  /* Relocation name as written in source, without the trailing colon.  */
  const char *name;
  /* BFD reloc codes for each instruction class; a value of 0 means the
     relocation is not allowed with that class (checked by callers).  */
  int alu_code;			/* ADD/SUB only.  */
  int ldr_code;
  int ldrs_code;
  int ldc_code;
};

typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC
} group_reloc_type;
5229
/* A zero code below means that group relocation is disallowed for the
   corresponding instruction class.  */
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 },	/* LDC */
    /* Absolute thumb alu relocations.  */
    { "lower0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "lower8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 } };				/* LDC.  */
5304
5305 /* Given the address of a pointer pointing to the textual name of a group
5306 relocation as may appear in assembler source, attempt to find its details
5307 in group_reloc_table. The pointer will be updated to the character after
5308 the trailing colon. On failure, FAIL will be returned; SUCCESS
5309 otherwise. On success, *entry will be updated to point at the relevant
5310 group_reloc_table entry. */
5311
5312 static int
5313 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5314 {
5315 unsigned int i;
5316 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5317 {
5318 int length = strlen (group_reloc_table[i].name);
5319
5320 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5321 && (*str)[length] == ':')
5322 {
5323 *out = &group_reloc_table[i];
5324 *str += (length + 1);
5325 return SUCCESS;
5326 }
5327 }
5328
5329 return FAIL;
5330 }
5331
5332 /* Parse a <shifter_operand> for an ARM data processing instruction
5333 (as for parse_shifter_operand) where group relocations are allowed:
5334
5335 #<immediate>
5336 #<immediate>, <rotate>
5337 #:<group_reloc>:<expression>
5338 <Rm>
5339 <Rm>, <shift>
5340
5341 where <group_reloc> is one of the strings defined in group_reloc_table.
5342 The hashes are optional.
5343
5344 Everything else is as for parse_shifter_operand. */
5345
5346 static parse_operand_result
5347 parse_shifter_operand_group_reloc (char **str, int i)
5348 {
5349 /* Determine if we have the sequence of characters #: or just :
5350 coming next. If we do, then we check for a group relocation.
5351 If we don't, punt the whole lot to parse_shifter_operand. */
5352
5353 if (((*str)[0] == '#' && (*str)[1] == ':')
5354 || (*str)[0] == ':')
5355 {
5356 struct group_reloc_table_entry *entry;
5357
5358 if ((*str)[0] == '#')
5359 (*str) += 2;
5360 else
5361 (*str)++;
5362
5363 /* Try to parse a group relocation. Anything else is an error. */
5364 if (find_group_reloc_table_entry (str, &entry) == FAIL)
5365 {
5366 inst.error = _("unknown group relocation");
5367 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5368 }
5369
5370 /* We now have the group relocation table entry corresponding to
5371 the name in the assembler source. Next, we parse the expression. */
5372 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
5373 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5374
5375 /* Record the relocation type (always the ALU variant here). */
5376 inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
5377 gas_assert (inst.reloc.type != 0);
5378
5379 return PARSE_OPERAND_SUCCESS;
5380 }
5381 else
5382 return parse_shifter_operand (str, i) == SUCCESS
5383 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5384
5385 /* Never reached. */
5386 }
5387
5388 /* Parse a Neon alignment expression. Information is written to
5389 inst.operands[i]. We assume the initial ':' has been skipped.
5390
5391 align .imm = align << 8, .immisalign=1, .preind=0 */
5392 static parse_operand_result
5393 parse_neon_alignment (char **str, int i)
5394 {
5395 char *p = *str;
5396 expressionS exp;
5397
5398 my_get_expression (&exp, &p, GE_NO_PREFIX);
5399
5400 if (exp.X_op != O_constant)
5401 {
5402 inst.error = _("alignment must be constant");
5403 return PARSE_OPERAND_FAIL;
5404 }
5405
5406 inst.operands[i].imm = exp.X_add_number << 8;
5407 inst.operands[i].immisalign = 1;
5408 /* Alignments are not pre-indexes. */
5409 inst.operands[i].preind = 0;
5410
5411 *str = p;
5412 return PARSE_OPERAND_SUCCESS;
5413 }
5414
5415 /* Parse all forms of an ARM address expression. Information is written
5416 to inst.operands[i] and/or inst.reloc.
5417
5418 Preindexed addressing (.preind=1):
5419
5420 [Rn, #offset] .reg=Rn .reloc.exp=offset
5421 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5422 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5423 .shift_kind=shift .reloc.exp=shift_imm
5424
5425 These three may have a trailing ! which causes .writeback to be set also.
5426
5427 Postindexed addressing (.postind=1, .writeback=1):
5428
5429 [Rn], #offset .reg=Rn .reloc.exp=offset
5430 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5431 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5432 .shift_kind=shift .reloc.exp=shift_imm
5433
5434 Unindexed addressing (.preind=0, .postind=0):
5435
5436 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5437
5438 Other:
5439
5440 [Rn]{!} shorthand for [Rn,#0]{!}
5441 =immediate .isreg=0 .reloc.exp=immediate
5442 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5443
5444 It is the caller's responsibility to check for addressing modes not
5445 supported by the instruction, and to set inst.reloc.type. */
5446
static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  /* No '[': either "=immediate" or a bare label/address.  */
  if (skip_past_char (&p, '[') == FAIL)
    {
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.reloc.pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;

	  if (my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX_BIG))
	    return PARSE_OPERAND_FAIL;
	}
      else if (parse_big_immediate (&p, i, &inst.reloc.exp,
				    /*allow_symbol_p=*/TRUE))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* PR gas/14887: Allow for whitespace after the opening bracket.  */
  skip_whitespace (p);

  /* The base register is mandatory.  */
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  /* "[Rn," -- pre-indexed forms.  */
  if (skip_past_comma (&p) == SUCCESS)
    {
      inst.operands[i].preind = 1;

      /* Optional sign before a register offset.  */
      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  /* [Rn, +/-Rm {, shift}].  */
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here.  This may be subject to
	     change.  */
	  parse_operand_result result = parse_neon_alignment (&p, i);

	  if (result != PARSE_OPERAND_SUCCESS)
	    return result;
	}
      else
	{
	  /* Not a register offset: the '-' we consumed (if any) belongs
	     to the immediate expression, so back up and let the
	     expression parser see it.  */
	  if (inst.operands[i].negative)
	    {
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		  case GROUP_LDR:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
		    break;

		  case GROUP_LDRS:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
		    break;

		  case GROUP_LDC:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
		    break;

		  default:
		    gas_assert (0);
		}

	      /* A zero code in the table means this combination of
		 relocation and instruction class is not permitted.  */
	      if (inst.reloc.type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    {
	      char *q = p;
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }
  else if (skip_past_char (&p, ':') == SUCCESS)
    {
      /* FIXME: '@' should be used here, but it's filtered out by generic code
	 before we get to see it here.  This may be subject to change.  */
      parse_operand_result result = parse_neon_alignment (&p, i);

      if (result != PARSE_OPERAND_SUCCESS)
	return result;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  /* Trailing '!': writeback.  */
  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  /* "[Rn], ..." -- post-indexed and unindexed forms.  */
  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  /* Optional sign before a register offset.  */
	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already.  If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      char *q = p;
	      /* The '-' we consumed (if any) belongs to the immediate
		 expression; back up so the parser sees it.  */
	      if (inst.operands[i].negative)
		{
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
5698
5699 static int
5700 parse_address (char **str, int i)
5701 {
5702 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5703 ? SUCCESS : FAIL;
5704 }
5705
5706 static parse_operand_result
5707 parse_address_group_reloc (char **str, int i, group_reloc_type type)
5708 {
5709 return parse_address_main (str, i, 1, type);
5710 }
5711
5712 /* Parse an operand for a MOVW or MOVT instruction. */
5713 static int
5714 parse_half (char **str)
5715 {
5716 char * p;
5717
5718 p = *str;
5719 skip_past_char (&p, '#');
5720 if (strncasecmp (p, ":lower16:", 9) == 0)
5721 inst.reloc.type = BFD_RELOC_ARM_MOVW;
5722 else if (strncasecmp (p, ":upper16:", 9) == 0)
5723 inst.reloc.type = BFD_RELOC_ARM_MOVT;
5724
5725 if (inst.reloc.type != BFD_RELOC_UNUSED)
5726 {
5727 p += 9;
5728 skip_whitespace (p);
5729 }
5730
5731 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5732 return FAIL;
5733
5734 if (inst.reloc.type == BFD_RELOC_UNUSED)
5735 {
5736 if (inst.reloc.exp.X_op != O_constant)
5737 {
5738 inst.error = _("constant expression expected");
5739 return FAIL;
5740 }
5741 if (inst.reloc.exp.X_add_number < 0
5742 || inst.reloc.exp.X_add_number > 0xffff)
5743 {
5744 inst.error = _("immediate value out of range");
5745 return FAIL;
5746 }
5747 }
5748 *str = p;
5749 return SUCCESS;
5750 }
5751
5752 /* Miscellaneous. */
5753
/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
   or a bitmask suitable to be or-ed into the ARM msr instruction.
   LHS is TRUE when the PSR is the destination of an MSR (a write);
   some defaulting behaviour below depends on the direction.  */
static int
parse_psr (char **str, bfd_boolean lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bfd_boolean is_apsr = FALSE;
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698: If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    m_profile = FALSE;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */
      is_apsr = TRUE;
      psr_field = 0;
    }
  else if (m_profile)
    {
      /* M-profile special registers (e.g. PRIMASK, BASEPRI) are looked up
	 by name in arm_v7m_psr_hsh.  First scan the identifier.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* For the *PSR family, stop the name at the first 'r'/'R' so that
	 any "_<bits>" suffix is left for the suffix parser below.  */
      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified.  Note that
	 APSR itself is handled above.  */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = TRUE;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  /* Step over the four-character register name parsed above.  */
  p += 4;
 check_suffix:
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  /* Each flag letter sets its own bit; naming a flag twice sets
	     0x20 (for nzcvq) or 0x2 (for g), which is rejected below.  */
	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  /* All five of n, z, c, v, q present maps to the PSR_f mask.  */
	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  if (g_bit == 0x1)
	    {
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject duplicates and partial nzcvq subsets: the only legal
	     combinations are "nzcvq", "g", "nzcvqg" or nothing.  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  /* Non-APSR suffixes name mask fields (e.g. "_cxsf") looked up
	     in arm_psr_hsh.  */
	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						      p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;    /* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here).  */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
5950
5951 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5952 value suitable for splatting into the AIF field of the instruction. */
5953
5954 static int
5955 parse_cps_flags (char **str)
5956 {
5957 int val = 0;
5958 int saw_a_flag = 0;
5959 char *s = *str;
5960
5961 for (;;)
5962 switch (*s++)
5963 {
5964 case '\0': case ',':
5965 goto done;
5966
5967 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
5968 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
5969 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
5970
5971 default:
5972 inst.error = _("unrecognized CPS flag");
5973 return FAIL;
5974 }
5975
5976 done:
5977 if (saw_a_flag == 0)
5978 {
5979 inst.error = _("missing CPS flags");
5980 return FAIL;
5981 }
5982
5983 *str = s - 1;
5984 return val;
5985 }
5986
5987 /* Parse an endian specifier ("BE" or "LE", case insensitive);
5988 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
5989
5990 static int
5991 parse_endian_specifier (char **str)
5992 {
5993 int little_endian;
5994 char *s = *str;
5995
5996 if (strncasecmp (s, "BE", 2))
5997 little_endian = 0;
5998 else if (strncasecmp (s, "LE", 2))
5999 little_endian = 1;
6000 else
6001 {
6002 inst.error = _("valid endian specifiers are be or le");
6003 return FAIL;
6004 }
6005
6006 if (ISALNUM (s[2]) || s[2] == '_')
6007 {
6008 inst.error = _("valid endian specifiers are be or le");
6009 return FAIL;
6010 }
6011
6012 *str = s + 2;
6013 return little_endian;
6014 }
6015
6016 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6017 value suitable for poking into the rotate field of an sxt or sxta
6018 instruction, or FAIL on error. */
6019
6020 static int
6021 parse_ror (char **str)
6022 {
6023 int rot;
6024 char *s = *str;
6025
6026 if (strncasecmp (s, "ROR", 3) == 0)
6027 s += 3;
6028 else
6029 {
6030 inst.error = _("missing rotation field after comma");
6031 return FAIL;
6032 }
6033
6034 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6035 return FAIL;
6036
6037 switch (rot)
6038 {
6039 case 0: *str = s; return 0x0;
6040 case 8: *str = s; return 0x1;
6041 case 16: *str = s; return 0x2;
6042 case 24: *str = s; return 0x3;
6043
6044 default:
6045 inst.error = _("rotation can only be 0, 8, 16, or 24");
6046 return FAIL;
6047 }
6048 }
6049
6050 /* Parse a conditional code (from conds[] below). The value returned is in the
6051 range 0 .. 14, or FAIL. */
6052 static int
6053 parse_cond (char **str)
6054 {
6055 char *q;
6056 const struct asm_cond *c;
6057 int n;
6058 /* Condition codes are always 2 characters, so matching up to
6059 3 characters is sufficient. */
6060 char cond[3];
6061
6062 q = *str;
6063 n = 0;
6064 while (ISALPHA (*q) && n < 3)
6065 {
6066 cond[n] = TOLOWER (*q);
6067 q++;
6068 n++;
6069 }
6070
6071 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6072 if (!c)
6073 {
6074 inst.error = _("condition required");
6075 return FAIL;
6076 }
6077
6078 *str = q;
6079 return c->value;
6080 }
6081
6082 /* If the given feature available in the selected CPU, mark it as used.
6083 Returns TRUE iff feature is available. */
6084 static bfd_boolean
6085 mark_feature_used (const arm_feature_set *feature)
6086 {
6087 /* Ensure the option is valid on the current architecture. */
6088 if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
6089 return FALSE;
6090
6091 /* Add the appropriate architecture feature for the barrier option used.
6092 */
6093 if (thumb_mode)
6094 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
6095 else
6096 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
6097
6098 return TRUE;
6099 }
6100
6101 /* Parse an option for a barrier instruction. Returns the encoding for the
6102 option, or FAIL. */
6103 static int
6104 parse_barrier (char **str)
6105 {
6106 char *p, *q;
6107 const struct asm_barrier_opt *o;
6108
6109 p = q = *str;
6110 while (ISALPHA (*q))
6111 q++;
6112
6113 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6114 q - p);
6115 if (!o)
6116 return FAIL;
6117
6118 if (!mark_feature_used (&o->arch))
6119 return FAIL;
6120
6121 *str = q;
6122 return o->value;
6123 }
6124
6125 /* Parse the operands of a table branch instruction. Similar to a memory
6126 operand. */
6127 static int
6128 parse_tb (char **str)
6129 {
6130 char * p = *str;
6131 int reg;
6132
6133 if (skip_past_char (&p, '[') == FAIL)
6134 {
6135 inst.error = _("'[' expected");
6136 return FAIL;
6137 }
6138
6139 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6140 {
6141 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6142 return FAIL;
6143 }
6144 inst.operands[0].reg = reg;
6145
6146 if (skip_past_comma (&p) == FAIL)
6147 {
6148 inst.error = _("',' expected");
6149 return FAIL;
6150 }
6151
6152 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6153 {
6154 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6155 return FAIL;
6156 }
6157 inst.operands[0].imm = reg;
6158
6159 if (skip_past_comma (&p) == SUCCESS)
6160 {
6161 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6162 return FAIL;
6163 if (inst.reloc.exp.X_add_number != 1)
6164 {
6165 inst.error = _("invalid shift");
6166 return FAIL;
6167 }
6168 inst.operands[0].shifted = 1;
6169 }
6170
6171 if (skip_past_char (&p, ']') == FAIL)
6172 {
6173 inst.error = _("']' expected");
6174 return FAIL;
6175 }
6176 *str = p;
6177 return SUCCESS;
6178 }
6179
/* Parse the operands of a Neon VMOV instruction.  See do_neon_mov for more
   information on the types the operands can take and how they are encoded.
   Up to four operands may be read; this function handles setting the
   ".present" field for each read operand itself.
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   else returns FAIL.

   NOTE: the operand index I is advanced here as operands are consumed,
   and written back through WHICH_OPERAND on success; callers (see the
   OP_VMOV case in parse_operands) rely on that.  */

static int
parse_neon_mov (char **str, int *which_operand)
{
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only): first operand is an S, D or Q
	 register; the second operand disambiguates the case.  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* Case 5: a second ARM core register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15: two ARM core registers follow.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
	       == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>
	     (parse_big_immediate fills in the operand itself.)  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7: first operand is an ARM core register.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14: VMOV <Rd>, <Rn>, <Sm>, <Se> — a second single-
		 precision register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13: VMOV <Rd>, <Sm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands. Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
6402
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  The ARM code goes in the low
   sixteen bits and the Thumb code in the high sixteen;
   parse_operands splits them back apart.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))

/* Matcher codes for parse_operands.  The order of the OP_o* optional
   codes matters: OP_FIRST_OPTIONAL below marks where they begin.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,       /* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,      /* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,      /* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,   /* VFP single or double-precision register list (& quad) */
  OP_NRDLST,    /* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,   /* Neon element/structure list */

  OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0, /* VFP S or D reg, or floating point immediate zero.  */
  OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
  OP_VMOV,      /* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,        /* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,      /*                 0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,   /* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,      /*                             1 .. 32 */
  OP_oI32z,      /*                             0 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,       /* Optional Neon double precision register */
  OP_oRNQ,       /* Optional Neon quad precision register */
  OP_oRNDQ,      /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  /* parse_operands treats any code >= this value as optional (it may
     backtrack past it); keep it pointing at the first OP_o* code.  */
  OP_FIRST_OPTIONAL = OP_oI7b
};
6538
6539 /* Generic instruction operand parser. This does no encoding and no
6540 semantic validation; it merely squirrels values away in the inst
6541 structure. Returns SUCCESS or FAIL depending on whether the
6542 specified grammar matched. */
6543 static int
6544 parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
6545 {
6546 unsigned const int *upat = pattern;
6547 char *backtrack_pos = 0;
6548 const char *backtrack_error = 0;
6549 int i, val = 0, backtrack_index = 0;
6550 enum arm_reg_type rtype;
6551 parse_operand_result result;
6552 unsigned int op_parse_code;
6553
6554 #define po_char_or_fail(chr) \
6555 do \
6556 { \
6557 if (skip_past_char (&str, chr) == FAIL) \
6558 goto bad_args; \
6559 } \
6560 while (0)
6561
6562 #define po_reg_or_fail(regtype) \
6563 do \
6564 { \
6565 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6566 & inst.operands[i].vectype); \
6567 if (val == FAIL) \
6568 { \
6569 first_error (_(reg_expected_msgs[regtype])); \
6570 goto failure; \
6571 } \
6572 inst.operands[i].reg = val; \
6573 inst.operands[i].isreg = 1; \
6574 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6575 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6576 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6577 || rtype == REG_TYPE_VFD \
6578 || rtype == REG_TYPE_NQ); \
6579 } \
6580 while (0)
6581
6582 #define po_reg_or_goto(regtype, label) \
6583 do \
6584 { \
6585 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6586 & inst.operands[i].vectype); \
6587 if (val == FAIL) \
6588 goto label; \
6589 \
6590 inst.operands[i].reg = val; \
6591 inst.operands[i].isreg = 1; \
6592 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6593 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6594 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6595 || rtype == REG_TYPE_VFD \
6596 || rtype == REG_TYPE_NQ); \
6597 } \
6598 while (0)
6599
6600 #define po_imm_or_fail(min, max, popt) \
6601 do \
6602 { \
6603 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6604 goto failure; \
6605 inst.operands[i].imm = val; \
6606 } \
6607 while (0)
6608
6609 #define po_scalar_or_goto(elsz, label) \
6610 do \
6611 { \
6612 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6613 if (val == FAIL) \
6614 goto label; \
6615 inst.operands[i].reg = val; \
6616 inst.operands[i].isscalar = 1; \
6617 } \
6618 while (0)
6619
6620 #define po_misc_or_fail(expr) \
6621 do \
6622 { \
6623 if (expr) \
6624 goto failure; \
6625 } \
6626 while (0)
6627
6628 #define po_misc_or_fail_no_backtrack(expr) \
6629 do \
6630 { \
6631 result = expr; \
6632 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6633 backtrack_pos = 0; \
6634 if (result != PARSE_OPERAND_SUCCESS) \
6635 goto failure; \
6636 } \
6637 while (0)
6638
6639 #define po_barrier_or_imm(str) \
6640 do \
6641 { \
6642 val = parse_barrier (&str); \
6643 if (val == FAIL && ! ISALPHA (*str)) \
6644 goto immediate; \
6645 if (val == FAIL \
6646 /* ISB can only take SY as an option. */ \
6647 || ((inst.instruction & 0xf0) == 0x60 \
6648 && val != 0xf)) \
6649 { \
6650 inst.error = _("invalid barrier type"); \
6651 backtrack_pos = 0; \
6652 goto failure; \
6653 } \
6654 } \
6655 while (0)
6656
6657 skip_whitespace (str);
6658
6659 for (i = 0; upat[i] != OP_stop; i++)
6660 {
6661 op_parse_code = upat[i];
6662 if (op_parse_code >= 1<<16)
6663 op_parse_code = thumb ? (op_parse_code >> 16)
6664 : (op_parse_code & ((1<<16)-1));
6665
6666 if (op_parse_code >= OP_FIRST_OPTIONAL)
6667 {
6668 /* Remember where we are in case we need to backtrack. */
6669 gas_assert (!backtrack_pos);
6670 backtrack_pos = str;
6671 backtrack_error = inst.error;
6672 backtrack_index = i;
6673 }
6674
6675 if (i > 0 && (i > 1 || inst.operands[0].present))
6676 po_char_or_fail (',');
6677
6678 switch (op_parse_code)
6679 {
6680 /* Registers */
6681 case OP_oRRnpc:
6682 case OP_oRRnpcsp:
6683 case OP_RRnpc:
6684 case OP_RRnpcsp:
6685 case OP_oRR:
6686 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
6687 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
6688 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
6689 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
6690 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
6691 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
6692 case OP_oRND:
6693 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
6694 case OP_RVC:
6695 po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
6696 break;
6697 /* Also accept generic coprocessor regs for unknown registers. */
6698 coproc_reg:
6699 po_reg_or_fail (REG_TYPE_CN);
6700 break;
6701 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
6702 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
6703 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
6704 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
6705 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
6706 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
6707 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
6708 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
6709 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
6710 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
6711 case OP_oRNQ:
6712 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
6713 case OP_oRNDQ:
6714 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
6715 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
6716 case OP_oRNSDQ:
6717 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
6718
6719 /* Neon scalar. Using an element size of 8 means that some invalid
6720 scalars are accepted here, so deal with those in later code. */
6721 case OP_RNSC: po_scalar_or_goto (8, failure); break;
6722
6723 case OP_RNDQ_I0:
6724 {
6725 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
6726 break;
6727 try_imm0:
6728 po_imm_or_fail (0, 0, TRUE);
6729 }
6730 break;
6731
6732 case OP_RVSD_I0:
6733 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
6734 break;
6735
6736 case OP_RSVD_FI0:
6737 {
6738 po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
6739 break;
6740 try_ifimm0:
6741 if (parse_ifimm_zero (&str))
6742 inst.operands[i].imm = 0;
6743 else
6744 {
6745 inst.error
6746 = _("only floating point zero is allowed as immediate value");
6747 goto failure;
6748 }
6749 }
6750 break;
6751
6752 case OP_RR_RNSC:
6753 {
6754 po_scalar_or_goto (8, try_rr);
6755 break;
6756 try_rr:
6757 po_reg_or_fail (REG_TYPE_RN);
6758 }
6759 break;
6760
6761 case OP_RNSDQ_RNSC:
6762 {
6763 po_scalar_or_goto (8, try_nsdq);
6764 break;
6765 try_nsdq:
6766 po_reg_or_fail (REG_TYPE_NSDQ);
6767 }
6768 break;
6769
6770 case OP_RNDQ_RNSC:
6771 {
6772 po_scalar_or_goto (8, try_ndq);
6773 break;
6774 try_ndq:
6775 po_reg_or_fail (REG_TYPE_NDQ);
6776 }
6777 break;
6778
6779 case OP_RND_RNSC:
6780 {
6781 po_scalar_or_goto (8, try_vfd);
6782 break;
6783 try_vfd:
6784 po_reg_or_fail (REG_TYPE_VFD);
6785 }
6786 break;
6787
6788 case OP_VMOV:
6789 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6790 not careful then bad things might happen. */
6791 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
6792 break;
6793
6794 case OP_RNDQ_Ibig:
6795 {
6796 po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
6797 break;
6798 try_immbig:
6799 /* There's a possibility of getting a 64-bit immediate here, so
6800 we need special handling. */
6801 if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
6802 == FAIL)
6803 {
6804 inst.error = _("immediate value is out of range");
6805 goto failure;
6806 }
6807 }
6808 break;
6809
6810 case OP_RNDQ_I63b:
6811 {
6812 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
6813 break;
6814 try_shimm:
6815 po_imm_or_fail (0, 63, TRUE);
6816 }
6817 break;
6818
6819 case OP_RRnpcb:
6820 po_char_or_fail ('[');
6821 po_reg_or_fail (REG_TYPE_RN);
6822 po_char_or_fail (']');
6823 break;
6824
6825 case OP_RRnpctw:
6826 case OP_RRw:
6827 case OP_oRRw:
6828 po_reg_or_fail (REG_TYPE_RN);
6829 if (skip_past_char (&str, '!') == SUCCESS)
6830 inst.operands[i].writeback = 1;
6831 break;
6832
6833 /* Immediates */
6834 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
6835 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
6836 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
6837 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
6838 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
6839 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
6840 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
6841 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
6842 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
6843 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
6844 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
6845 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
6846
6847 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
6848 case OP_oI7b:
6849 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
6850 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
6851 case OP_oI31b:
6852 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
6853 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
6854 case OP_oI32z: po_imm_or_fail ( 0, 32, TRUE); break;
6855 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
6856
6857 /* Immediate variants */
6858 case OP_oI255c:
6859 po_char_or_fail ('{');
6860 po_imm_or_fail (0, 255, TRUE);
6861 po_char_or_fail ('}');
6862 break;
6863
6864 case OP_I31w:
6865 /* The expression parser chokes on a trailing !, so we have
6866 to find it first and zap it. */
6867 {
6868 char *s = str;
6869 while (*s && *s != ',')
6870 s++;
6871 if (s[-1] == '!')
6872 {
6873 s[-1] = '\0';
6874 inst.operands[i].writeback = 1;
6875 }
6876 po_imm_or_fail (0, 31, TRUE);
6877 if (str == s - 1)
6878 str = s;
6879 }
6880 break;
6881
6882 /* Expressions */
6883 case OP_EXPi: EXPi:
6884 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6885 GE_OPT_PREFIX));
6886 break;
6887
6888 case OP_EXP:
6889 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6890 GE_NO_PREFIX));
6891 break;
6892
6893 case OP_EXPr: EXPr:
6894 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6895 GE_NO_PREFIX));
6896 if (inst.reloc.exp.X_op == O_symbol)
6897 {
6898 val = parse_reloc (&str);
6899 if (val == -1)
6900 {
6901 inst.error = _("unrecognized relocation suffix");
6902 goto failure;
6903 }
6904 else if (val != BFD_RELOC_UNUSED)
6905 {
6906 inst.operands[i].imm = val;
6907 inst.operands[i].hasreloc = 1;
6908 }
6909 }
6910 break;
6911
6912 /* Operand for MOVW or MOVT. */
6913 case OP_HALF:
6914 po_misc_or_fail (parse_half (&str));
6915 break;
6916
6917 /* Register or expression. */
6918 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
6919 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
6920
6921 /* Register or immediate. */
6922 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
6923 I0: po_imm_or_fail (0, 0, FALSE); break;
6924
6925 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
6926 IF:
6927 if (!is_immediate_prefix (*str))
6928 goto bad_args;
6929 str++;
6930 val = parse_fpa_immediate (&str);
6931 if (val == FAIL)
6932 goto failure;
6933 /* FPA immediates are encoded as registers 8-15.
6934 parse_fpa_immediate has already applied the offset. */
6935 inst.operands[i].reg = val;
6936 inst.operands[i].isreg = 1;
6937 break;
6938
6939 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
6940 I32z: po_imm_or_fail (0, 32, FALSE); break;
6941
6942 /* Two kinds of register. */
6943 case OP_RIWR_RIWC:
6944 {
6945 struct reg_entry *rege = arm_reg_parse_multi (&str);
6946 if (!rege
6947 || (rege->type != REG_TYPE_MMXWR
6948 && rege->type != REG_TYPE_MMXWC
6949 && rege->type != REG_TYPE_MMXWCG))
6950 {
6951 inst.error = _("iWMMXt data or control register expected");
6952 goto failure;
6953 }
6954 inst.operands[i].reg = rege->number;
6955 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
6956 }
6957 break;
6958
6959 case OP_RIWC_RIWG:
6960 {
6961 struct reg_entry *rege = arm_reg_parse_multi (&str);
6962 if (!rege
6963 || (rege->type != REG_TYPE_MMXWC
6964 && rege->type != REG_TYPE_MMXWCG))
6965 {
6966 inst.error = _("iWMMXt control register expected");
6967 goto failure;
6968 }
6969 inst.operands[i].reg = rege->number;
6970 inst.operands[i].isreg = 1;
6971 }
6972 break;
6973
6974 /* Misc */
6975 case OP_CPSF: val = parse_cps_flags (&str); break;
6976 case OP_ENDI: val = parse_endian_specifier (&str); break;
6977 case OP_oROR: val = parse_ror (&str); break;
6978 case OP_COND: val = parse_cond (&str); break;
6979 case OP_oBARRIER_I15:
6980 po_barrier_or_imm (str); break;
6981 immediate:
6982 if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
6983 goto failure;
6984 break;
6985
6986 case OP_wPSR:
6987 case OP_rPSR:
6988 po_reg_or_goto (REG_TYPE_RNB, try_psr);
6989 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
6990 {
6991 inst.error = _("Banked registers are not available with this "
6992 "architecture.");
6993 goto failure;
6994 }
6995 break;
6996 try_psr:
6997 val = parse_psr (&str, op_parse_code == OP_wPSR);
6998 break;
6999
7000 case OP_APSR_RR:
7001 po_reg_or_goto (REG_TYPE_RN, try_apsr);
7002 break;
7003 try_apsr:
7004 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7005 instruction). */
7006 if (strncasecmp (str, "APSR_", 5) == 0)
7007 {
7008 unsigned found = 0;
7009 str += 5;
7010 while (found < 15)
7011 switch (*str++)
7012 {
7013 case 'c': found = (found & 1) ? 16 : found | 1; break;
7014 case 'n': found = (found & 2) ? 16 : found | 2; break;
7015 case 'z': found = (found & 4) ? 16 : found | 4; break;
7016 case 'v': found = (found & 8) ? 16 : found | 8; break;
7017 default: found = 16;
7018 }
7019 if (found != 15)
7020 goto failure;
7021 inst.operands[i].isvec = 1;
7022 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7023 inst.operands[i].reg = REG_PC;
7024 }
7025 else
7026 goto failure;
7027 break;
7028
7029 case OP_TB:
7030 po_misc_or_fail (parse_tb (&str));
7031 break;
7032
7033 /* Register lists. */
7034 case OP_REGLST:
7035 val = parse_reg_list (&str);
7036 if (*str == '^')
7037 {
7038 inst.operands[i].writeback = 1;
7039 str++;
7040 }
7041 break;
7042
7043 case OP_VRSLST:
7044 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
7045 break;
7046
7047 case OP_VRDLST:
7048 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
7049 break;
7050
7051 case OP_VRSDLST:
7052 /* Allow Q registers too. */
7053 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7054 REGLIST_NEON_D);
7055 if (val == FAIL)
7056 {
7057 inst.error = NULL;
7058 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7059 REGLIST_VFP_S);
7060 inst.operands[i].issingle = 1;
7061 }
7062 break;
7063
7064 case OP_NRDLST:
7065 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7066 REGLIST_NEON_D);
7067 break;
7068
7069 case OP_NSTRLST:
7070 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7071 &inst.operands[i].vectype);
7072 break;
7073
7074 /* Addressing modes */
7075 case OP_ADDR:
7076 po_misc_or_fail (parse_address (&str, i));
7077 break;
7078
7079 case OP_ADDRGLDR:
7080 po_misc_or_fail_no_backtrack (
7081 parse_address_group_reloc (&str, i, GROUP_LDR));
7082 break;
7083
7084 case OP_ADDRGLDRS:
7085 po_misc_or_fail_no_backtrack (
7086 parse_address_group_reloc (&str, i, GROUP_LDRS));
7087 break;
7088
7089 case OP_ADDRGLDC:
7090 po_misc_or_fail_no_backtrack (
7091 parse_address_group_reloc (&str, i, GROUP_LDC));
7092 break;
7093
7094 case OP_SH:
7095 po_misc_or_fail (parse_shifter_operand (&str, i));
7096 break;
7097
7098 case OP_SHG:
7099 po_misc_or_fail_no_backtrack (
7100 parse_shifter_operand_group_reloc (&str, i));
7101 break;
7102
7103 case OP_oSHll:
7104 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
7105 break;
7106
7107 case OP_oSHar:
7108 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
7109 break;
7110
7111 case OP_oSHllar:
7112 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
7113 break;
7114
7115 default:
7116 as_fatal (_("unhandled operand code %d"), op_parse_code);
7117 }
7118
7119 /* Various value-based sanity checks and shared operations. We
7120 do not signal immediate failures for the register constraints;
7121 this allows a syntax error to take precedence. */
7122 switch (op_parse_code)
7123 {
7124 case OP_oRRnpc:
7125 case OP_RRnpc:
7126 case OP_RRnpcb:
7127 case OP_RRw:
7128 case OP_oRRw:
7129 case OP_RRnpc_I0:
7130 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
7131 inst.error = BAD_PC;
7132 break;
7133
7134 case OP_oRRnpcsp:
7135 case OP_RRnpcsp:
7136 if (inst.operands[i].isreg)
7137 {
7138 if (inst.operands[i].reg == REG_PC)
7139 inst.error = BAD_PC;
7140 else if (inst.operands[i].reg == REG_SP)
7141 inst.error = BAD_SP;
7142 }
7143 break;
7144
7145 case OP_RRnpctw:
7146 if (inst.operands[i].isreg
7147 && inst.operands[i].reg == REG_PC
7148 && (inst.operands[i].writeback || thumb))
7149 inst.error = BAD_PC;
7150 break;
7151
7152 case OP_CPSF:
7153 case OP_ENDI:
7154 case OP_oROR:
7155 case OP_wPSR:
7156 case OP_rPSR:
7157 case OP_COND:
7158 case OP_oBARRIER_I15:
7159 case OP_REGLST:
7160 case OP_VRSLST:
7161 case OP_VRDLST:
7162 case OP_VRSDLST:
7163 case OP_NRDLST:
7164 case OP_NSTRLST:
7165 if (val == FAIL)
7166 goto failure;
7167 inst.operands[i].imm = val;
7168 break;
7169
7170 default:
7171 break;
7172 }
7173
7174 /* If we get here, this operand was successfully parsed. */
7175 inst.operands[i].present = 1;
7176 continue;
7177
7178 bad_args:
7179 inst.error = BAD_ARGS;
7180
7181 failure:
7182 if (!backtrack_pos)
7183 {
7184 /* The parse routine should already have set inst.error, but set a
7185 default here just in case. */
7186 if (!inst.error)
7187 inst.error = _("syntax error");
7188 return FAIL;
7189 }
7190
7191 /* Do not backtrack over a trailing optional argument that
7192 absorbed some text. We will only fail again, with the
7193 'garbage following instruction' error message, which is
7194 probably less helpful than the current one. */
7195 if (backtrack_index == i && backtrack_pos != str
7196 && upat[i+1] == OP_stop)
7197 {
7198 if (!inst.error)
7199 inst.error = _("syntax error");
7200 return FAIL;
7201 }
7202
7203 /* Try again, skipping the optional argument at backtrack_pos. */
7204 str = backtrack_pos;
7205 inst.error = backtrack_error;
7206 inst.operands[backtrack_index].present = 0;
7207 i = backtrack_index;
7208 backtrack_pos = 0;
7209 }
7210
7211 /* Check that we have parsed all the arguments. */
7212 if (*str != '\0' && !inst.error)
7213 inst.error = _("garbage following instruction");
7214
7215 return inst.error ? FAIL : SUCCESS;
7216 }
7217
7218 #undef po_char_or_fail
7219 #undef po_reg_or_fail
7220 #undef po_reg_or_goto
7221 #undef po_imm_or_fail
7222 #undef po_scalar_or_fail
7223 #undef po_barrier_or_imm
7224
/* Shorthand macro for instruction encoding functions issuing errors.
   Evaluates EXPR; if true, records ERR in inst.error and returns from
   the *calling* function, so it may only be used inside functions
   returning void.  EXPR and ERR are each evaluated at most once.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
7236
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.
   Sets inst.error to BAD_SP or BAD_PC and returns from the calling
   function when REG is R13 (SP) or R15 (PC).  REG is evaluated more
   than once, so pass a simple lvalue.  */
#define reject_bad_reg(reg)				\
  do							\
   if (reg == REG_SP || reg == REG_PC)			\
     {							\
       inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC;	\
       return;						\
     }							\
  while (0)
7248
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  Only diagnoses (via as_tsktsk) when the user enabled
   deprecation warnings; never fails the assembly.  */
#define warn_deprecated_sp(reg)			\
  do						\
    if (warn_on_deprecated && reg == REG_SP)	\
       as_tsktsk (_("use of r13 is deprecated"));	\
  while (0)
7256
7257 /* Functions for operand encoding. ARM, then Thumb. */
7258
/* Rotate V left by N bits.  Both shift counts are masked with 31 so a
   rotation by 0 (or 32) is well-defined rather than shifting by the
   full word width (which would be undefined behaviour).  Arguments
   are parenthesized so the macro stays safe when handed compound
   expressions.  */
#define rotate_left(v, n) ((v) << ((n) & 31) | (v) >> ((32 - (n)) & 31))
7260
7261 /* If VAL can be encoded in the immediate field of an ARM instruction,
7262 return the encoded form. Otherwise, return FAIL. */
7263
7264 static unsigned int
7265 encode_arm_immediate (unsigned int val)
7266 {
7267 unsigned int a, i;
7268
7269 for (i = 0; i < 32; i += 2)
7270 if ((a = rotate_left (val, i)) <= 0xff)
7271 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
7272
7273 return FAIL;
7274 }
7275
7276 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7277 return the encoded form. Otherwise, return FAIL. */
7278 static unsigned int
7279 encode_thumb32_immediate (unsigned int val)
7280 {
7281 unsigned int a, i;
7282
7283 if (val <= 0xff)
7284 return val;
7285
7286 for (i = 1; i <= 24; i++)
7287 {
7288 a = val >> i;
7289 if ((val & ~(0xff << i)) == 0)
7290 return ((val >> i) & 0x7f) | ((32 - i) << 7);
7291 }
7292
7293 a = val & 0xff;
7294 if (val == ((a << 16) | a))
7295 return 0x100 | a;
7296 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
7297 return 0x300 | a;
7298
7299 a = val & 0xff00;
7300 if (val == ((a << 16) | a))
7301 return 0x200 | (a >> 8);
7302
7303 return FAIL;
7304 }
/* Encode a VFP SP or DP register number into inst.instruction.

   REG is the raw register number (0-31); POS selects the operand slot
   and hence the bit positions used.  A single-precision register
   splits into a 4-bit field plus one low bit; a double-precision
   register into a 4-bit field plus one high (D/N/M extension) bit.
   D16-D31 require the d32 (VFPv3 32-register) extension.  */

static void
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
{
  /* D16-D31 are only encodable when the d32 extension is present;
     record its use in the arch-used feature set, or report an error.  */
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      && reg > 15)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	{
	  first_error (_("D register out of range for selected VFP version"));
	  return;
	}
    }

  switch (pos)
    {
    /* Single precision: high 4 bits in the register field, low bit in
       the extension bit (22/7/5 for d/n/m respectively).  */
    case VFP_REG_Sd:
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      break;

    case VFP_REG_Sn:
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      break;

    case VFP_REG_Sm:
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      break;

    /* Double precision: low 4 bits in the register field, bit 4 in
       the extension bit.  */
    case VFP_REG_Dd:
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      break;

    case VFP_REG_Dn:
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      break;

    case VFP_REG_Dm:
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
      break;

    default:
      abort ();
    }
}
7359
7360 /* Encode a <shift> in an ARM-format instruction. The immediate,
7361 if any, is handled by md_apply_fix. */
7362 static void
7363 encode_arm_shift (int i)
7364 {
7365 if (inst.operands[i].shift_kind == SHIFT_RRX)
7366 inst.instruction |= SHIFT_ROR << 5;
7367 else
7368 {
7369 inst.instruction |= inst.operands[i].shift_kind << 5;
7370 if (inst.operands[i].immisreg)
7371 {
7372 inst.instruction |= SHIFT_BY_REG;
7373 inst.instruction |= inst.operands[i].imm << 8;
7374 }
7375 else
7376 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7377 }
7378 }
7379
7380 static void
7381 encode_arm_shifter_operand (int i)
7382 {
7383 if (inst.operands[i].isreg)
7384 {
7385 inst.instruction |= inst.operands[i].reg;
7386 encode_arm_shift (i);
7387 }
7388 else
7389 {
7390 inst.instruction |= INST_IMMEDIATE;
7391 if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
7392 inst.instruction |= inst.operands[i].imm;
7393 }
7394 }
7395
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encodes the base register plus the P (pre-index) and W (write-back)
   bits for operand I, and rejects addressing forms the instruction
   cannot take.  IS_T means the caller is a "T"-suffixed (user-mode
   ldrt/strt style) instruction, which only permits post-indexing.  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* PR 14260:
     Generate an error if the operand is not a register.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  /* Base register in bits [19:16].  */
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      gas_assert (inst.operands[i].writeback);
      /* For "T" instructions the W bit selects the user-mode access;
	 post-indexing itself is implied by a clear P bit.  */
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Warn when the base register (bits [19:16]) that will be written
     back is the same as the transfer register (bits [15:12]).  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
7438
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset, optionally scaled by an immediate shift.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      /* In mode 2, a *set* I bit selects the register-offset form.  */
      inst.instruction |= INST_IMMEDIATE;	/* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      /* Shift amount is fixed up later.  */
	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.reloc */
    {
      if (is_pc && !inst.reloc.pc_rel)
	{
	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_tsktsk (_("use of PC in this instruction is deprecated"));
	}

      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;
	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
	}
    }
}
7498
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no scaled-register form at all.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset in bits [3:0].  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_t && inst.operands[i].reg == REG_PC)),
		  BAD_PC_ADDRESSING);
      constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
		  BAD_PC_WRITEBACK);
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.reloc */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      /* Immediate form: split 8-bit offset, fixed up later.  */
      inst.instruction |= HWOFFSET_IMM;
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;

	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
	}
    }
}
7542
7543 /* Write immediate bits [7:0] to the following locations:
7544
7545 |28/24|23 19|18 16|15 4|3 0|
7546 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7547
7548 This function is used by VMOV/VMVN/VORR/VBIC. */
7549
7550 static void
7551 neon_write_immbits (unsigned immbits)
7552 {
7553 inst.instruction |= immbits & 0xf;
7554 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
7555 inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
7556 }
7557
/* Invert low-order SIZE bits of XHI:XLO.  Either pointer may be NULL,
   in which case that half is treated as zero and not written back.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned lo = xlo ? *xlo : 0;
  unsigned hi = xhi ? *xhi : 0;

  switch (size)
    {
    case 8:
      lo = ~lo & 0xff;
      break;

    case 16:
      lo = ~lo & 0xffff;
      break;

    case 64:
      hi = ~hi & 0xffffffff;
      /* fall through - the low word is inverted as for size 32.  */

    case 32:
      lo = ~lo & 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = lo;

  if (xhi)
    *xhi = hi;
}
7594
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  int byte;

  /* Every byte must be all-zeros or all-ones.  */
  for (byte = 0; byte < 4; byte++)
    {
      unsigned field = (imm >> (byte * 8)) & 0xff;

      if (field != 0 && field != 0xff)
	return 0;
    }

  return 1;
}
7606
/* For immediate of above form, return 0bABCD (the low bit of each
   byte, collected into a 4-bit value).  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned result = 0;
  int byte;

  for (byte = 0; byte < 4; byte++)
    result |= ((imm >> (byte * 8)) & 1) << byte;

  return result;
}
7615
/* Compress quarter-float representation to 0b...000 abcdefgh: the
   sign bit plus the top seven exponent/fraction bits of an IEEE
   single.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned low7 = (imm >> 19) & 0x7f;	/* bcdefgh.  */
  unsigned sign = (imm >> 24) & 0x80;	/* a (bit 31 -> bit 7).  */

  return sign | low7;
}
7623
/* Returns CMODE.  IMMBITS [7:0] is set to bits suitable for inserting into
   the instruction.  *OP is passed as the initial value of the op field, and
   may be set to a different value depending on the constant (i.e.
   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
   MVN).  If the immediate looks like a repeated pattern then also
   try smaller element sizes.

   IMMHI:IMMLO is the 64-bit constant; FLOAT_P says whether a
   quarter-float encoding may be used; SIZE is the requested element
   size; TYPE the parsed element type.  Returns FAIL if no cmode
   matches.  */

static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* cmode 0xf: 8-bit quarter-float (only valid for 32-bit elements,
     and not for the inverted/MVN form).  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* cmode 0xe with op 1: each byte of the 64-bit value is all
	 zeros or all ones, squashed into one bit per byte.  */
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* Otherwise a 64-bit constant is only encodable if both halves
	 are equal; fall through and treat it as 32-bit.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* cmodes 0x0/0x2/0x4/0x6: a single byte at byte position 0-3.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      /* cmodes 0xc/0xd: a byte shifted in over ones (0x0000XYff /
	 0x00XYffff).  */
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* A repeated 16-bit pattern may still match as 16-bit elements.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* cmodes 0x8/0xa: a single byte in either half of a 16-bit
	 element.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* A repeated byte may still match as 8-bit elements.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
7733
7734 #if defined BFD_HOST_64_BIT
7735 /* Returns TRUE if double precision value V may be cast
7736 to single precision without loss of accuracy. */
7737
7738 static bfd_boolean
7739 is_double_a_single (bfd_int64_t v)
7740 {
7741 int exp = (int)((v >> 52) & 0x7FF);
7742 bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
7743
7744 return (exp == 0 || exp == 0x7FF
7745 || (exp >= 1023 - 126 && exp <= 1023 + 127))
7746 && (mantissa & 0x1FFFFFFFl) == 0;
7747 }
7748
/* Returns a double precision value casted to single precision
   (ignoring the least significant bits in exponent and mantissa).

   The only visible caller guards this with is_double_a_single, so the
   overflow/underflow branches below appear to be unreachable belt and
   braces — TODO confirm.  */

static int
double_to_single (bfd_int64_t v)
{
  int sign = (int) ((v >> 63) & 1l);
  int exp = (int) ((v >> 52) & 0x7FF);
  bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);

  if (exp == 0x7FF)
    /* Inf/NaN: the single-precision exponent is also all-ones.  */
    exp = 0xFF;
  else
    {
      /* Re-bias the exponent from double (1023) to single (127).  */
      exp = exp - 1023 + 127;
      if (exp >= 0xFF)
	{
	  /* Infinity.
	     NOTE(review): 0x7F gives a biased exponent of 127 (value
	     1.0 with zero mantissa), not the all-ones 0xFF infinity
	     encoding the comment implies; the path looks unreachable
	     under the is_double_a_single guard — confirm before
	     relying on it.  */
	  exp = 0x7F;
	  mantissa = 0;
	}
      else if (exp < 0)
	{
	  /* No denormalized numbers.  */
	  exp = 0;
	  mantissa = 0;
	}
    }
  /* Keep the top 23 mantissa bits and assemble the IEEE single.  */
  mantissa >>= 29;
  return (sign << 31) | (exp << 23) | mantissa;
}
7780 #endif /* BFD_HOST_64_BIT */
7781
/* Kind of constant being loaded by an "=expr" pseudo-operand; selects
   which move-immediate encodings may replace the literal-pool load.  */
enum lit_type
{
  CONST_THUMB,	/* Core-register destination, Thumb encoding.  */
  CONST_ARM,	/* Core-register destination, ARM encoding.  */
  CONST_VEC	/* VFP/Neon register destination.  */
};

static void do_vfp_nsyn_opcode (const char *);
7790
/* inst.reloc.exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return TRUE; if it can't, convert inst.instruction to a literal-pool
   load and return FALSE.  If this is not a valid thing to do in the
   current context, set inst.error and return TRUE.

   inst.operands[i] describes the destination register.  */

static bfd_boolean
move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
{
  unsigned long tbit;
  bfd_boolean thumb_p = (t == CONST_THUMB);
  bfd_boolean arm_p = (t == CONST_ARM);

  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  /* The "=expr" pseudo is only valid on load instructions.  */
  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }

  if (inst.reloc.exp.X_op != O_constant
      && inst.reloc.exp.X_op != O_symbol
      && inst.reloc.exp.X_op != O_big)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }

  if (inst.reloc.exp.X_op == O_constant
      || inst.reloc.exp.X_op == O_big)
    {
#if defined BFD_HOST_64_BIT
      bfd_int64_t v;
#else
      offsetT v;
#endif
      if (inst.reloc.exp.X_op == O_big)
	{
	  /* Reassemble the bignum (or converted flonum) littlenums
	     into a host integer V.  */
	  LITTLENUM_TYPE w[X_PRECISION];
	  LITTLENUM_TYPE * l;

	  if (inst.reloc.exp.X_add_number == -1)
	    {
	      gen_to_words (w, X_PRECISION, E_PRECISION);
	      l = w;
	      /* FIXME: Should we check words w[2..5] ?  */
	    }
	  else
	    l = generic_bignum;

#if defined BFD_HOST_64_BIT
	  v =
	    ((((((((bfd_int64_t) l[3] & LITTLENUM_MASK)
		  << LITTLENUM_NUMBER_OF_BITS)
		 | ((bfd_int64_t) l[2] & LITTLENUM_MASK))
		<< LITTLENUM_NUMBER_OF_BITS)
	       | ((bfd_int64_t) l[1] & LITTLENUM_MASK))
	      << LITTLENUM_NUMBER_OF_BITS)
	     | ((bfd_int64_t) l[0] & LITTLENUM_MASK));
#else
	  v = ((l[1] & LITTLENUM_MASK) << LITTLENUM_NUMBER_OF_BITS)
	      | (l[0] & LITTLENUM_MASK);
#endif
	}
      else
	v = inst.reloc.exp.X_add_number;

      if (!inst.operands[i].issingle)
	{
	  if (thumb_p)
	    {
	      /* This can be encoded only for a low register.  */
	      if ((v & ~0xFF) == 0 && (inst.operands[i].reg < 8))
		{
		  /* This can be done with a mov(1) instruction.  */
		  inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
		  inst.instruction |= v;
		  return TRUE;
		}

	      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
		  || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		{
		  /* Check if on thumb2 it can be done with a mov.w, mvn or
		     movw instruction.  */
		  unsigned int newimm;
		  bfd_boolean isNegated;

		  /* Try the value itself, then its complement (which
		     can be loaded with MVN).  */
		  newimm = encode_thumb32_immediate (v);
		  if (newimm != (unsigned int) FAIL)
		    isNegated = FALSE;
		  else
		    {
		      newimm = encode_thumb32_immediate (~v);
		      if (newimm != (unsigned int) FAIL)
			isNegated = TRUE;
		    }

		  /* The number can be loaded with a mov.w or mvn
		     instruction.  */
		  if (newimm != (unsigned int) FAIL
		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		    {
		      inst.instruction = (0xf04f0000  /*  MOV.W.  */
					  | (inst.operands[i].reg << 8));
		      /* Change to MOVN.  */
		      inst.instruction |= (isNegated ? 0x200000 : 0);
		      /* Scatter the modified immediate across the
			 i:imm3:imm8 fields.  */
		      inst.instruction |= (newimm & 0x800) << 15;
		      inst.instruction |= (newimm & 0x700) << 4;
		      inst.instruction |= (newimm & 0x0ff);
		      return TRUE;
		    }
		  /* The number can be loaded with a movw instruction.  */
		  else if ((v & ~0xFFFF) == 0
			   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		    {
		      int imm = v & 0xFFFF;

		      inst.instruction = 0xf2400000;  /* MOVW.  */
		      inst.instruction |= (inst.operands[i].reg << 8);
		      inst.instruction |= (imm & 0xf000) << 4;
		      inst.instruction |= (imm & 0x0800) << 15;
		      inst.instruction |= (imm & 0x0700) << 4;
		      inst.instruction |= (imm & 0x00ff);
		      return TRUE;
		    }
		}
	    }
	  else if (arm_p)
	    {
	      int value = encode_arm_immediate (v);

	      if (value != FAIL)
		{
		  /* This can be done with a mov instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}

	      value = encode_arm_immediate (~ v);
	      if (value != FAIL)
		{
		  /* This can be done with a mvn instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}
	    }
	  else if (t == CONST_VEC)
	    {
	      int op = 0;
	      unsigned immbits = 0;
	      unsigned immlo = inst.operands[1].imm;
	      /* High word: explicit, zero for unsigned constants, or
		 the sign-extension of the low word.  */
	      unsigned immhi = inst.operands[1].regisimm
		? inst.operands[1].reg
		: inst.reloc.exp.X_unsigned
		? 0
		: ((bfd_int64_t)((int) immlo)) >> 32;
	      int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);

	      /* If the constant itself cannot be encoded, try its
		 bitwise inverse with the op bit flipped (VMOV<->VMVN).  */
	      if (cmode == FAIL)
		{
		  neon_invert_size (&immlo, &immhi, 64);
		  op = !op;
		  cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);
		}

	      if (cmode != FAIL)
		{
		  inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
		    | (1 << 23)
		    | (cmode << 8)
		    | (op << 5)
		    | (1 << 4);

		  /* Fill other bits in vmov encoding for both thumb and arm.  */
		  if (thumb_mode)
		    inst.instruction |= (0x7U << 29) | (0xF << 24);
		  else
		    inst.instruction |= (0xFU << 28) | (0x1 << 25);
		  neon_write_immbits (immbits);
		  return TRUE;
		}
	    }
	}

      if (t == CONST_VEC)
	{
	  /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant.  */
	  if (inst.operands[i].issingle
	      && is_quarter_float (inst.operands[1].imm)
	      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
	    {
	      inst.operands[1].imm =
		neon_qfloat_bits (v);
	      do_vfp_nsyn_opcode ("fconsts");
	      return TRUE;
	    }

	  /* If our host does not support a 64-bit type then we cannot perform
	     the following optimization.  This mean that there will be a
	     discrepancy between the output produced by an assembler built for
	     a 32-bit-only host and the output produced from a 64-bit host, but
	     this cannot be helped.  */
#if defined BFD_HOST_64_BIT
	  else if (!inst.operands[1].issingle
		   && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	    {
	      if (is_double_a_single (v)
		  && is_quarter_float (double_to_single (v)))
		{
		  inst.operands[1].imm =
		    neon_qfloat_bits (double_to_single (v));
		  do_vfp_nsyn_opcode ("fconstd");
		  return TRUE;
		}
	    }
#endif
	}
    }

  /* No move encoding fits: fall back to a PC-relative literal-pool
     load.  */
  if (add_to_lit_pool ((!inst.operands[i].isvec
			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
    return TRUE;

  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
8039
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format instruction.  Reject all forms which cannot be encoded
   into a coprocessor load/store instruction.  If wb_ok is false,
   reject use of writeback; if unind_ok is false, reject use of
   unindexed addressing.  If reloc_override is not 0, use it instead
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   (in which case it is preserved).

   Returns SUCCESS or FAIL (with inst.error set).  */

static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  if (!inst.operands[i].isreg)
    {
      /* PR 18256 */
      if (! inst.operands[0].isvec)
	{
	  inst.error = _("invalid co-processor operand");
	  return FAIL;
	}
      /* "=constant" on a vector destination: may turn into a vmov.  */
      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
	return SUCCESS;
    }

  /* Base register in bits [19:16].  */
  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* Unindexed form: the 8-bit field carries an option value, and
	 the U bit must be set.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  /* Pick the relocation for the offset, preserving any group
     relocation already selected by the parser.  */
  if (reloc_override)
    inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
8116
8117 /* Functions for instruction encoding, sorted by sub-architecture.
8118 First some generics; their names are taken from the conventional
8119 bit positions for register arguments in ARM format instructions. */
8120
static void
do_noargs (void)
{
  /* Nothing to encode beyond the opcode taken from the insns[] table.  */
}
8125
8126 static void
8127 do_rd (void)
8128 {
8129 inst.instruction |= inst.operands[0].reg << 12;
8130 }
8131
8132 static void
8133 do_rd_rm (void)
8134 {
8135 inst.instruction |= inst.operands[0].reg << 12;
8136 inst.instruction |= inst.operands[1].reg;
8137 }
8138
8139 static void
8140 do_rm_rn (void)
8141 {
8142 inst.instruction |= inst.operands[0].reg;
8143 inst.instruction |= inst.operands[1].reg << 16;
8144 }
8145
8146 static void
8147 do_rd_rn (void)
8148 {
8149 inst.instruction |= inst.operands[0].reg << 12;
8150 inst.instruction |= inst.operands[1].reg << 16;
8151 }
8152
8153 static void
8154 do_rn_rd (void)
8155 {
8156 inst.instruction |= inst.operands[0].reg << 16;
8157 inst.instruction |= inst.operands[1].reg << 12;
8158 }
8159
8160 static void
8161 do_tt (void)
8162 {
8163 inst.instruction |= inst.operands[0].reg << 8;
8164 inst.instruction |= inst.operands[1].reg << 16;
8165 }
8166
8167 static bfd_boolean
8168 check_obsolete (const arm_feature_set *feature, const char *msg)
8169 {
8170 if (ARM_CPU_IS_ANY (cpu_variant))
8171 {
8172 as_tsktsk ("%s", msg);
8173 return TRUE;
8174 }
8175 else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
8176 {
8177 as_bad ("%s", msg);
8178 return TRUE;
8179 }
8180
8181 return FALSE;
8182 }
8183
8184 static void
8185 do_rd_rm_rn (void)
8186 {
8187 unsigned Rn = inst.operands[2].reg;
8188 /* Enforce restrictions on SWP instruction. */
8189 if ((inst.instruction & 0x0fbfffff) == 0x01000090)
8190 {
8191 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
8192 _("Rn must not overlap other operands"));
8193
8194 /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
8195 */
8196 if (!check_obsolete (&arm_ext_v8,
8197 _("swp{b} use is obsoleted for ARMv8 and later"))
8198 && warn_on_deprecated
8199 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
8200 as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
8201 }
8202
8203 inst.instruction |= inst.operands[0].reg << 12;
8204 inst.instruction |= inst.operands[1].reg;
8205 inst.instruction |= Rn << 16;
8206 }
8207
8208 static void
8209 do_rd_rn_rm (void)
8210 {
8211 inst.instruction |= inst.operands[0].reg << 12;
8212 inst.instruction |= inst.operands[1].reg << 16;
8213 inst.instruction |= inst.operands[2].reg;
8214 }
8215
8216 static void
8217 do_rm_rd_rn (void)
8218 {
8219 constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
8220 constraint (((inst.reloc.exp.X_op != O_constant
8221 && inst.reloc.exp.X_op != O_illegal)
8222 || inst.reloc.exp.X_add_number != 0),
8223 BAD_ADDR_MODE);
8224 inst.instruction |= inst.operands[0].reg;
8225 inst.instruction |= inst.operands[1].reg << 12;
8226 inst.instruction |= inst.operands[2].reg << 16;
8227 }
8228
static void
do_imm0 (void)
{
  /* Single immediate operand, placed in the low bits of the opcode.  */
  inst.instruction |= inst.operands[0].imm;
}
8234
8235 static void
8236 do_rd_cpaddr (void)
8237 {
8238 inst.instruction |= inst.operands[0].reg << 12;
8239 encode_arm_cp_address (1, TRUE, TRUE, 0);
8240 }
8241
8242 /* ARM instructions, in alphabetical order by function name (except
8243 that wrapper functions appear immediately after the function they
8244 wrap). */
8245
8246 /* This is a pseudo-op of the form "adr rd, label" to be converted
8247 into a relative address of the form "add rd, pc, #label-.-8". */
8248
8249 static void
8250 do_adr (void)
8251 {
8252 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
8253
8254 /* Frag hacking will turn this into a sub instruction if the offset turns
8255 out to be negative. */
8256 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
8257 inst.reloc.pc_rel = 1;
8258 inst.reloc.exp.X_add_number -= 8;
8259 }
8260
8261 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8262 into a relative address of the form:
8263 add rd, pc, #low(label-.-8)"
8264 add rd, rd, #high(label-.-8)" */
8265
8266 static void
8267 do_adrl (void)
8268 {
8269 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
8270
8271 /* Frag hacking will turn this into a sub instruction if the offset turns
8272 out to be negative. */
8273 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
8274 inst.reloc.pc_rel = 1;
8275 inst.size = INSN_SIZE * 2;
8276 inst.reloc.exp.X_add_number -= 8;
8277 }
8278
8279 static void
8280 do_arit (void)
8281 {
8282 if (!inst.operands[1].present)
8283 inst.operands[1].reg = inst.operands[0].reg;
8284 inst.instruction |= inst.operands[0].reg << 12;
8285 inst.instruction |= inst.operands[1].reg << 16;
8286 encode_arm_shifter_operand (2);
8287 }
8288
8289 static void
8290 do_barrier (void)
8291 {
8292 if (inst.operands[0].present)
8293 inst.instruction |= inst.operands[0].imm;
8294 else
8295 inst.instruction |= 0xf;
8296 }
8297
8298 static void
8299 do_bfc (void)
8300 {
8301 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8302 constraint (msb > 32, _("bit-field extends past end of register"));
8303 /* The instruction encoding stores the LSB and MSB,
8304 not the LSB and width. */
8305 inst.instruction |= inst.operands[0].reg << 12;
8306 inst.instruction |= inst.operands[1].imm << 7;
8307 inst.instruction |= (msb - 1) << 16;
8308 }
8309
8310 static void
8311 do_bfi (void)
8312 {
8313 unsigned int msb;
8314
8315 /* #0 in second position is alternative syntax for bfc, which is
8316 the same instruction but with REG_PC in the Rm field. */
8317 if (!inst.operands[1].isreg)
8318 inst.operands[1].reg = REG_PC;
8319
8320 msb = inst.operands[2].imm + inst.operands[3].imm;
8321 constraint (msb > 32, _("bit-field extends past end of register"));
8322 /* The instruction encoding stores the LSB and MSB,
8323 not the LSB and width. */
8324 inst.instruction |= inst.operands[0].reg << 12;
8325 inst.instruction |= inst.operands[1].reg;
8326 inst.instruction |= inst.operands[2].imm << 7;
8327 inst.instruction |= (msb - 1) << 16;
8328 }
8329
8330 static void
8331 do_bfx (void)
8332 {
8333 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8334 _("bit-field extends past end of register"));
8335 inst.instruction |= inst.operands[0].reg << 12;
8336 inst.instruction |= inst.operands[1].reg;
8337 inst.instruction |= inst.operands[2].imm << 7;
8338 inst.instruction |= (inst.operands[3].imm - 1) << 16;
8339 }
8340
8341 /* ARM V5 breakpoint instruction (argument parse)
8342 BKPT <16 bit unsigned immediate>
8343 Instruction is not conditional.
8344 The bit pattern given in insns[] has the COND_ALWAYS condition,
8345 and it is an error if the caller tried to override that. */
8346
8347 static void
8348 do_bkpt (void)
8349 {
8350 /* Top 12 of 16 bits to bits 19:8. */
8351 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
8352
8353 /* Bottom 4 of 16 bits to bits 3:0. */
8354 inst.instruction |= inst.operands[0].imm & 0xf;
8355 }
8356
static void
encode_branch (int default_reloc)
{
  /* Set up the (always PC-relative) relocation for a branch to
     operand 0.  A '(plt)' or '(tlscall)' suffix (parsed into
     operands[0].imm with hasreloc set) forces the corresponding reloc;
     otherwise DEFAULT_RELOC is used.  */
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
      inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
	? BFD_RELOC_ARM_PLT32
	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
    }
  else
    inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
  inst.reloc.pc_rel = 1;
}
8373
static void
do_branch (void)
{
  /* B{cond} <label>.  For EABI v4+ ELF objects use the jump-specific
     reloc; otherwise (older EABI, or non-ELF) the generic branch reloc.
     Note the #ifdef splices the if/else around the fallback call.  */
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8384
static void
do_bl (void)
{
  /* BL{cond} <label>.  For EABI v4+ ELF objects, an unconditional BL
     gets the call-specific reloc and a conditional one the jump reloc;
     otherwise the generic branch reloc is used.  */
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8400
8401 /* ARM V5 branch-link-exchange instruction (argument parse)
8402 BLX <target_addr> ie BLX(1)
8403 BLX{<condition>} <Rm> ie BLX(2)
8404 Unfortunately, there are two different opcodes for this mnemonic.
8405 So, the insns[].value is not used, and the code here zaps values
8406 into inst.instruction.
8407 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8408
static void
do_blx (void)
{
  /* See the comment above: BLX(1) takes an address, BLX(2) a register;
     the register form keeps the insns[] opcode, the address form
     rewrites inst.instruction entirely.  */
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction = 0xfa000000;
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
8432
static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
    want_reloc = TRUE;

  /* Note: for non-ELF objects the #ifdef removes the condition, making
     the assignment below unconditional, so no V4BX reloc is emitted.  */
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    want_reloc = FALSE;

  if (want_reloc)
    inst.reloc.type = BFD_RELOC_ARM_V4BX;
}
8456
8457
8458 /* ARM v5TEJ. Jump to Jazelle code. */
8459
8460 static void
8461 do_bxj (void)
8462 {
8463 if (inst.operands[0].reg == REG_PC)
8464 as_tsktsk (_("use of r15 in bxj is not really useful"));
8465
8466 inst.instruction |= inst.operands[0].reg;
8467 }
8468
8469 /* Co-processor data operation:
8470 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8471 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8472 static void
8473 do_cdp (void)
8474 {
8475 inst.instruction |= inst.operands[0].reg << 8;
8476 inst.instruction |= inst.operands[1].imm << 20;
8477 inst.instruction |= inst.operands[2].reg << 12;
8478 inst.instruction |= inst.operands[3].reg << 16;
8479 inst.instruction |= inst.operands[4].reg;
8480 inst.instruction |= inst.operands[5].imm << 5;
8481 }
8482
8483 static void
8484 do_cmp (void)
8485 {
8486 inst.instruction |= inst.operands[0].reg << 16;
8487 encode_arm_shifter_operand (1);
8488 }
8489
8490 /* Transfer between coprocessor and ARM registers.
8491 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8492 MRC2
8493 MCR{cond}
8494 MCR2
8495
8496 No special properties. */
8497
struct deprecated_coproc_regs_s
{
  unsigned cp;			/* Coprocessor number.  */
  int opc1;			/* First opcode field.  */
  unsigned crn;			/* CRn field.  */
  unsigned crm;			/* CRm field.  */
  int opc2;			/* Second opcode field.  */
  arm_feature_set deprecated;	/* Features for which access is deprecated.  */
  arm_feature_set obsoleted;	/* Features for which access is obsolete.  */
  const char *dep_msg;		/* Diagnostic for the deprecated case.  */
  const char *obs_msg;		/* Diagnostic for the obsolete case.  */
};
8510
#define DEPR_ACCESS_V8 \
  N_("This coprocessor register access is deprecated in ARMv8")

/* Table of all deprecated coprocessor registers.  Scanned by do_co_reg
   to warn about MRC/MCR accesses to these encodings.  */
static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
{
    {15, 0, 7, 10, 5,					/* CP15DMB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 10, 4,					/* CP15DSB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7,  5, 4,					/* CP15ISB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 1,  0, 0,					/* TEEHBR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 0,  0, 0,					/* TEECR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
};

#undef DEPR_ACCESS_V8

/* Number of entries in the table above.  */
static const size_t deprecated_coproc_reg_count =
  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
8538
static void
do_co_reg (void)
{
  /* MRC/MRC2/MCR/MCR2: transfer between a core register and a
     coprocessor register.  Validate Rd, warn about deprecated
     coprocessor registers, then pack the fields.  */
  unsigned Rd;
  size_t i;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else
	/* MRC, MRC2  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

  /* Warn when the access names a coprocessor register that ARMv8
     deprecates (see deprecated_coproc_regs above).  */
  for (i = 0; i < deprecated_coproc_reg_count; ++i)
    {
      const struct deprecated_coproc_regs_s *r =
	deprecated_coproc_regs + i;

      if (inst.operands[0].reg == r->cp
	  && inst.operands[1].imm == r->opc1
	  && inst.operands[3].reg == r->crn
	  && inst.operands[4].reg == r->crm
	  && inst.operands[5].imm == r->opc2)
	{
	  if (! ARM_CPU_IS_ANY (cpu_variant)
	      && warn_on_deprecated
	      && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	    as_tsktsk ("%s", r->dep_msg);
	}
    }

  /* cp# -> 11:8, opc1 -> 23:21, Rd -> 15:12, CRn -> 19:16, CRm -> 3:0,
     opc2 -> 7:5.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
8588
8589 /* Transfer between coprocessor register and pair of ARM registers.
8590 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8591 MCRR2
8592 MRRC{cond}
8593 MRRC2
8594
8595 Two XScale instructions are special cases of these:
8596
8597 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8598 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8599
8600 Result unpredictable if Rd or Rn is R15. */
8601
8602 static void
8603 do_co_reg2c (void)
8604 {
8605 unsigned Rd, Rn;
8606
8607 Rd = inst.operands[2].reg;
8608 Rn = inst.operands[3].reg;
8609
8610 if (thumb_mode)
8611 {
8612 reject_bad_reg (Rd);
8613 reject_bad_reg (Rn);
8614 }
8615 else
8616 {
8617 constraint (Rd == REG_PC, BAD_PC);
8618 constraint (Rn == REG_PC, BAD_PC);
8619 }
8620
8621 inst.instruction |= inst.operands[0].reg << 8;
8622 inst.instruction |= inst.operands[1].imm << 4;
8623 inst.instruction |= Rd << 12;
8624 inst.instruction |= Rn << 16;
8625 inst.instruction |= inst.operands[4].reg;
8626 }
8627
8628 static void
8629 do_cpsi (void)
8630 {
8631 inst.instruction |= inst.operands[0].imm << 6;
8632 if (inst.operands[1].present)
8633 {
8634 inst.instruction |= CPSI_MMOD;
8635 inst.instruction |= inst.operands[1].imm;
8636 }
8637 }
8638
static void
do_dbg (void)
{
  /* DBG #option: the option goes in the low bits.  */
  inst.instruction |= inst.operands[0].imm;
}
8644
8645 static void
8646 do_div (void)
8647 {
8648 unsigned Rd, Rn, Rm;
8649
8650 Rd = inst.operands[0].reg;
8651 Rn = (inst.operands[1].present
8652 ? inst.operands[1].reg : Rd);
8653 Rm = inst.operands[2].reg;
8654
8655 constraint ((Rd == REG_PC), BAD_PC);
8656 constraint ((Rn == REG_PC), BAD_PC);
8657 constraint ((Rm == REG_PC), BAD_PC);
8658
8659 inst.instruction |= Rd << 16;
8660 inst.instruction |= Rn << 0;
8661 inst.instruction |= Rm << 8;
8662 }
8663
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  inst.size = 0;	/* Emits nothing in ARM mode.  */
  if (unified_syntax)
    {
      set_it_insn_type (IT_INSN);
      /* Low nibble of the opcode is the condition mask.  */
      now_it.mask = (inst.instruction & 0xf) | 0x10;
      now_it.cc = inst.operands[0].imm;
    }
}
8680
8681 /* If there is only one register in the register list,
8682 then return its register number. Otherwise return -1. */
static int
only_one_reg_in_list (int range)
{
  /* RANGE is a register-list bitmask.  Return the register number when
     exactly one register in r0..r15 is set, otherwise -1.  */
  int i;

  /* An empty list must be rejected up front: ffs (0) is 0, and the
     "1 << (ffs (range) - 1)" test below would shift by -1, which is
     undefined behaviour.  */
  if (range == 0)
    return -1;

  i = ffs (range) - 1;
  return (i > 15 || range != (1 << i)) ? -1 : i;
}
8689
static void
encode_ldmstm(int from_push_pop_mnem)
{
  /* Encode an LDM/STM (or PUSH/POP when FROM_PUSH_POP_MNEM).  Emits
     UNPREDICTABLE warnings for the writeback corner cases, and selects
     the single-register A2 encoding for one-register PUSH/POP.  */
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* '^' suffix on the register list selects the user-bank/exception
     form (type 2/3).  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding.  */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
8740
static void
do_ldmstm (void)
{
  /* Plain LDM/STM mnemonic (not PUSH/POP).  */
  encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
}
8746
8747 /* ARMv5TE load-consecutive (argument parse)
8748 Mode is like LDRH.
8749
8750 LDRccD R, mode
8751 STRccD R, mode. */
8752
static void
do_ldrd (void)
{
  /* LDRD/STRD: first register must be even, the optional second must be
     the next register up, and r14 is rejected (the pair would include
     r15).  */
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  /* Second transfer register defaults to Rt + 1.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	  && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
8788
8789 static void
8790 do_ldrex (void)
8791 {
8792 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
8793 || inst.operands[1].postind || inst.operands[1].writeback
8794 || inst.operands[1].immisreg || inst.operands[1].shifted
8795 || inst.operands[1].negative
8796 /* This can arise if the programmer has written
8797 strex rN, rM, foo
8798 or if they have mistakenly used a register name as the last
8799 operand, eg:
8800 strex rN, rM, rX
8801 It is very difficult to distinguish between these two cases
8802 because "rX" might actually be a label. ie the register
8803 name has been occluded by a symbol of the same name. So we
8804 just generate a general 'bad addressing mode' type error
8805 message and leave it up to the programmer to discover the
8806 true cause and fix their mistake. */
8807 || (inst.operands[1].reg == REG_PC),
8808 BAD_ADDR_MODE);
8809
8810 constraint (inst.reloc.exp.X_op != O_constant
8811 || inst.reloc.exp.X_add_number != 0,
8812 _("offset must be zero in ARM encoding"));
8813
8814 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
8815
8816 inst.instruction |= inst.operands[0].reg << 12;
8817 inst.instruction |= inst.operands[1].reg << 16;
8818 inst.reloc.type = BFD_RELOC_UNUSED;
8819 }
8820
8821 static void
8822 do_ldrexd (void)
8823 {
8824 constraint (inst.operands[0].reg % 2 != 0,
8825 _("even register required"));
8826 constraint (inst.operands[1].present
8827 && inst.operands[1].reg != inst.operands[0].reg + 1,
8828 _("can only load two consecutive registers"));
8829 /* If op 1 were present and equal to PC, this function wouldn't
8830 have been called in the first place. */
8831 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
8832
8833 inst.instruction |= inst.operands[0].reg << 12;
8834 inst.instruction |= inst.operands[2].reg << 16;
8835 }
8836
8837 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
8838 which is not a multiple of four is UNPREDICTABLE. */
8839 static void
8840 check_ldr_r15_aligned (void)
8841 {
8842 constraint (!(inst.operands[1].immisreg)
8843 && (inst.operands[0].reg == REG_PC
8844 && inst.operands[1].reg == REG_PC
8845 && (inst.reloc.exp.X_add_number & 0x3)),
8846 _("ldr to register 15 must be 4-byte alligned"));
8847 }
8848
8849 static void
8850 do_ldst (void)
8851 {
8852 inst.instruction |= inst.operands[0].reg << 12;
8853 if (!inst.operands[1].isreg)
8854 if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
8855 return;
8856 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
8857 check_ldr_r15_aligned ();
8858 }
8859
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      /* [Rn] parses as pre-indexed with a zero offset; any real offset
	 is an error here.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
8878
8879 /* Halfword and signed-byte load/store operations. */
8880
8881 static void
8882 do_ldstv4 (void)
8883 {
8884 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
8885 inst.instruction |= inst.operands[0].reg << 12;
8886 if (!inst.operands[1].isreg)
8887 if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
8888 return;
8889 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
8890 }
8891
static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      /* [Rn] parses as pre-indexed with a zero offset; any real offset
	 is an error here.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
8910
8911 /* Co-processor register load/store.
8912 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
8913 static void
8914 do_lstc (void)
8915 {
8916 inst.instruction |= inst.operands[0].reg << 8;
8917 inst.instruction |= inst.operands[1].reg << 12;
8918 encode_arm_cp_address (2, TRUE, TRUE, 0);
8919 }
8920
8921 static void
8922 do_mlas (void)
8923 {
8924 /* This restriction does not apply to mls (nor to mla in v6 or later). */
8925 if (inst.operands[0].reg == inst.operands[1].reg
8926 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
8927 && !(inst.instruction & 0x00400000))
8928 as_tsktsk (_("Rd and Rm should be different in mla"));
8929
8930 inst.instruction |= inst.operands[0].reg << 16;
8931 inst.instruction |= inst.operands[1].reg;
8932 inst.instruction |= inst.operands[2].reg << 8;
8933 inst.instruction |= inst.operands[3].reg << 12;
8934 }
8935
8936 static void
8937 do_mov (void)
8938 {
8939 inst.instruction |= inst.operands[0].reg << 12;
8940 encode_arm_shifter_operand (1);
8941 }
8942
8943 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
8944 static void
8945 do_mov16 (void)
8946 {
8947 bfd_vma imm;
8948 bfd_boolean top;
8949
8950 top = (inst.instruction & 0x00400000) != 0;
8951 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
8952 _(":lower16: not allowed this instruction"));
8953 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
8954 _(":upper16: not allowed instruction"));
8955 inst.instruction |= inst.operands[0].reg << 12;
8956 if (inst.reloc.type == BFD_RELOC_UNUSED)
8957 {
8958 imm = inst.reloc.exp.X_add_number;
8959 /* The value is in two pieces: 0:11, 16:19. */
8960 inst.instruction |= (imm & 0x00000fff);
8961 inst.instruction |= (imm & 0x0000f000) << 4;
8962 }
8963 }
8964
8965 static int
8966 do_vfp_nsyn_mrs (void)
8967 {
8968 if (inst.operands[0].isvec)
8969 {
8970 if (inst.operands[1].reg != 1)
8971 first_error (_("operand 1 must be FPSCR"));
8972 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
8973 memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
8974 do_vfp_nsyn_opcode ("fmstat");
8975 }
8976 else if (inst.operands[1].isvec)
8977 do_vfp_nsyn_opcode ("fmrx");
8978 else
8979 return FAIL;
8980
8981 return SUCCESS;
8982 }
8983
8984 static int
8985 do_vfp_nsyn_msr (void)
8986 {
8987 if (inst.operands[0].isvec)
8988 do_vfp_nsyn_opcode ("fmxr");
8989 else
8990 return FAIL;
8991
8992 return SUCCESS;
8993 }
8994
8995 static void
8996 do_vmrs (void)
8997 {
8998 unsigned Rt = inst.operands[0].reg;
8999
9000 if (thumb_mode && Rt == REG_SP)
9001 {
9002 inst.error = BAD_SP;
9003 return;
9004 }
9005
9006 /* APSR_ sets isvec. All other refs to PC are illegal. */
9007 if (!inst.operands[0].isvec && Rt == REG_PC)
9008 {
9009 inst.error = BAD_PC;
9010 return;
9011 }
9012
9013 /* If we get through parsing the register name, we just insert the number
9014 generated into the instruction without further validation. */
9015 inst.instruction |= (inst.operands[1].reg << 16);
9016 inst.instruction |= (Rt << 12);
9017 }
9018
9019 static void
9020 do_vmsr (void)
9021 {
9022 unsigned Rt = inst.operands[1].reg;
9023
9024 if (thumb_mode)
9025 reject_bad_reg (Rt);
9026 else if (Rt == REG_PC)
9027 {
9028 inst.error = BAD_PC;
9029 return;
9030 }
9031
9032 /* If we get through parsing the register name, we just insert the number
9033 generated into the instruction without further validation. */
9034 inst.instruction |= (inst.operands[0].reg << 16);
9035 inst.instruction |= (Rt << 12);
9036 }
9037
static void
do_mrs (void)
{
  unsigned br;

  /* Give the VFP system-register alias a chance first.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;

  if (inst.operands[1].isreg)
    {
      br = inst.operands[1].reg;
      /* NOTE(review): (br & 0xf0000) can only carry bits 19:16, so it
	 can never equal 0xf000; that sub-test is therefore always true
	 and the whole check reduces to (br & 0x200) == 0.  The mask was
	 presumably meant to be compared against 0xf0000 -- verify
	 against the banked/system register encodings before changing.  */
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf000))
	as_bad (_("bad register for mrs"));
    }
  else
    {
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
		  != (PSR_c|PSR_f),
		  _("'APSR', 'CPSR' or 'SPSR' expected"));
      /* R = 15<<16 selects the PSR form; SPSR_BIT picks SPSR vs CPSR.  */
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
    }

  inst.instruction |= br;
}
9066
9067 /* Two possible forms:
9068 "{C|S}PSR_<field>, Rm",
9069 "{C|S}PSR_f, #expression". */
9070
9071 static void
9072 do_msr (void)
9073 {
9074 if (do_vfp_nsyn_msr () == SUCCESS)
9075 return;
9076
9077 inst.instruction |= inst.operands[0].imm;
9078 if (inst.operands[1].isreg)
9079 inst.instruction |= inst.operands[1].reg;
9080 else
9081 {
9082 inst.instruction |= INST_IMMEDIATE;
9083 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
9084 inst.reloc.pc_rel = 0;
9085 }
9086 }
9087
9088 static void
9089 do_mul (void)
9090 {
9091 constraint (inst.operands[2].reg == REG_PC, BAD_PC);
9092
9093 if (!inst.operands[2].present)
9094 inst.operands[2].reg = inst.operands[0].reg;
9095 inst.instruction |= inst.operands[0].reg << 16;
9096 inst.instruction |= inst.operands[1].reg;
9097 inst.instruction |= inst.operands[2].reg << 8;
9098
9099 if (inst.operands[0].reg == inst.operands[1].reg
9100 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
9101 as_tsktsk (_("Rd and Rm should be different in mul"));
9102 }
9103
9104 /* Long Multiply Parser
9105 UMULL RdLo, RdHi, Rm, Rs
9106 SMULL RdLo, RdHi, Rm, Rs
9107 UMLAL RdLo, RdHi, Rm, Rs
9108 SMLAL RdLo, RdHi, Rm, Rs. */
9109
9110 static void
9111 do_mull (void)
9112 {
9113 inst.instruction |= inst.operands[0].reg << 12;
9114 inst.instruction |= inst.operands[1].reg << 16;
9115 inst.instruction |= inst.operands[2].reg;
9116 inst.instruction |= inst.operands[3].reg << 8;
9117
9118 /* rdhi and rdlo must be different. */
9119 if (inst.operands[0].reg == inst.operands[1].reg)
9120 as_tsktsk (_("rdhi and rdlo must be different"));
9121
9122 /* rdhi, rdlo and rm must all be different before armv6. */
9123 if ((inst.operands[0].reg == inst.operands[2].reg
9124 || inst.operands[1].reg == inst.operands[2].reg)
9125 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
9126 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
9127 }
9128
9129 static void
9130 do_nop (void)
9131 {
9132 if (inst.operands[0].present
9133 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
9134 {
9135 /* Architectural NOP hints are CPSR sets with no bits selected. */
9136 inst.instruction &= 0xf0000000;
9137 inst.instruction |= 0x0320f000;
9138 if (inst.operands[0].present)
9139 inst.instruction |= inst.operands[0].imm;
9140 }
9141 }
9142
9143 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9144 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9145 Condition defaults to COND_ALWAYS.
9146 Error if Rd, Rn or Rm are R15. */
9147
9148 static void
9149 do_pkhbt (void)
9150 {
9151 inst.instruction |= inst.operands[0].reg << 12;
9152 inst.instruction |= inst.operands[1].reg << 16;
9153 inst.instruction |= inst.operands[2].reg;
9154 if (inst.operands[3].present)
9155 encode_arm_shift (3);
9156 }
9157
9158 /* ARM V6 PKHTB (Argument Parse). */
9159
9160 static void
9161 do_pkhtb (void)
9162 {
9163 if (!inst.operands[3].present)
9164 {
9165 /* If the shift specifier is omitted, turn the instruction
9166 into pkhbt rd, rm, rn. */
9167 inst.instruction &= 0xfff00010;
9168 inst.instruction |= inst.operands[0].reg << 12;
9169 inst.instruction |= inst.operands[1].reg;
9170 inst.instruction |= inst.operands[2].reg << 16;
9171 }
9172 else
9173 {
9174 inst.instruction |= inst.operands[0].reg << 12;
9175 inst.instruction |= inst.operands[1].reg << 16;
9176 inst.instruction |= inst.operands[2].reg;
9177 encode_arm_shift (3);
9178 }
9179 }
9180
9181 /* ARMv5TE: Preload-Cache
9182 MP Extensions: Preload for write
9183
9184 PLD(W) <addr_mode>
9185
9186 Syntactically, like LDR with B=1, W=0, L=1. */
9187
static void
do_pld (void)
{
  /* PLD takes a single address operand; only the offset-addressed
     [Rn, ...] form is legal — no post-index, no writeback, no
     unindexed form.  The constraint order determines which
     diagnostic the user sees first.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  /* Encode the address exactly as for LDR (addressing mode 2).  */
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
9201
9202 /* ARMv7: PLI <addr_mode> */
static void
do_pli (void)
{
  /* Same addressing restrictions as PLD: offset form only.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  /* Unlike PLD, the PLI encoding has the P (pre-index) bit clear.  */
  inst.instruction &= ~PRE_INDEX;
}
9217
9218 static void
9219 do_push_pop (void)
9220 {
9221 constraint (inst.operands[0].writeback,
9222 _("push/pop do not support {reglist}^"));
9223 inst.operands[1] = inst.operands[0];
9224 memset (&inst.operands[0], 0, sizeof inst.operands[0]);
9225 inst.operands[0].isreg = 1;
9226 inst.operands[0].writeback = 1;
9227 inst.operands[0].reg = REG_SP;
9228 encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
9229 }
9230
9231 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9232 word at the specified address and the following word
9233 respectively.
9234 Unconditionally executed.
9235 Error if Rn is R15. */
9236
9237 static void
9238 do_rfe (void)
9239 {
9240 inst.instruction |= inst.operands[0].reg << 16;
9241 if (inst.operands[0].writeback)
9242 inst.instruction |= WRITE_BACK;
9243 }
9244
9245 /* ARM V6 ssat (argument parse). */
9246
9247 static void
9248 do_ssat (void)
9249 {
9250 inst.instruction |= inst.operands[0].reg << 12;
9251 inst.instruction |= (inst.operands[1].imm - 1) << 16;
9252 inst.instruction |= inst.operands[2].reg;
9253
9254 if (inst.operands[3].present)
9255 encode_arm_shift (3);
9256 }
9257
9258 /* ARM V6 usat (argument parse). */
9259
9260 static void
9261 do_usat (void)
9262 {
9263 inst.instruction |= inst.operands[0].reg << 12;
9264 inst.instruction |= inst.operands[1].imm << 16;
9265 inst.instruction |= inst.operands[2].reg;
9266
9267 if (inst.operands[3].present)
9268 encode_arm_shift (3);
9269 }
9270
9271 /* ARM V6 ssat16 (argument parse). */
9272
9273 static void
9274 do_ssat16 (void)
9275 {
9276 inst.instruction |= inst.operands[0].reg << 12;
9277 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
9278 inst.instruction |= inst.operands[2].reg;
9279 }
9280
9281 static void
9282 do_usat16 (void)
9283 {
9284 inst.instruction |= inst.operands[0].reg << 12;
9285 inst.instruction |= inst.operands[1].imm << 16;
9286 inst.instruction |= inst.operands[2].reg;
9287 }
9288
9289 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9290 preserving the other bits.
9291
9292 setend <endian_specifier>, where <endian_specifier> is either
9293 BE or LE. */
9294
9295 static void
9296 do_setend (void)
9297 {
9298 if (warn_on_deprecated
9299 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
9300 as_tsktsk (_("setend use is deprecated for ARMv8"));
9301
9302 if (inst.operands[0].imm)
9303 inst.instruction |= 0x200;
9304 }
9305
9306 static void
9307 do_shift (void)
9308 {
9309 unsigned int Rm = (inst.operands[1].present
9310 ? inst.operands[1].reg
9311 : inst.operands[0].reg);
9312
9313 inst.instruction |= inst.operands[0].reg << 12;
9314 inst.instruction |= Rm;
9315 if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
9316 {
9317 inst.instruction |= inst.operands[2].reg << 8;
9318 inst.instruction |= SHIFT_BY_REG;
9319 /* PR 12854: Error on extraneous shifts. */
9320 constraint (inst.operands[2].shifted,
9321 _("extraneous shift as part of operand to shift insn"));
9322 }
9323 else
9324 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
9325 }
9326
9327 static void
9328 do_smc (void)
9329 {
9330 inst.reloc.type = BFD_RELOC_ARM_SMC;
9331 inst.reloc.pc_rel = 0;
9332 }
9333
9334 static void
9335 do_hvc (void)
9336 {
9337 inst.reloc.type = BFD_RELOC_ARM_HVC;
9338 inst.reloc.pc_rel = 0;
9339 }
9340
9341 static void
9342 do_swi (void)
9343 {
9344 inst.reloc.type = BFD_RELOC_ARM_SWI;
9345 inst.reloc.pc_rel = 0;
9346 }
9347
9348 static void
9349 do_setpan (void)
9350 {
9351 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
9352 _("selected processor does not support SETPAN instruction"));
9353
9354 inst.instruction |= ((inst.operands[0].imm & 1) << 9);
9355 }
9356
9357 static void
9358 do_t_setpan (void)
9359 {
9360 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
9361 _("selected processor does not support SETPAN instruction"));
9362
9363 inst.instruction |= (inst.operands[0].imm << 3);
9364 }
9365
9366 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9367 SMLAxy{cond} Rd,Rm,Rs,Rn
9368 SMLAWy{cond} Rd,Rm,Rs,Rn
9369 Error if any register is R15. */
9370
9371 static void
9372 do_smla (void)
9373 {
9374 inst.instruction |= inst.operands[0].reg << 16;
9375 inst.instruction |= inst.operands[1].reg;
9376 inst.instruction |= inst.operands[2].reg << 8;
9377 inst.instruction |= inst.operands[3].reg << 12;
9378 }
9379
9380 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9381 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9382 Error if any register is R15.
9383 Warning if Rdlo == Rdhi. */
9384
9385 static void
9386 do_smlal (void)
9387 {
9388 inst.instruction |= inst.operands[0].reg << 12;
9389 inst.instruction |= inst.operands[1].reg << 16;
9390 inst.instruction |= inst.operands[2].reg;
9391 inst.instruction |= inst.operands[3].reg << 8;
9392
9393 if (inst.operands[0].reg == inst.operands[1].reg)
9394 as_tsktsk (_("rdhi and rdlo must be different"));
9395 }
9396
9397 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9398 SMULxy{cond} Rd,Rm,Rs
9399 Error if any register is R15. */
9400
9401 static void
9402 do_smul (void)
9403 {
9404 inst.instruction |= inst.operands[0].reg << 16;
9405 inst.instruction |= inst.operands[1].reg;
9406 inst.instruction |= inst.operands[2].reg << 8;
9407 }
9408
9409 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9410 the same for both ARM and Thumb-2. */
9411
9412 static void
9413 do_srs (void)
9414 {
9415 int reg;
9416
9417 if (inst.operands[0].present)
9418 {
9419 reg = inst.operands[0].reg;
9420 constraint (reg != REG_SP, _("SRS base register must be r13"));
9421 }
9422 else
9423 reg = REG_SP;
9424
9425 inst.instruction |= reg << 16;
9426 inst.instruction |= inst.operands[1].imm;
9427 if (inst.operands[0].writeback || inst.operands[1].writeback)
9428 inst.instruction |= WRITE_BACK;
9429 }
9430
9431 /* ARM V6 strex (argument parse). */
9432
static void
do_strex (void)
{
  /* The address operand must be a plain [Rn] with no offset,
     index, shift or writeback.  PC as the base is also rejected.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  /* The status register Rd must not overlap Rt or Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  /* The ARM encoding has no offset field, so any parsed offset
     must have been exactly zero.  */
  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  /* The offset was consumed by the constraint above; drop the reloc.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
}
9456
static void
do_t_strexbh (void)
{
  /* Thumb STREXB/STREXH: plain [Rn] addressing only.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  /* The status register must not overlap the value or base register.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9471
static void
do_strexd (void)
{
  /* The value pair must be an even/odd register pair (Rt, Rt+1).  */
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  /* The status register must not overlap either half of the pair
     or the base register.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}
9493
9494 /* ARM V8 STRL. */
9495 static void
9496 do_stlex (void)
9497 {
9498 constraint (inst.operands[0].reg == inst.operands[1].reg
9499 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
9500
9501 do_rd_rm_rn ();
9502 }
9503
9504 static void
9505 do_t_stlex (void)
9506 {
9507 constraint (inst.operands[0].reg == inst.operands[1].reg
9508 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
9509
9510 do_rm_rd_rn ();
9511 }
9512
9513 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9514 extends it to 32-bits, and adds the result to a value in another
9515 register. You can specify a rotation by 0, 8, 16, or 24 bits
9516 before extracting the 16-bit value.
9517 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9518 Condition defaults to COND_ALWAYS.
9519 Error if any register uses R15. */
9520
9521 static void
9522 do_sxtah (void)
9523 {
9524 inst.instruction |= inst.operands[0].reg << 12;
9525 inst.instruction |= inst.operands[1].reg << 16;
9526 inst.instruction |= inst.operands[2].reg;
9527 inst.instruction |= inst.operands[3].imm << 10;
9528 }
9529
9530 /* ARM V6 SXTH.
9531
9532 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9533 Condition defaults to COND_ALWAYS.
9534 Error if any register uses R15. */
9535
9536 static void
9537 do_sxth (void)
9538 {
9539 inst.instruction |= inst.operands[0].reg << 12;
9540 inst.instruction |= inst.operands[1].reg;
9541 inst.instruction |= inst.operands[2].imm << 10;
9542 }
9543 \f
9544 /* VFP instructions. In a logical order: SP variant first, monad
9545 before dyad, arithmetic then move then load/store. */
9546
9547 static void
9548 do_vfp_sp_monadic (void)
9549 {
9550 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9551 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
9552 }
9553
9554 static void
9555 do_vfp_sp_dyadic (void)
9556 {
9557 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9558 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
9559 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
9560 }
9561
static void
do_vfp_sp_compare_z (void)
{
  /* Compare-with-zero: only Sd is encoded; the zero comparand is
     implicit in the opcode.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}
9567
9568 static void
9569 do_vfp_dp_sp_cvt (void)
9570 {
9571 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9572 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
9573 }
9574
9575 static void
9576 do_vfp_sp_dp_cvt (void)
9577 {
9578 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9579 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
9580 }
9581
9582 static void
9583 do_vfp_reg_from_sp (void)
9584 {
9585 inst.instruction |= inst.operands[0].reg << 12;
9586 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
9587 }
9588
9589 static void
9590 do_vfp_reg2_from_sp2 (void)
9591 {
9592 constraint (inst.operands[2].imm != 2,
9593 _("only two consecutive VFP SP registers allowed here"));
9594 inst.instruction |= inst.operands[0].reg << 12;
9595 inst.instruction |= inst.operands[1].reg << 16;
9596 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
9597 }
9598
9599 static void
9600 do_vfp_sp_from_reg (void)
9601 {
9602 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
9603 inst.instruction |= inst.operands[1].reg << 12;
9604 }
9605
9606 static void
9607 do_vfp_sp2_from_reg2 (void)
9608 {
9609 constraint (inst.operands[0].imm != 2,
9610 _("only two consecutive VFP SP registers allowed here"));
9611 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
9612 inst.instruction |= inst.operands[1].reg << 12;
9613 inst.instruction |= inst.operands[2].reg << 16;
9614 }
9615
static void
do_vfp_sp_ldst (void)
{
  /* Single-precision load/store: Sd plus a coprocessor-style address
     from operand 1.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
9622
static void
do_vfp_dp_ldst (void)
{
  /* Double-precision load/store: Dd plus a coprocessor-style address
     from operand 1.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
9629
9630
9631 static void
9632 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
9633 {
9634 if (inst.operands[0].writeback)
9635 inst.instruction |= WRITE_BACK;
9636 else
9637 constraint (ldstm_type != VFP_LDSTMIA,
9638 _("this addressing mode requires base-register writeback"));
9639 inst.instruction |= inst.operands[0].reg << 16;
9640 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
9641 inst.instruction |= inst.operands[1].imm;
9642 }
9643
9644 static void
9645 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
9646 {
9647 int count;
9648
9649 if (inst.operands[0].writeback)
9650 inst.instruction |= WRITE_BACK;
9651 else
9652 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
9653 _("this addressing mode requires base-register writeback"));
9654
9655 inst.instruction |= inst.operands[0].reg << 16;
9656 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
9657
9658 count = inst.operands[1].imm << 1;
9659 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
9660 count += 1;
9661
9662 inst.instruction |= count;
9663 }
9664
static void
do_vfp_sp_ldstmia (void)
{
  /* Single-precision multiple transfer, increment-after form.  */
  vfp_sp_ldstm (VFP_LDSTMIA);
}
9670
static void
do_vfp_sp_ldstmdb (void)
{
  /* Single-precision multiple transfer, decrement-before form.  */
  vfp_sp_ldstm (VFP_LDSTMDB);
}
9676
static void
do_vfp_dp_ldstmia (void)
{
  /* Double-precision multiple transfer, increment-after form.  */
  vfp_dp_ldstm (VFP_LDSTMIA);
}
9682
static void
do_vfp_dp_ldstmdb (void)
{
  /* Double-precision multiple transfer, decrement-before form.  */
  vfp_dp_ldstm (VFP_LDSTMDB);
}
9688
static void
do_vfp_xp_ldstmia (void)
{
  /* FLDMIAX/FSTMIAX: double-precision transfer with the extra word.  */
  vfp_dp_ldstm (VFP_LDSTMIAX);
}
9694
static void
do_vfp_xp_ldstmdb (void)
{
  /* FLDMDBX/FSTMDBX: double-precision transfer with the extra word.  */
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
9700
9701 static void
9702 do_vfp_dp_rd_rm (void)
9703 {
9704 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9705 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
9706 }
9707
9708 static void
9709 do_vfp_dp_rn_rd (void)
9710 {
9711 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
9712 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
9713 }
9714
9715 static void
9716 do_vfp_dp_rd_rn (void)
9717 {
9718 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9719 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
9720 }
9721
9722 static void
9723 do_vfp_dp_rd_rn_rm (void)
9724 {
9725 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9726 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
9727 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
9728 }
9729
static void
do_vfp_dp_rd (void)
{
  /* Single-operand form: only the Dd destination is encoded.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}
9735
9736 static void
9737 do_vfp_dp_rm_rd_rn (void)
9738 {
9739 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
9740 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
9741 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
9742 }
9743
9744 /* VFPv3 instructions. */
9745 static void
9746 do_vfp_sp_const (void)
9747 {
9748 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9749 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
9750 inst.instruction |= (inst.operands[1].imm & 0x0f);
9751 }
9752
9753 static void
9754 do_vfp_dp_const (void)
9755 {
9756 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9757 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
9758 inst.instruction |= (inst.operands[1].imm & 0x0f);
9759 }
9760
/* Encode the fraction-bits field of a VCVT fixed-point conversion.
   SRCSIZE is the operand width (16 or 32); the encoded value is
   srcsize minus the user-supplied fraction count.  */
static void
vfp_conv (int srcsize)
{
  int immbits = srcsize - inst.operands[1].imm;

  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
    {
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
	 i.e. immbits must be in range 0 - 16.  */
      inst.error = _("immediate value out of range, expected range [0, 16]");
      return;
    }
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
    {
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
	 i.e. immbits must be in range 0 - 31.  */
      inst.error = _("immediate value out of range, expected range [1, 32]");
      return;
    }

  /* Low bit of immbits goes in bit 5; the remaining bits start at bit 0.  */
  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}
9784
static void
do_vfp_sp_conv_16 (void)
{
  /* 16-bit fixed-point conversion with a single-precision register.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}
9791
static void
do_vfp_dp_conv_16 (void)
{
  /* 16-bit fixed-point conversion with a double-precision register.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}
9798
static void
do_vfp_sp_conv_32 (void)
{
  /* 32-bit fixed-point conversion with a single-precision register.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}
9805
static void
do_vfp_dp_conv_32 (void)
{
  /* 32-bit fixed-point conversion with a double-precision register.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
9812 \f
9813 /* FPA instructions. Also in a logical order. */
9814
9815 static void
9816 do_fpa_cmp (void)
9817 {
9818 inst.instruction |= inst.operands[0].reg << 16;
9819 inst.instruction |= inst.operands[1].reg;
9820 }
9821
static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* The 1-4 register count is encoded in the two CP_T_X/CP_T_Y bits
     (4 registers is represented by both bits clear).  */
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X;	 break;
    case 2: inst.instruction |= CP_T_Y;	 break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Each FPA register occupies 12 bytes; synthesize the offset
	 the stacking form implies.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;

      /* Descending-stack forms without pre-index become post-indexed.  */
      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
9860 \f
9861 /* iWMMXt instructions: strictly in alphabetical order. */
9862
static void
do_iwmmxt_tandorc (void)
{
  /* TANDC/TORC/TORVSC take r15 as their only legal destination.  */
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}
9868
9869 static void
9870 do_iwmmxt_textrc (void)
9871 {
9872 inst.instruction |= inst.operands[0].reg << 12;
9873 inst.instruction |= inst.operands[1].imm;
9874 }
9875
9876 static void
9877 do_iwmmxt_textrm (void)
9878 {
9879 inst.instruction |= inst.operands[0].reg << 12;
9880 inst.instruction |= inst.operands[1].reg << 16;
9881 inst.instruction |= inst.operands[2].imm;
9882 }
9883
9884 static void
9885 do_iwmmxt_tinsr (void)
9886 {
9887 inst.instruction |= inst.operands[0].reg << 16;
9888 inst.instruction |= inst.operands[1].reg << 12;
9889 inst.instruction |= inst.operands[2].imm;
9890 }
9891
9892 static void
9893 do_iwmmxt_tmia (void)
9894 {
9895 inst.instruction |= inst.operands[0].reg << 5;
9896 inst.instruction |= inst.operands[1].reg;
9897 inst.instruction |= inst.operands[2].reg << 12;
9898 }
9899
9900 static void
9901 do_iwmmxt_waligni (void)
9902 {
9903 inst.instruction |= inst.operands[0].reg << 12;
9904 inst.instruction |= inst.operands[1].reg << 16;
9905 inst.instruction |= inst.operands[2].reg;
9906 inst.instruction |= inst.operands[3].imm << 20;
9907 }
9908
9909 static void
9910 do_iwmmxt_wmerge (void)
9911 {
9912 inst.instruction |= inst.operands[0].reg << 12;
9913 inst.instruction |= inst.operands[1].reg << 16;
9914 inst.instruction |= inst.operands[2].reg;
9915 inst.instruction |= inst.operands[3].imm << 21;
9916 }
9917
9918 static void
9919 do_iwmmxt_wmov (void)
9920 {
9921 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
9922 inst.instruction |= inst.operands[0].reg << 12;
9923 inst.instruction |= inst.operands[1].reg << 16;
9924 inst.instruction |= inst.operands[1].reg;
9925 }
9926
9927 static void
9928 do_iwmmxt_wldstbh (void)
9929 {
9930 int reloc;
9931 inst.instruction |= inst.operands[0].reg << 12;
9932 if (thumb_mode)
9933 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
9934 else
9935 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
9936 encode_arm_cp_address (1, TRUE, FALSE, reloc);
9937 }
9938
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      /* Control-register transfers must be unconditional and use
	 the 0xF condition field.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
9952
static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* iWMMXt2 adds a register-offset addressing form; hand-build that
     encoding instead of using the generic coprocessor address.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      /* Clear the fields the generic template set, then force the
	 unconditional (0xF) condition.  */
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xfU << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      /* Offset constant in bits 4-7, index register in the low bits.  */
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
9975
9976 static void
9977 do_iwmmxt_wshufh (void)
9978 {
9979 inst.instruction |= inst.operands[0].reg << 12;
9980 inst.instruction |= inst.operands[1].reg << 16;
9981 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
9982 inst.instruction |= (inst.operands[2].imm & 0x0f);
9983 }
9984
9985 static void
9986 do_iwmmxt_wzero (void)
9987 {
9988 /* WZERO reg is an alias for WANDN reg, reg, reg. */
9989 inst.instruction |= inst.operands[0].reg;
9990 inst.instruction |= inst.operands[0].reg << 12;
9991 inst.instruction |= inst.operands[0].reg << 16;
9992 }
9993
/* iWMMXt shift insns taking either three registers or, with iWMMXt2,
   two registers and a 5-bit immediate shift count.  */
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* A shift of zero cannot be encoded directly; rewrite the
	   opcode (bits 20-23 select operation and size) into an
	   equivalent full-width rotate.  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  */
    inst.operands[2].imm &= 0x1f;
    /* Immediate form: unconditional encoding, with the shift count
       split across bits 0-3 and bit 8.  */
    inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
10043 \f
10044 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10045 operations first, then control, shift, and load/store. */
10046
10047 /* Insns like "foo X,Y,Z". */
10048
10049 static void
10050 do_mav_triple (void)
10051 {
10052 inst.instruction |= inst.operands[0].reg << 16;
10053 inst.instruction |= inst.operands[1].reg;
10054 inst.instruction |= inst.operands[2].reg << 12;
10055 }
10056
10057 /* Insns like "foo W,X,Y,Z".
10058 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10059
10060 static void
10061 do_mav_quad (void)
10062 {
10063 inst.instruction |= inst.operands[0].reg << 5;
10064 inst.instruction |= inst.operands[1].reg << 12;
10065 inst.instruction |= inst.operands[2].reg << 16;
10066 inst.instruction |= inst.operands[3].reg;
10067 }
10068
10069 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
static void
do_mav_dspsc (void)
{
  /* Only the source register is encoded; DSPSC is implicit in the
     opcode.  */
  inst.instruction |= inst.operands[1].reg << 12;
}
10075
10076 /* Maverick shift immediate instructions.
10077 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10078 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
10079
10080 static void
10081 do_mav_shift (void)
10082 {
10083 int imm = inst.operands[2].imm;
10084
10085 inst.instruction |= inst.operands[0].reg << 12;
10086 inst.instruction |= inst.operands[1].reg << 16;
10087
10088 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
10089 Bits 5-7 of the insn should have bits 4-6 of the immediate.
10090 Bit 4 should be 0. */
10091 imm = (imm & 0xf) | ((imm & 0x70) << 1);
10092
10093 inst.instruction |= imm;
10094 }
10095 \f
10096 /* XScale instructions. Also sorted arithmetic before move. */
10097
10098 /* Xscale multiply-accumulate (argument parse)
10099 MIAcc acc0,Rm,Rs
10100 MIAPHcc acc0,Rm,Rs
10101 MIAxycc acc0,Rm,Rs. */
10102
10103 static void
10104 do_xsc_mia (void)
10105 {
10106 inst.instruction |= inst.operands[1].reg;
10107 inst.instruction |= inst.operands[2].reg << 12;
10108 }
10109
10110 /* Xscale move-accumulator-register (argument parse)
10111
10112 MARcc acc0,RdLo,RdHi. */
10113
10114 static void
10115 do_xsc_mar (void)
10116 {
10117 inst.instruction |= inst.operands[1].reg << 12;
10118 inst.instruction |= inst.operands[2].reg << 16;
10119 }
10120
10121 /* Xscale move-register-accumulator (argument parse)
10122
10123 MRAcc RdLo,RdHi,acc0. */
10124
10125 static void
10126 do_xsc_mra (void)
10127 {
10128 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
10129 inst.instruction |= inst.operands[0].reg << 12;
10130 inst.instruction |= inst.operands[1].reg << 16;
10131 }
10132 \f
10133 /* Encoding functions relevant only to Thumb. */
10134
10135 /* inst.operands[i] is a shifted-register operand; encode
10136 it into inst.instruction in the format used by Thumb32. */
10137
static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR with a zero shift amount.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      /* LSR/ASR allow a count of 32; LSL and ROR do not.  */
      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      if (value == 0)
	/* A zero count is always encoded as LSL #0.  */
	shift = SHIFT_LSL;
      else if (value == 32)
	/* A count of 32 (LSR/ASR) is encoded as 0.  */
	value = 0;

      /* Shift type in bits 4-5; the 5-bit amount is split into
	 imm3 (bits 12-14) and imm2 (bits 6-7).  */
      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
10169
10170
10171 /* inst.operands[i] was set up by parse_address. Encode it into a
10172 Thumb32 format load or store instruction. Reject forms that cannot
10173 be used with such instructions. If is_t is true, reject forms that
10174 cannot be used with a T instruction; if is_d is true, reject forms
10175 that cannot be used with a D instruction. If it is a store insn,
10176 reject PC in Rn. */
10177
static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* Register-offset form: [Rn, Rm {, LSL #0-3}].  Neither T- nor
	 D-form instructions accept it, and the index must be an
	 unshifted or LSL-shifted positive register.  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  constraint (inst.reloc.exp.X_op != O_constant,
		      _("expression too complex"));
	  /* Only LSL #0 through #3 fits the 2-bit shift field.  */
	  constraint (inst.reloc.exp.X_add_number < 0
		      || inst.reloc.exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
	}
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* Immediate pre-indexed form, with or without writeback.  PC
	 as the base is only allowed for loads.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      if (is_d)
	{
	  /* D-form (LDRD/STRD) P and W bits.  */
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  /* Ordinary T32 load/store P/U and W bits.  */
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* Post-indexed form; writeback is implied by the syntax.  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
10249
/* Table of Thumb instructions which exist in both 16- and 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1).
   Also contains several pseudo-instructions used during relaxation.

   Each X(mnem, op16, op32) entry gives the canonical 16-bit opcode and
   the corresponding 32-bit opcode; an op32 of ffffffff marks a mnemonic
   with no wide encoding (see _muls below).  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */ \
  X(_push,  b400, e92d0000), /* stmdb sp!,... */ \
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
/* First expansion: the mnemonic column becomes the T_MNEM_* enum.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* Second expansion: the 16-bit opcode column, indexed by T_MNEM_*.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* Third expansion: the 32-bit opcode column.  Bit 20 of the wide
   encoding is the S (flag-setting) bit, tested by THUMB_SETS_FLAGS.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
10355
10356 /* Thumb instruction encoders, in alphabetical order. */
10357
10358 /* ADDW or SUBW. */
10359
10360 static void
10361 do_t_add_sub_w (void)
10362 {
10363 int Rd, Rn;
10364
10365 Rd = inst.operands[0].reg;
10366 Rn = inst.operands[1].reg;
10367
10368 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10369 is the SP-{plus,minus}-immediate form of the instruction. */
10370 if (Rn == REG_SP)
10371 constraint (Rd == REG_PC, BAD_PC);
10372 else
10373 reject_bad_reg (Rd);
10374
10375 inst.instruction |= (Rn << 16) | (Rd << 8);
10376 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
10377 }
10378
/* Parse an add or subtract instruction.  We get here with inst.instruction
   equalling any of THUMB_OPCODE_add, adds, sub, or subs.

   Chooses between the many Thumb encodings: 16-bit immediate forms
   (with relaxation to 32-bit), SP/PC-relative special cases, SUBS PC,
   LR, #const, ADDW/SUBW, and the 16/32-bit register forms.  */

static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  if (Rd == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      /* In an IT block only non-flag-setting forms may be 16-bit;
	 outside one, only flag-setting forms may.  */
      if (flags)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (!inst.operands[2].isreg)
	{
	  /* Immediate operand.  */
	  int add;

	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  /* Don't clobber an ALU group relocation the operand
		     parser may already have installed.  */
		  if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		      || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		    inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
		  if (inst.size_req != 2)
		    inst.relax = opcode;
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      /* 32-bit encoding required (either requested explicitly
		 or no narrow form fitted and relaxation is off).  */
	      if (Rd == REG_PC)
		{
		  /* Only the exception-return idiom SUBS PC, LR, #const
		     is permitted with a PC destination.  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.reloc.exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.reloc.exp.X_add_number < 0
			      || inst.reloc.exp.X_add_number > 0xff,
			      _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.reloc.exp.X_add_number;
		  inst.reloc.type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  /* Register operand (possibly shifted).  */
	  unsigned int value = inst.reloc.exp.X_add_number;
	  unsigned int shift = inst.operands[2].shift_kind;

	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      /* ADD is commutative: put the operand equal to Rd
			 in the Rd slot of the high-register form.  */
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
		      _("shift value over 3 not allowed in thumb mode"));
	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
		      _("only LSL shift allowed in thumb mode"));
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified (old Thumb) syntax: only the 16-bit encodings.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
10591
10592 static void
10593 do_t_adr (void)
10594 {
10595 unsigned Rd;
10596
10597 Rd = inst.operands[0].reg;
10598 reject_bad_reg (Rd);
10599
10600 if (unified_syntax && inst.size_req == 0 && Rd <= 7)
10601 {
10602 /* Defer to section relaxation. */
10603 inst.relax = inst.instruction;
10604 inst.instruction = THUMB_OP16 (inst.instruction);
10605 inst.instruction |= Rd << 4;
10606 }
10607 else if (unified_syntax && inst.size_req != 2)
10608 {
10609 /* Generate a 32-bit opcode. */
10610 inst.instruction = THUMB_OP32 (inst.instruction);
10611 inst.instruction |= Rd << 8;
10612 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
10613 inst.reloc.pc_rel = 1;
10614 }
10615 else
10616 {
10617 /* Generate a 16-bit opcode. */
10618 inst.instruction = THUMB_OP16 (inst.instruction);
10619 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
10620 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
10621 inst.reloc.pc_rel = 1;
10622
10623 inst.instruction |= Rd << 4;
10624 }
10625 }
10626
/* Arithmetic instructions for which there is just one 16-bit
   instruction encoding, and it allows only two low registers.
   For maximal compatibility with ARM syntax, we allow three register
   operands even when Thumb-32 instructions are not available, as long
   as the first two are identical.  For instance, both "sbc r0,r1" and
   "sbc r0,r0,r1" are allowed.  */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  /* SP and PC are never acceptable in any of the register slots.  */
  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.
	     Inside an IT block only the non-flag-setting form may be
	     narrow; outside one, only the flag-setting form may.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  /* The 16-bit form is two-operand, so it also needs Rd == Rs.  */
	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
10715
10716 /* Similarly, but for instructions where the arithmetic operation is
10717 commutative, so we can allow either of them to be different from
10718 the destination operand in a 16-bit instruction. For instance, all
10719 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
10720 accepted. */
10721 static void
10722 do_t_arit3c (void)
10723 {
10724 int Rd, Rs, Rn;
10725
10726 Rd = inst.operands[0].reg;
10727 Rs = (inst.operands[1].present
10728 ? inst.operands[1].reg /* Rd, Rs, foo */
10729 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
10730 Rn = inst.operands[2].reg;
10731
10732 reject_bad_reg (Rd);
10733 reject_bad_reg (Rs);
10734 if (inst.operands[2].isreg)
10735 reject_bad_reg (Rn);
10736
10737 if (unified_syntax)
10738 {
10739 if (!inst.operands[2].isreg)
10740 {
10741 /* For an immediate, we always generate a 32-bit opcode;
10742 section relaxation will shrink it later if possible. */
10743 inst.instruction = THUMB_OP32 (inst.instruction);
10744 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10745 inst.instruction |= Rd << 8;
10746 inst.instruction |= Rs << 16;
10747 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10748 }
10749 else
10750 {
10751 bfd_boolean narrow;
10752
10753 /* See if we can do this with a 16-bit instruction. */
10754 if (THUMB_SETS_FLAGS (inst.instruction))
10755 narrow = !in_it_block ();
10756 else
10757 narrow = in_it_block ();
10758
10759 if (Rd > 7 || Rn > 7 || Rs > 7)
10760 narrow = FALSE;
10761 if (inst.operands[2].shifted)
10762 narrow = FALSE;
10763 if (inst.size_req == 4)
10764 narrow = FALSE;
10765
10766 if (narrow)
10767 {
10768 if (Rd == Rs)
10769 {
10770 inst.instruction = THUMB_OP16 (inst.instruction);
10771 inst.instruction |= Rd;
10772 inst.instruction |= Rn << 3;
10773 return;
10774 }
10775 if (Rd == Rn)
10776 {
10777 inst.instruction = THUMB_OP16 (inst.instruction);
10778 inst.instruction |= Rd;
10779 inst.instruction |= Rs << 3;
10780 return;
10781 }
10782 }
10783
10784 /* If we get here, it can't be done in 16 bits. */
10785 constraint (inst.operands[2].shifted
10786 && inst.operands[2].immisreg,
10787 _("shift must be constant"));
10788 inst.instruction = THUMB_OP32 (inst.instruction);
10789 inst.instruction |= Rd << 8;
10790 inst.instruction |= Rs << 16;
10791 encode_thumb32_shifted_operand (2);
10792 }
10793 }
10794 else
10795 {
10796 /* On its face this is a lie - the instruction does set the
10797 flags. However, the only supported mnemonic in this mode
10798 says it doesn't. */
10799 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
10800
10801 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
10802 _("unshifted register required"));
10803 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
10804
10805 inst.instruction = THUMB_OP16 (inst.instruction);
10806 inst.instruction |= Rd;
10807
10808 if (Rd == Rs)
10809 inst.instruction |= Rn << 3;
10810 else if (Rd == Rn)
10811 inst.instruction |= Rs << 3;
10812 else
10813 constraint (1, _("dest must overlap one source register"));
10814 }
10815 }
10816
10817 static void
10818 do_t_bfc (void)
10819 {
10820 unsigned Rd;
10821 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
10822 constraint (msb > 32, _("bit-field extends past end of register"));
10823 /* The instruction encoding stores the LSB and MSB,
10824 not the LSB and width. */
10825 Rd = inst.operands[0].reg;
10826 reject_bad_reg (Rd);
10827 inst.instruction |= Rd << 8;
10828 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
10829 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
10830 inst.instruction |= msb - 1;
10831 }
10832
10833 static void
10834 do_t_bfi (void)
10835 {
10836 int Rd, Rn;
10837 unsigned int msb;
10838
10839 Rd = inst.operands[0].reg;
10840 reject_bad_reg (Rd);
10841
10842 /* #0 in second position is alternative syntax for bfc, which is
10843 the same instruction but with REG_PC in the Rm field. */
10844 if (!inst.operands[1].isreg)
10845 Rn = REG_PC;
10846 else
10847 {
10848 Rn = inst.operands[1].reg;
10849 reject_bad_reg (Rn);
10850 }
10851
10852 msb = inst.operands[2].imm + inst.operands[3].imm;
10853 constraint (msb > 32, _("bit-field extends past end of register"));
10854 /* The instruction encoding stores the LSB and MSB,
10855 not the LSB and width. */
10856 inst.instruction |= Rd << 8;
10857 inst.instruction |= Rn << 16;
10858 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10859 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10860 inst.instruction |= msb - 1;
10861 }
10862
10863 static void
10864 do_t_bfx (void)
10865 {
10866 unsigned Rd, Rn;
10867
10868 Rd = inst.operands[0].reg;
10869 Rn = inst.operands[1].reg;
10870
10871 reject_bad_reg (Rd);
10872 reject_bad_reg (Rn);
10873
10874 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
10875 _("bit-field extends past end of register"));
10876 inst.instruction |= Rd << 8;
10877 inst.instruction |= Rn << 16;
10878 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10879 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10880 inst.instruction |= inst.operands[3].imm - 1;
10881 }
10882
10883 /* ARM V5 Thumb BLX (argument parse)
10884 BLX <target_addr> which is BLX(1)
10885 BLX <Rm> which is BLX(2)
10886 Unfortunately, there are two different opcodes for this mnemonic.
10887 So, the insns[].value is not used, and the code here zaps values
10888 into inst.instruction.
10889
10890 ??? How to take advantage of the additional two bits of displacement
10891 available in Thumb32 mode? Need new relocation? */
10892
10893 static void
10894 do_t_blx (void)
10895 {
10896 set_it_insn_type_last ();
10897
10898 if (inst.operands[0].isreg)
10899 {
10900 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
10901 /* We have a register, so this is BLX(2). */
10902 inst.instruction |= inst.operands[0].reg << 3;
10903 }
10904 else
10905 {
10906 /* No register. This must be BLX(1). */
10907 inst.instruction = 0xf000e800;
10908 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
10909 }
10910 }
10911
10912 static void
10913 do_t_branch (void)
10914 {
10915 int opcode;
10916 int cond;
10917 int reloc;
10918
10919 cond = inst.cond;
10920 set_it_insn_type (IF_INSIDE_IT_LAST_INSN);
10921
10922 if (in_it_block ())
10923 {
10924 /* Conditional branches inside IT blocks are encoded as unconditional
10925 branches. */
10926 cond = COND_ALWAYS;
10927 }
10928 else
10929 cond = inst.cond;
10930
10931 if (cond != COND_ALWAYS)
10932 opcode = T_MNEM_bcond;
10933 else
10934 opcode = inst.instruction;
10935
10936 if (unified_syntax
10937 && (inst.size_req == 4
10938 || (inst.size_req != 2
10939 && (inst.operands[0].hasreloc
10940 || inst.reloc.exp.X_op == O_constant))))
10941 {
10942 inst.instruction = THUMB_OP32(opcode);
10943 if (cond == COND_ALWAYS)
10944 reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
10945 else
10946 {
10947 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
10948 _("selected architecture does not support "
10949 "wide conditional branch instruction"));
10950
10951 gas_assert (cond != 0xF);
10952 inst.instruction |= cond << 22;
10953 reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
10954 }
10955 }
10956 else
10957 {
10958 inst.instruction = THUMB_OP16(opcode);
10959 if (cond == COND_ALWAYS)
10960 reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
10961 else
10962 {
10963 inst.instruction |= cond << 8;
10964 reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
10965 }
10966 /* Allow section relaxation. */
10967 if (unified_syntax && inst.size_req != 2)
10968 inst.relax = opcode;
10969 }
10970 inst.reloc.type = reloc;
10971 inst.reloc.pc_rel = 1;
10972 }
10973
/* Actually do the work for Thumb state bkpt and hlt.  The only difference
   between the two is the maximum immediate allowed - which is passed in
   RANGE.  */
static void
do_t_bkpt_hlt1 (int range)
{
  /* Neither instruction takes a condition suffix.  */
  constraint (inst.cond != COND_ALWAYS,
	      _("instruction is always unconditional"));
  if (inst.operands[0].present)
    {
      constraint (inst.operands[0].imm > range,
		  _("immediate value out of range"));
      /* The immediate lives in the low bits of the opcode.  */
      inst.instruction |= inst.operands[0].imm;
    }

  /* Allowed inside an IT block without affecting IT state.  */
  set_it_insn_type (NEUTRAL_IT_INSN);
}
10991
static void
do_t_hlt (void)
{
  /* HLT accepts a 6-bit immediate (0-63).  */
  do_t_bkpt_hlt1 (63);
}
10997
static void
do_t_bkpt (void)
{
  /* BKPT accepts an 8-bit immediate (0-255).  */
  do_t_bkpt_hlt1 (255);
}
11003
/* Thumb BL/BLX-style branch with a 23-bit displacement (argument
   parse).  */
static void
do_t_branch23 (void)
{
  set_it_insn_type_last ();
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);

  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
     this file.  We used to simply ignore the PLT reloc type here --
     the branch encoding is now needed to deal with TLSCALL relocs.
     So if we see a PLT reloc now, put it back to how it used to be to
     keep the preexisting behaviour.  */
  if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
    inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);
#endif
}
11031
/* Thumb BX (argument parse).  */
static void
do_t_bx (void)
{
  set_it_insn_type_last ();
  /* The target register goes in bits 6:3.  */
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
     should cause the alignment to be checked once it is known.  This is
     because BX PC only works if the instruction is word aligned.  */
}
11041
11042 static void
11043 do_t_bxj (void)
11044 {
11045 int Rm;
11046
11047 set_it_insn_type_last ();
11048 Rm = inst.operands[0].reg;
11049 reject_bad_reg (Rm);
11050 inst.instruction |= Rm << 16;
11051 }
11052
11053 static void
11054 do_t_clz (void)
11055 {
11056 unsigned Rd;
11057 unsigned Rm;
11058
11059 Rd = inst.operands[0].reg;
11060 Rm = inst.operands[1].reg;
11061
11062 reject_bad_reg (Rd);
11063 reject_bad_reg (Rm);
11064
11065 inst.instruction |= Rd << 8;
11066 inst.instruction |= Rm << 16;
11067 inst.instruction |= Rm;
11068 }
11069
/* Thumb CPS (change processor state, argument parse).  */
static void
do_t_cps (void)
{
  /* Not permitted inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction |= inst.operands[0].imm;
}
11076
/* Thumb CPSIE/CPSID (interrupt enable/disable, argument parse).  */
static void
do_t_cpsi (void)
{
  /* Not permitted inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* 32-bit form: salvage the imod (enable/disable) bits from the
	 16-bit opcode template before replacing it wholesale.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
11104
11105 /* THUMB CPY instruction (argument parse). */
11106
11107 static void
11108 do_t_cpy (void)
11109 {
11110 if (inst.size_req == 4)
11111 {
11112 inst.instruction = THUMB_OP32 (T_MNEM_mov);
11113 inst.instruction |= inst.operands[0].reg << 8;
11114 inst.instruction |= inst.operands[1].reg;
11115 }
11116 else
11117 {
11118 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
11119 inst.instruction |= (inst.operands[0].reg & 0x7);
11120 inst.instruction |= inst.operands[1].reg << 3;
11121 }
11122 }
11123
11124 static void
11125 do_t_cbz (void)
11126 {
11127 set_it_insn_type (OUTSIDE_IT_INSN);
11128 constraint (inst.operands[0].reg > 7, BAD_HIREG);
11129 inst.instruction |= inst.operands[0].reg;
11130 inst.reloc.pc_rel = 1;
11131 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
11132 }
11133
/* Thumb-2 DBG hint (argument parse).  */
static void
do_t_dbg (void)
{
  /* The debug hint option goes in the low bits of the opcode.  */
  inst.instruction |= inst.operands[0].imm;
}
11139
11140 static void
11141 do_t_div (void)
11142 {
11143 unsigned Rd, Rn, Rm;
11144
11145 Rd = inst.operands[0].reg;
11146 Rn = (inst.operands[1].present
11147 ? inst.operands[1].reg : Rd);
11148 Rm = inst.operands[2].reg;
11149
11150 reject_bad_reg (Rd);
11151 reject_bad_reg (Rn);
11152 reject_bad_reg (Rm);
11153
11154 inst.instruction |= Rd << 8;
11155 inst.instruction |= Rn << 16;
11156 inst.instruction |= Rm;
11157 }
11158
/* Thumb hint instructions (NOP, YIELD, WFE, WFI, SEV, ...):
   use the wide encoding only when explicitly requested via .w
   under unified syntax, otherwise the 16-bit form.  */
static void
do_t_hint (void)
{
  if (unified_syntax && inst.size_req == 4)
    inst.instruction = THUMB_OP32 (inst.instruction);
  else
    inst.instruction = THUMB_OP16 (inst.instruction);
}
11167
/* Thumb IT instruction (argument parse).  Encodes the instruction and
   records the IT state in now_it so the following instructions can be
   validated against it.  */
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_it_insn_type (IT_INSN);
  now_it.mask = (inst.instruction & 0xf) | 0x10;
  now_it.cc = cond;
  now_it.warn_deprecated = FALSE;

  /* If the condition is a negative condition, invert the mask.
     The position of the lowest set mask bit gives the block length;
     the bits above it encode the T/E pattern and are the ones flipped
     here (the terminating bit itself is left alone).  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      if ((mask & 0x7) == 0)
	{
	  /* No conversion needed.  */
	  now_it.block_length = 1;
	}
      else if ((mask & 0x3) == 0)
	{
	  mask ^= 0x8;
	  now_it.block_length = 2;
	}
      else if ((mask & 0x1) == 0)
	{
	  mask ^= 0xC;
	  now_it.block_length = 3;
	}
      else
	{
	  mask ^= 0xE;
	  now_it.block_length = 4;
	}

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
11210
/* Helper function used for both push/pop and ldm/stm.
   BASE is the base register, MASK the register list bitmask, and
   WRITEBACK whether the base is updated.  Single-register lists are
   rewritten as equivalent ldr/str instructions.  */
static void
encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
{
  bfd_boolean load;

  /* Bit 20 of the T32 opcode distinguishes load from store.  */
  load = (inst.instruction & (1 << 20)) != 0;

  if (mask & (1 << 13))
    inst.error = _("SP not allowed in register list");

  if ((mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
		   "using write back is UNPREDICTABLE");

  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    /* Loading PC acts as a branch, so it must be the last
	       instruction of an IT block.  */
	    set_it_insn_type_last ();
	}
    }
  else
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");
    }

  if ((mask & (mask - 1)) == 0)
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* Put the single register's number into the Rt field (12-15).  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  inst.instruction |= base << 16;
}
11274
/* Encode a Thumb LDM/STM (load/store multiple).  Operand 0 is the base
   register, operand 1 holds the register list in .imm.  A 16-bit
   encoding is used when the mnemonic, base register and register list
   permit; otherwise the Thumb-2 32-bit form is emitted.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction.  16-bit forms exist
	 only for LDMIA/STMIA with a low-register list.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7)
	    {
	      /* 16-bit STMIA always writes back; 16-bit LDMIA writes
		 back exactly when the base is not in the list.  */
	      if (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask)))
		{
		  if (inst.instruction == T_MNEM_stmia
		      && (inst.operands[1].imm & mask)
		      && (inst.operands[1].imm & (mask - 1)))
		    as_warn (_("value stored for r%d is UNKNOWN"),
			     inst.operands[0].reg);

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= inst.operands[0].reg << 8;
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* This means 1 register in reg list one of 3 situations:
		     1. Instruction is stmia, but without writeback.
		     2. ldmia without writeback, but with Rn not in
			reglist.
		     3. ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     case 1 and 2 which can be converted into a 16-bit
		     str or ldr.  The SP cases are handled below.  */
		  unsigned long opcode;
		  /* First, record an error for Case 3.  */
		  if (inst.operands[1].imm & mask
		      && inst.operands[0].writeback)
		    inst.error =
			_("having the base register in the register list when "
			  "using write back is UNPREDICTABLE");

		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
			    : T_MNEM_ldr);
		  inst.instruction = THUMB_OP16 (opcode);
		  inst.instruction |= inst.operands[0].reg << 3;
		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
		  narrow = TRUE;
		}
	    }
	  else if (inst.operands[0] .reg == REG_SP)
	    {
	      /* SP-based forms map to PUSH/POP, or to a single
		 SP-relative STR/LDR when only one register is listed.  */
	      if (inst.operands[0].writeback)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_push : T_MNEM_pop);
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
		  narrow = TRUE;
		}
	    }
	}

      if (!narrow)
	{
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
				inst.operands[0].writeback);
	}
    }
  else
    {
      /* Divided (pre-UAL) syntax: only 16-bit LDMIA/STMIA exist.  */
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNKNOWN"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
11402
11403 static void
11404 do_t_ldrex (void)
11405 {
11406 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
11407 || inst.operands[1].postind || inst.operands[1].writeback
11408 || inst.operands[1].immisreg || inst.operands[1].shifted
11409 || inst.operands[1].negative,
11410 BAD_ADDR_MODE);
11411
11412 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
11413
11414 inst.instruction |= inst.operands[0].reg << 12;
11415 inst.instruction |= inst.operands[1].reg << 16;
11416 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
11417 }
11418
11419 static void
11420 do_t_ldrexd (void)
11421 {
11422 if (!inst.operands[1].present)
11423 {
11424 constraint (inst.operands[0].reg == REG_LR,
11425 _("r14 not allowed as first register "
11426 "when second register is omitted"));
11427 inst.operands[1].reg = inst.operands[0].reg + 1;
11428 }
11429 constraint (inst.operands[0].reg == inst.operands[1].reg,
11430 BAD_OVERLAP);
11431
11432 inst.instruction |= inst.operands[0].reg << 12;
11433 inst.instruction |= inst.operands[1].reg << 8;
11434 inst.instruction |= inst.operands[2].reg << 16;
11435 }
11436
/* Encode a Thumb single load/store (LDR/STR and the byte, halfword and
   signed variants).  Chooses a 16-bit encoding when the registers,
   addressing mode and size request allow it, otherwise emits the
   Thumb-2 32-bit form (unified syntax only).  */
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  /* A load whose destination is the PC must be the last instruction
     in an IT block.  */
  if (inst.operands[0].isreg
      && !inst.operands[0].preind
      && inst.operands[0].reg == REG_PC)
    set_it_insn_type_last ();

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  /* Immediate/symbolic operand: try to synthesize via a MOV or
	     a literal-pool load.  */
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Rik] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
		reject_bad_reg (inst.operands[1].imm);
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* PC- and SP-relative forms use dedicated opcodes.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.reloc.pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		/* No explicit size: let relaxation pick 16 or 32 bit.  */
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */

      /* Warning for Erratum 752419.  */
      if (opcode == T_MNEM_ldr
	  && inst.operands[0].reg == REG_SP
	  && inst.operands[1].writeback == 1
	  && !inst.operands[1].immisreg)
	{
	  if (no_cpu_selected ()
	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
	    as_warn (_("This instruction may be unpredictable "
		       "if executed on M-profile cores "
		       "with interrupts enabled."));
	}

      /* Do some validations regarding addressing modes.  */
      if (inst.operands[1].immisreg)
	reject_bad_reg (inst.operands[1].imm);

      constraint (inst.operands[1].writeback == 1
		  && inst.operands[0].reg == inst.operands[1].reg,
		  BAD_OVERLAP);

      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      check_ldr_r15_aligned ();
      return;
    }

  /* Divided (pre-UAL) syntax from here on: 16-bit encodings only.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      /* PC/SP-relative: word transfers only, store not allowed from PC.  */
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

 op16:
  /* Map immediate-offset opcodes to their register-offset
     counterparts; Rm goes in bits 6-8.  */
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
11623
11624 static void
11625 do_t_ldstd (void)
11626 {
11627 if (!inst.operands[1].present)
11628 {
11629 inst.operands[1].reg = inst.operands[0].reg + 1;
11630 constraint (inst.operands[0].reg == REG_LR,
11631 _("r14 not allowed here"));
11632 constraint (inst.operands[0].reg == REG_R12,
11633 _("r12 not allowed here"));
11634 }
11635
11636 if (inst.operands[2].writeback
11637 && (inst.operands[0].reg == inst.operands[2].reg
11638 || inst.operands[1].reg == inst.operands[2].reg))
11639 as_warn (_("base register written back, and overlaps "
11640 "one of transfer registers"));
11641
11642 inst.instruction |= inst.operands[0].reg << 12;
11643 inst.instruction |= inst.operands[1].reg << 8;
11644 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
11645 }
11646
11647 static void
11648 do_t_ldstt (void)
11649 {
11650 inst.instruction |= inst.operands[0].reg << 12;
11651 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
11652 }
11653
11654 static void
11655 do_t_mla (void)
11656 {
11657 unsigned Rd, Rn, Rm, Ra;
11658
11659 Rd = inst.operands[0].reg;
11660 Rn = inst.operands[1].reg;
11661 Rm = inst.operands[2].reg;
11662 Ra = inst.operands[3].reg;
11663
11664 reject_bad_reg (Rd);
11665 reject_bad_reg (Rn);
11666 reject_bad_reg (Rm);
11667 reject_bad_reg (Ra);
11668
11669 inst.instruction |= Rd << 8;
11670 inst.instruction |= Rn << 16;
11671 inst.instruction |= Rm;
11672 inst.instruction |= Ra << 12;
11673 }
11674
11675 static void
11676 do_t_mlal (void)
11677 {
11678 unsigned RdLo, RdHi, Rn, Rm;
11679
11680 RdLo = inst.operands[0].reg;
11681 RdHi = inst.operands[1].reg;
11682 Rn = inst.operands[2].reg;
11683 Rm = inst.operands[3].reg;
11684
11685 reject_bad_reg (RdLo);
11686 reject_bad_reg (RdHi);
11687 reject_bad_reg (Rn);
11688 reject_bad_reg (Rm);
11689
11690 inst.instruction |= RdLo << 12;
11691 inst.instruction |= RdHi << 8;
11692 inst.instruction |= Rn << 16;
11693 inst.instruction |= Rm;
11694 }
11695
/* Encode Thumb MOV/MOVS/CMP with a register, shifted-register, or
   immediate second operand.  Selects 16-bit vs 32-bit encodings, maps
   register-shift MOVs onto the separate shift instructions, and
   handles several architectural special cases (MOVS PC,LR; high-reg
   moves; deprecated SP/PC usage).  */
static void
do_t_mov_cmp (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* A MOV that writes the PC must close any IT block.  */
  if (Rn == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean narrow;
      bfd_boolean low_regs;

      low_regs = (Rn <= 7 && Rm <= 7);
      opcode = inst.instruction;
      if (in_it_block ())
	narrow = opcode != T_MNEM_movs;
      else
	narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
	  || inst.operands[1].shifted)
	narrow = FALSE;

      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
	  && !inst.operands[1].shifted
	  && Rn == REG_PC
	  && Rm == REG_LR)
	{
	  inst.instruction = T2_SUBS_PC_LR;
	  return;
	}

      if (opcode == T_MNEM_cmp)
	{
	  constraint (Rn == REG_PC, BAD_PC);
	  if (narrow)
	    {
	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
		 but valid.  */
	      warn_deprecated_sp (Rm);
	      /* R15 was documented as a valid choice for Rm in ARMv6,
		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
		 tools reject R15, so we do too.  */
	      constraint (Rm == REG_PC, BAD_PC);
	    }
	  else
	    reject_bad_reg (Rm);
	}
      else if (opcode == T_MNEM_mov
	       || opcode == T_MNEM_movs)
	{
	  if (inst.operands[1].isreg)
	    {
	      if (opcode == T_MNEM_movs)
		{
		  reject_bad_reg (Rn);
		  reject_bad_reg (Rm);
		}
	      else if (narrow)
		{
		  /* This is mov.n.  */
		  if ((Rn == REG_SP || Rn == REG_PC)
		      && (Rm == REG_SP || Rm == REG_PC))
		    {
		      as_tsktsk (_("Use of r%u as a source register is "
				 "deprecated when r%u is the destination "
				 "register."), Rm, Rn);
		    }
		}
	      else
		{
		  /* This is mov.w.  */
		  constraint (Rn == REG_PC, BAD_PC);
		  constraint (Rm == REG_PC, BAD_PC);
		  constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
		}
	    }
	  else
	    reject_bad_reg (Rn);
	}

      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand.  */
	  if (!in_it_block () && opcode == T_MNEM_mov)
	    narrow = 0;
	  if (low_regs && narrow)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= Rn << 8;
	      if (inst.size_req == 2)
		{
		  /* Do not clobber group relocations already attached.  */
		  if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		      || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		    inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
		}
	      else
		inst.relax = opcode;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= Rn << r0off;
	      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
	       && (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs))
	{
	  /* Register shifts are encoded as separate shift instructions.  */
	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);

	  if (in_it_block ())
	    narrow = !flags;
	  else
	    narrow = flags;

	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (!low_regs || inst.operands[1].imm > 7)
	    narrow = FALSE;

	  if (Rn != Rm)
	    narrow = FALSE;

	  switch (inst.operands[1].shift_kind)
	    {
	    case SHIFT_LSL:
	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
	      break;
	    case SHIFT_ASR:
	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
	      break;
	    case SHIFT_LSR:
	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
	      break;
	    case SHIFT_ROR:
	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
	      break;
	    default:
	      abort ();
	    }

	  inst.instruction = opcode;
	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= inst.operands[1].imm << 3;
	    }
	  else
	    {
	      if (flags)
		inst.instruction |= CONDS_BIT;

	      inst.instruction |= Rn << 8;
	      inst.instruction |= Rm << 16;
	      inst.instruction |= inst.operands[1].imm;
	    }
	}
      else if (!narrow)
	{
	  /* Some mov with immediate shift have narrow variants.
	     Register shifts are handled above.  */
	  if (low_regs && inst.operands[1].shifted
	      && (inst.instruction == T_MNEM_mov
		  || inst.instruction == T_MNEM_movs))
	    {
	      if (in_it_block ())
		narrow = (inst.instruction == T_MNEM_mov);
	      else
		narrow = (inst.instruction == T_MNEM_movs);
	    }

	  if (narrow)
	    {
	      switch (inst.operands[1].shift_kind)
		{
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		default: narrow = FALSE; break;
		}
	    }

	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
      else
	switch (inst.instruction)
	  {
	  case T_MNEM_mov:
	    /* In v4t or v5t a move of two lowregs produces unpredictable
	       results.  Don't allow this.  */
	    if (low_regs)
	      {
		/* NOTE(review): the message below lacks the _() i18n
		   wrapper — confirm against current upstream before
		   changing.  */
		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
			    "MOV Rd, Rs with two low registers is not "
			    "permitted on this architecture");
		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
					arm_ext_v6);
	      }

	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (Rn & 0x8) << 4;
	    inst.instruction |= (Rn & 0x7);
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_movs:
	    /* We know we have low registers at this point.
	       Generate LSLS Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_LSL_I;
	    inst.instruction |= Rn;
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_cmp:
	    if (low_regs)
	      {
		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= Rn;
		inst.instruction |= Rm << 3;
	      }
	    else
	      {
		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (Rn & 0x8) << 4;
		inst.instruction |= (Rn & 0x7);
		inst.instruction |= Rm << 3;
	      }
	    break;
	  }
      return;
    }

  /* Divided (pre-UAL) syntax from here on.  */
  inst.instruction = THUMB_OP16 (inst.instruction);

  /* PR 10443: Do not silently ignore shifted operands.  */
  constraint (inst.operands[1].shifted,
	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));

  if (inst.operands[1].isreg)
    {
      if (Rn < 8 && Rm < 8)
	{
	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
	     since a MOV instruction produces unpredictable results.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_ADD_I3;
	  else
	    inst.instruction = T_OPCODE_CMP_LR;

	  inst.instruction |= Rn;
	  inst.instruction |= Rm << 3;
	}
      else
	{
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_MOV_HR;
	  else
	    inst.instruction = T_OPCODE_CMP_HR;
	  do_t_cpy ();
	}
    }
  else
    {
      constraint (Rn > 7,
		  _("only lo regs allowed with immediate"));
      inst.instruction |= Rn << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
11988
11989 static void
11990 do_t_mov16 (void)
11991 {
11992 unsigned Rd;
11993 bfd_vma imm;
11994 bfd_boolean top;
11995
11996 top = (inst.instruction & 0x00800000) != 0;
11997 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
11998 {
11999 constraint (top, _(":lower16: not allowed this instruction"));
12000 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
12001 }
12002 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
12003 {
12004 constraint (!top, _(":upper16: not allowed this instruction"));
12005 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
12006 }
12007
12008 Rd = inst.operands[0].reg;
12009 reject_bad_reg (Rd);
12010
12011 inst.instruction |= Rd << 8;
12012 if (inst.reloc.type == BFD_RELOC_UNUSED)
12013 {
12014 imm = inst.reloc.exp.X_add_number;
12015 inst.instruction |= (imm & 0xf000) << 4;
12016 inst.instruction |= (imm & 0x0800) << 15;
12017 inst.instruction |= (imm & 0x0700) << 4;
12018 inst.instruction |= (imm & 0x00ff);
12019 }
12020 }
12021
/* Encode Thumb MVN/MVNS/TST/CMN with a register or immediate
   operand, choosing between 16-bit and 32-bit encodings.  */
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* CMP/CMN merely reject the PC; the others also reject SP.  */
  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn
	       || inst.instruction == T_MNEM_tst)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Divided syntax: 16-bit, low registers, no shifts.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
12101
/* Encode Thumb MRS.  Operand 1 is either a banked register (isreg set)
   or a PSR-mask immediate; VFP's overloaded MRS form is tried first.  */
static void
do_t_mrs (void)
{
  unsigned Rd;

  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      /* Banked register form: validate and split the encoded banked-reg
	 value across the SYSm/R fields.  */
      unsigned br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698:  The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bfd_boolean m_profile =
	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
	  constraint ((flags != 0) && m_profile, _("selected processor does "
			"not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      inst.instruction |= 0xf0000;
    }
}
12149
/* Encode Thumb MSR (register source only — the immediate form does not
   exist in Thumb).  Operand 0 is the PSR/banked-register destination
   mask, operand 1 is the source register.  VFP's overloaded MSR form
   is tried first.  */
static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698:  The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bfd_boolean m_profile =
	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		    && (bits & ~(PSR_s | PSR_f)) != 0)
		   || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		       && bits != PSR_f)) && m_profile,
		  _("selected processor does not support requested special "
		    "purpose register"));
    }
  else
    constraint ((flags & 0xff) != 0, _("selected processor does not support "
				       "requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  /* Scatter the mask bits into the R, mask and SYSm fields.  */
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
12196
12197 static void
12198 do_t_mul (void)
12199 {
12200 bfd_boolean narrow;
12201 unsigned Rd, Rn, Rm;
12202
12203 if (!inst.operands[2].present)
12204 inst.operands[2].reg = inst.operands[0].reg;
12205
12206 Rd = inst.operands[0].reg;
12207 Rn = inst.operands[1].reg;
12208 Rm = inst.operands[2].reg;
12209
12210 if (unified_syntax)
12211 {
12212 if (inst.size_req == 4
12213 || (Rd != Rn
12214 && Rd != Rm)
12215 || Rn > 7
12216 || Rm > 7)
12217 narrow = FALSE;
12218 else if (inst.instruction == T_MNEM_muls)
12219 narrow = !in_it_block ();
12220 else
12221 narrow = in_it_block ();
12222 }
12223 else
12224 {
12225 constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
12226 constraint (Rn > 7 || Rm > 7,
12227 BAD_HIREG);
12228 narrow = TRUE;
12229 }
12230
12231 if (narrow)
12232 {
12233 /* 16-bit MULS/Conditional MUL. */
12234 inst.instruction = THUMB_OP16 (inst.instruction);
12235 inst.instruction |= Rd;
12236
12237 if (Rd == Rn)
12238 inst.instruction |= Rm << 3;
12239 else if (Rd == Rm)
12240 inst.instruction |= Rn << 3;
12241 else
12242 constraint (1, _("dest must overlap one source register"));
12243 }
12244 else
12245 {
12246 constraint (inst.instruction != T_MNEM_mul,
12247 _("Thumb-2 MUL must not set flags"));
12248 /* 32-bit MUL. */
12249 inst.instruction = THUMB_OP32 (inst.instruction);
12250 inst.instruction |= Rd << 8;
12251 inst.instruction |= Rn << 16;
12252 inst.instruction |= Rm << 0;
12253
12254 reject_bad_reg (Rd);
12255 reject_bad_reg (Rn);
12256 reject_bad_reg (Rm);
12257 }
12258 }
12259
12260 static void
12261 do_t_mull (void)
12262 {
12263 unsigned RdLo, RdHi, Rn, Rm;
12264
12265 RdLo = inst.operands[0].reg;
12266 RdHi = inst.operands[1].reg;
12267 Rn = inst.operands[2].reg;
12268 Rm = inst.operands[3].reg;
12269
12270 reject_bad_reg (RdLo);
12271 reject_bad_reg (RdHi);
12272 reject_bad_reg (Rn);
12273 reject_bad_reg (Rm);
12274
12275 inst.instruction |= RdLo << 12;
12276 inst.instruction |= RdHi << 8;
12277 inst.instruction |= Rn << 16;
12278 inst.instruction |= Rm;
12279
12280 if (RdLo == RdHi)
12281 as_tsktsk (_("rdhi and rdlo must be different"));
12282 }
12283
12284 static void
12285 do_t_nop (void)
12286 {
12287 set_it_insn_type (NEUTRAL_IT_INSN);
12288
12289 if (unified_syntax)
12290 {
12291 if (inst.size_req == 4 || inst.operands[0].imm > 15)
12292 {
12293 inst.instruction = THUMB_OP32 (inst.instruction);
12294 inst.instruction |= inst.operands[0].imm;
12295 }
12296 else
12297 {
12298 /* PR9722: Check for Thumb2 availability before
12299 generating a thumb2 nop instruction. */
12300 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
12301 {
12302 inst.instruction = THUMB_OP16 (inst.instruction);
12303 inst.instruction |= inst.operands[0].imm << 4;
12304 }
12305 else
12306 inst.instruction = 0x46c0;
12307 }
12308 }
12309 else
12310 {
12311 constraint (inst.operands[0].present,
12312 _("Thumb does not support NOP with hints"));
12313 inst.instruction = 0x46c0;
12314 }
12315 }
12316
12317 static void
12318 do_t_neg (void)
12319 {
12320 if (unified_syntax)
12321 {
12322 bfd_boolean narrow;
12323
12324 if (THUMB_SETS_FLAGS (inst.instruction))
12325 narrow = !in_it_block ();
12326 else
12327 narrow = in_it_block ();
12328 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
12329 narrow = FALSE;
12330 if (inst.size_req == 4)
12331 narrow = FALSE;
12332
12333 if (!narrow)
12334 {
12335 inst.instruction = THUMB_OP32 (inst.instruction);
12336 inst.instruction |= inst.operands[0].reg << 8;
12337 inst.instruction |= inst.operands[1].reg << 16;
12338 }
12339 else
12340 {
12341 inst.instruction = THUMB_OP16 (inst.instruction);
12342 inst.instruction |= inst.operands[0].reg;
12343 inst.instruction |= inst.operands[1].reg << 3;
12344 }
12345 }
12346 else
12347 {
12348 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
12349 BAD_HIREG);
12350 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
12351
12352 inst.instruction = THUMB_OP16 (inst.instruction);
12353 inst.instruction |= inst.operands[0].reg;
12354 inst.instruction |= inst.operands[1].reg << 3;
12355 }
12356 }
12357
12358 static void
12359 do_t_orn (void)
12360 {
12361 unsigned Rd, Rn;
12362
12363 Rd = inst.operands[0].reg;
12364 Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
12365
12366 reject_bad_reg (Rd);
12367 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
12368 reject_bad_reg (Rn);
12369
12370 inst.instruction |= Rd << 8;
12371 inst.instruction |= Rn << 16;
12372
12373 if (!inst.operands[2].isreg)
12374 {
12375 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
12376 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
12377 }
12378 else
12379 {
12380 unsigned Rm;
12381
12382 Rm = inst.operands[2].reg;
12383 reject_bad_reg (Rm);
12384
12385 constraint (inst.operands[2].shifted
12386 && inst.operands[2].immisreg,
12387 _("shift must be constant"));
12388 encode_thumb32_shifted_operand (2);
12389 }
12390 }
12391
/* Encode the Thumb-2 PKHBT instruction: Rd, Rn, Rm with an optional
   LSL shift (operand 3) split across the imm3:imm2 fields.  */

static void
do_t_pkhbt (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  if (inst.operands[3].present)
    {
      unsigned int val = inst.reloc.exp.X_add_number;
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));
      /* Shift amount: bits 4:2 go to imm3 (bits 14:12), bits 1:0 to
	 imm2 (bits 7:6).  */
      inst.instruction |= (val & 0x1c) << 10;
      inst.instruction |= (val & 0x03) << 6;
    }
}
12417
/* Encode the Thumb-2 PKHTB instruction by reusing the PKHBT encoder.
   Without a shift, PKHTB Rd, Rn, Rm is the same operation as
   PKHBT Rd, Rm, Rn, so the tb bit is cleared and the source
   registers are exchanged.  */

static void
do_t_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      unsigned Rtmp;

      /* Clear the tb bit so the result encodes as PKHBT.  */
      inst.instruction &= ~0x00000020;

      /* PR 10168.  Swap the Rm and Rn registers.  */
      Rtmp = inst.operands[1].reg;
      inst.operands[1].reg = inst.operands[2].reg;
      inst.operands[2].reg = Rtmp;
    }
  do_t_pkhbt ();
}
12434
/* Encode the Thumb-2 PLD (preload) instruction.  Operand 0 is the
   address; a register index, if present, must not be SP or PC.  */

static void
do_t_pld (void)
{
  if (inst.operands[0].immisreg)
    reject_bad_reg (inst.operands[0].imm);

  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
12443
/* Encode Thumb PUSH/POP.  Chooses, in order of preference: the 16-bit
   encoding (low registers, optionally plus LR for push / PC for pop),
   or the 32-bit LDM/STM form when unified syntax allows it.  */

static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  /* Only low registers: plain 16-bit encoding.  */
  if (inst.size_req != 4 && (mask & ~0xff) == 0)
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if (inst.size_req != 4
	   && (mask & ~0xff) == (1 << (inst.instruction == T_MNEM_push
				       ? REG_LR : REG_PC)))
    {
      /* Low registers plus exactly LR (push) or PC (pop): 16-bit
	 encoding with the M/P bit set.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      /* Fall back to the 32-bit LDMIA/STMDB form with SP writeback.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_ldmstm (13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
12476
12477 static void
12478 do_t_rbit (void)
12479 {
12480 unsigned Rd, Rm;
12481
12482 Rd = inst.operands[0].reg;
12483 Rm = inst.operands[1].reg;
12484
12485 reject_bad_reg (Rd);
12486 reject_bad_reg (Rm);
12487
12488 inst.instruction |= Rd << 8;
12489 inst.instruction |= Rm << 16;
12490 inst.instruction |= Rm;
12491 }
12492
/* Encode Thumb REV/REV16/REVSH, choosing the 16-bit encoding for low
   registers when allowed, otherwise the 32-bit one.  In the 32-bit
   encoding Rm occupies both the Rn and Rm fields.  */

static void
do_t_rev (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (Rd <= 7 && Rm <= 7
      && inst.size_req != 4)
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm << 16;
      inst.instruction |= Rm;
    }
  else
    inst.error = BAD_HIREG;
}
12521
12522 static void
12523 do_t_rrx (void)
12524 {
12525 unsigned Rd, Rm;
12526
12527 Rd = inst.operands[0].reg;
12528 Rm = inst.operands[1].reg;
12529
12530 reject_bad_reg (Rd);
12531 reject_bad_reg (Rm);
12532
12533 inst.instruction |= Rd << 8;
12534 inst.instruction |= Rm;
12535 }
12536
/* Encode Thumb RSB (reverse subtract).  "rsb Rd, Rs, #0" can shrink
   to the 16-bit NEG encoding; all other forms are 32-bit.  */

static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Bit 20 is the S (flag-setting) bit; narrowing to the 16-bit
	 NEG is only legal when flag-setting matches the IT context.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      /* Only an immediate of exactly zero can use the NEG encoding.  */
      if (inst.reloc.exp.X_op != O_constant
	  || inst.reloc.exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.reloc.type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  /* Switch the opcode to the modified-immediate encoding.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
12591
/* Encode the Thumb SETEND instruction.  The BE variant sets bit 3.
   SETEND must not appear inside an IT block, and is deprecated from
   ARMv8 on.  */

static void
do_t_setend (void)
{
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    as_tsktsk (_("setend use is deprecated for ARMv8"));

  set_it_insn_type (OUTSIDE_IT_INSN);
  if (inst.operands[0].imm)
    inst.instruction |= 0x8;
}
12603
/* Encode the Thumb shift instructions (ASR, LSL, LSR, ROR and their
   flag-setting variants), selecting between the 16-bit and 32-bit
   encodings and, for wide immediate shifts, rewriting them as
   MOV(S) Rd, Rm, <shift>.  */

static void
do_t_shift (void)
{
  /* Two-operand form: the destination doubles as the first source.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* Flag-setting forms may only be narrow outside an IT block;
	 non-flag-setting forms only inside one.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      /* There is no 16-bit ROR-by-immediate encoding.  */
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      /* The 16-bit register-shift form requires Rd == Rn and a low
	 shift-count register.  */
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* Wide immediate shifts are really MOV(S) with a shifted
		 register operand.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.reloc.type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Pre-UAL syntax: low registers only, no flag-setting suffix,
	 16-bit encodings only.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;

	  /* PR 12854: Error on extraneous shifts.  */
	  constraint (inst.operands[2].shifted,
		      _("extraneous shift as part of operand to shift insn"));
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
12751
12752 static void
12753 do_t_simd (void)
12754 {
12755 unsigned Rd, Rn, Rm;
12756
12757 Rd = inst.operands[0].reg;
12758 Rn = inst.operands[1].reg;
12759 Rm = inst.operands[2].reg;
12760
12761 reject_bad_reg (Rd);
12762 reject_bad_reg (Rn);
12763 reject_bad_reg (Rm);
12764
12765 inst.instruction |= Rd << 8;
12766 inst.instruction |= Rn << 16;
12767 inst.instruction |= Rm;
12768 }
12769
/* As do_t_simd, but with the source operands exchanged: the second
   assembly operand is encoded in the Rm field and the third in the Rn
   field.  Used by instructions whose operand order is reversed
   relative to their encoding.  */

static void
do_t_simd2 (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  /* Note the deliberate swap: operand 1 -> Rm, operand 2 -> Rn.  */
  Rm = inst.operands[1].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
}
12787
/* Encode the Thumb-2 SMC (secure monitor call) instruction.  The
   16-bit immediate is scattered over three fields of the encoding.  */

static void
do_t_smc (void)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
	      _("SMC is not permitted on this architecture"));
  constraint (inst.reloc.exp.X_op != O_constant,
	      _("expression too complex"));
  /* The immediate is encoded directly; no fixup is needed.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
  inst.instruction |= (value & 0xf000) >> 12;
  inst.instruction |= (value & 0x0ff0);
  inst.instruction |= (value & 0x000f) << 16;
  /* PR gas/15623: SMC instructions must be last in an IT block.  */
  set_it_insn_type_last ();
}
12803
/* Encode the Thumb-2 HVC (hypervisor call) instruction with its
   16-bit immediate split over two fields.
   NOTE(review): unlike do_t_smc there is no O_constant check here;
   presumably the operand parser guarantees a constant — verify.  */

static void
do_t_hvc (void)
{
  unsigned int value = inst.reloc.exp.X_add_number;

  inst.reloc.type = BFD_RELOC_UNUSED;
  inst.instruction |= (value & 0x0fff);
  inst.instruction |= (value & 0xf000) << 4;
}
12813
/* Common encoder for Thumb-2 SSAT and USAT.  BIAS is subtracted from
   the saturation position operand (1 for SSAT, whose position is
   encoded as sat_imm = position - 1; 0 for USAT).  */

static void
do_t_ssat_usat (int bias)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  /* Optional shift (operand 3): LSL or ASR by a constant.  */
  if (inst.operands[3].present)
    {
      offsetT shift_amount = inst.reloc.exp.X_add_number;

      inst.reloc.type = BFD_RELOC_UNUSED;

      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit.  */

	  /* Shift amount bits 4:2 -> imm3, bits 1:0 -> imm2.  */
	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
12851
/* Encode Thumb-2 SSAT: the saturation position is encoded biased by 1.  */

static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
12857
/* Encode Thumb-2 SSAT16.  As with SSAT, the saturation position is
   encoded biased by 1; there is no shift operand.  */

static void
do_t_ssat16 (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - 1;
  inst.instruction |= Rn << 16;
}
12873
/* Encode Thumb-2 STREX: Rd (status), Rt, [Rn {, #imm}].  The address
   operand must be a plain register with an optional immediate offset;
   the offset is resolved through a U8 fixup.  */

static void
do_t_strex (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
12890
/* Encode Thumb-2 STREXD: Rd, Rt, Rt2, [Rn].  If Rt2 is omitted it
   defaults to Rt+1.  The status register Rd must not overlap any of
   the other registers.  */

static void
do_t_strexd (void)
{
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[1].reg + 1;

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 16;
}
12907
12908 static void
12909 do_t_sxtah (void)
12910 {
12911 unsigned Rd, Rn, Rm;
12912
12913 Rd = inst.operands[0].reg;
12914 Rn = inst.operands[1].reg;
12915 Rm = inst.operands[2].reg;
12916
12917 reject_bad_reg (Rd);
12918 reject_bad_reg (Rn);
12919 reject_bad_reg (Rm);
12920
12921 inst.instruction |= Rd << 8;
12922 inst.instruction |= Rn << 16;
12923 inst.instruction |= Rm;
12924 inst.instruction |= inst.operands[3].imm << 4;
12925 }
12926
/* Encode Thumb SXTH/SXTB/UXTH/UXTB.  The 16-bit form is used for low
   registers with no rotation; the 32-bit form carries an optional
   rotation in bits 4-5.  */

static void
do_t_sxth (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  /* inst.instruction <= 0xffff means the opcode table entry is the
     16-bit encoding (not yet widened).  */
  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
12962
/* Encode Thumb SVC/SWI.  The immediate is handled via a fixup.  */

static void
do_t_swi (void)
{
  /* We have to do the following check manually as ARM_EXT_OS only applies
     to ARM_EXT_V6M.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6m))
    {
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_os)
	  /* This only applies to the v6m, however, not later architectures.  */
	  && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7))
	as_bad (_("SVC is not permitted on this architecture"));
      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, arm_ext_os);
    }

  inst.reloc.type = BFD_RELOC_ARM_SWI;
}
12979
/* Encode Thumb-2 TBB/TBH (table branch).  Bit 4 of the opcode
   distinguishes the halfword form, which requires an LSL #1 index.  */

static void
do_t_tb (void)
{
  unsigned Rn, Rm;
  int half;

  half = (inst.instruction & 0x10) != 0;
  /* Table branches must be the last instruction in an IT block.  */
  set_it_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  /* Rn == PC is allowed here (PC-relative table), so only SP is
     rejected for the base; the index must not be SP or PC.  */
  constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
13001
/* Encode Thumb UDF (permanently undefined).  The operand defaults to
   zero; values over 255 (or an explicit .w) force the 32-bit
   encoding, whose immediate is split into imm4:imm12.  */

static void
do_t_udf (void)
{
  if (!inst.operands[0].present)
    inst.operands[0].imm = 0;

  if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
    {
      constraint (inst.size_req == 2,
		  _("immediate value out of range"));
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
      inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
    }
  else
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].imm;
    }

  /* UDF may appear anywhere relative to an IT block.  */
  set_it_insn_type (NEUTRAL_IT_INSN);
}
13024
13025
/* Encode Thumb-2 USAT: the saturation position is encoded unbiased.  */

static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
13031
/* Encode Thumb-2 USAT16.  The saturation position is encoded
   unbiased; there is no shift operand.  */

static void
do_t_usat16 (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm;
  inst.instruction |= Rn << 16;
}
13047
13048 /* Neon instruction encoder helpers. */
13049
13050 /* Encodings for the different types for various Neon opcodes. */
13051
/* An "invalid" code for the following tables.  */
#define N_INV -1u

/* One row of the Neon opcode-overload table: the three alternative
   base encodings a single overloaded mnemonic can map to, selected by
   operand/type class.  */
struct neon_tab_entry
{
  unsigned integer;
  unsigned float_or_poly;
  unsigned scalar_or_imm;
};
13061
/* Map overloaded Neon opcodes to their respective encodings.  Each X()
   entry supplies the integer, float/polynomial, and scalar/immediate
   base encodings for one mnemonic (N_INV where a form does not exist).
   The table is expanded twice below: once into the N_MNEM_* enum and
   once into neon_enc_tab.  */
#define NEON_ENC_TAB					\
  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */  	\
  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
  X(vfma,	N_INV, 0x0000c10, N_INV),		\
  X(vfms,	N_INV, 0x0200c10, N_INV),		\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
  X(vqrdmlah,   0x3000b10, N_INV,     0x0800e40),	\
  X(vqrdmlsh,   0x3000c10, N_INV,     0x0800f40),	\
  X(vshl,	0x0000400, N_INV,     0x0800510),	\
  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
  X(vand,	0x0000110, N_INV,     0x0800030),	\
  X(vbic,	0x0100110, N_INV,     0x0800030),	\
  X(veor,	0x1000110, N_INV,     N_INV),		\
  X(vorn,	0x0300110, N_INV,     0x0800010),	\
  X(vorr,	0x0200110, N_INV,     0x0800010),	\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),		\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
  X(vst2,	0x0000100, 0x0800100, N_INV),		\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
  X(vst3,	0x0000200, 0x0800200, N_INV),		\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
  X(vst4,	0x0000300, 0x0800300, N_INV),		\
  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
  X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
  X(vnmla,      0xe100a40, 0xe100b40, N_INV),		\
  X(vnmls,      0xe100a00, 0xe100b00, N_INV),		\
  X(vfnma,      0xe900a40, 0xe900b40, N_INV),		\
  X(vfnms,      0xe900a00, 0xe900b00, N_INV),		\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
  X(vcmpez,	0xeb50ac0, 0xeb50bc0, N_INV),		\
  X(vseleq,	0xe000a00, N_INV,     N_INV),		\
  X(vselvs,	0xe100a00, N_INV,     N_INV),		\
  X(vselge,	0xe200a00, N_INV,     N_INV),		\
  X(vselgt,	0xe300a00, N_INV,     N_INV),		\
  X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),		\
  X(vminnm,	0xe800a40, 0x3200f10, N_INV),		\
  X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),		\
  X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),		\
  X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),		\
  X(aes,	0x3b00300, N_INV,     N_INV),		\
  X(sha3op,	0x2000c00, N_INV,     N_INV),		\
  X(sha1h,	0x3b902c0, N_INV,     N_INV),		\
  X(sha2op,     0x3ba0380, N_INV,     N_INV)

/* Mnemonic identifiers, one per NEON_ENC_TAB row.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};

/* The encoding table itself, indexed by enum neon_opc.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};
13153
/* Accessors selecting one of the three neon_enc_tab columns for an
   opcode; the low 28 bits of X index the table, the high bits are
   preserved by the SINGLE/DOUBLE/FPV8 variants.
   Do not use these macros; instead, use NEON_ENCODE defined below.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
#define NEON_ENC_FPV8_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))

/* Replace inst.instruction with the encoding selected by TYPE and
   mark the instruction as Neon (for suffix checking below).  */
#define NEON_ENCODE(type, inst)					\
  do								\
    {								\
      inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
      inst.is_neon = 1;						\
    }								\
  while (0)

/* Reject a type suffix (e.g. ".s32") on an instruction that never
   went through NEON_ENCODE and hence cannot use one.  */
#define check_neon_suffixes						\
  do									\
    {									\
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
	{								\
	  as_bad (_("invalid neon suffix for non neon instruction"));	\
	  return;							\
	}								\
    }									\
  while (0)
13189
13190 /* Define shapes for instruction operands. The following mnemonic characters
13191 are used in this table:
13192
13193 F - VFP S<n> register
13194 D - Neon D<n> register
13195 Q - Neon Q<n> register
13196 I - Immediate
13197 S - Scalar
13198 R - ARM register
13199 L - D<n> register list
13200
13201 This table is used to generate various data:
13202 - enumerations of the form NS_DDR to be used as arguments to
13203 neon_select_shape.
13204 - a table classifying shapes into single, double, quad, mixed.
13205 - a table used to drive neon_select_shape. */
13206
/* The master list of operand shapes; expanded below into the
   neon_shape enum, the shape-class table, and neon_shape_tab.  */
#define NEON_SHAPE_DEF			\
  X(3, (D, D, D), DOUBLE),		\
  X(3, (Q, Q, Q), QUAD),		\
  X(3, (D, D, I), DOUBLE),		\
  X(3, (Q, Q, I), QUAD),		\
  X(3, (D, D, S), DOUBLE),		\
  X(3, (Q, Q, S), QUAD),		\
  X(2, (D, D), DOUBLE),			\
  X(2, (Q, Q), QUAD),			\
  X(2, (D, S), DOUBLE),			\
  X(2, (Q, S), QUAD),			\
  X(2, (D, R), DOUBLE),			\
  X(2, (Q, R), QUAD),			\
  X(2, (D, I), DOUBLE),			\
  X(2, (Q, I), QUAD),			\
  X(3, (D, L, D), DOUBLE),		\
  X(2, (D, Q), MIXED),			\
  X(2, (Q, D), MIXED),			\
  X(3, (D, Q, I), MIXED),		\
  X(3, (Q, D, I), MIXED),		\
  X(3, (Q, D, D), MIXED),		\
  X(3, (D, Q, Q), MIXED),		\
  X(3, (Q, Q, D), MIXED),		\
  X(3, (Q, D, S), MIXED),		\
  X(3, (D, Q, S), MIXED),		\
  X(4, (D, D, D, I), DOUBLE),		\
  X(4, (Q, Q, Q, I), QUAD),		\
  X(2, (F, F), SINGLE),			\
  X(3, (F, F, F), SINGLE),		\
  X(2, (F, I), SINGLE),			\
  X(2, (F, D), MIXED),			\
  X(2, (D, F), MIXED),			\
  X(3, (F, F, I), MIXED),		\
  X(4, (R, R, F, F), SINGLE),		\
  X(4, (F, F, R, R), SINGLE),		\
  X(3, (D, R, R), DOUBLE),		\
  X(3, (R, R, D), DOUBLE),		\
  X(2, (S, R), SINGLE),			\
  X(2, (R, S), SINGLE),			\
  X(2, (F, R), SINGLE),			\
  X(2, (R, F), SINGLE)

/* Paste the element letters into NS_* enumerator names.  */
#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D

#define X(N, L, C) S##N L

/* One enumerator per shape, e.g. NS_DDD, plus NS_NULL ("no match").  */
enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4
13265
/* Coarse classification of each shape, used e.g. to decide whether
   the Q bit should be set (see neon_quad).  */
enum neon_shape_class
{
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};

#define X(N, L, C) SC_##C

/* Class of each shape, indexed by enum neon_shape.  */
static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X
13282
/* The kinds of operand element a shape can contain (VFP single reg,
   Neon D/Q reg, immediate, scalar, ARM reg, D-register list).  */
enum neon_shape_el
{
  SE_F,
  SE_D,
  SE_Q,
  SE_I,
  SE_S,
  SE_R,
  SE_L
};

/* Register widths of above, in bits (0 for non-register elements).  */
static unsigned neon_shape_el_size[] =
{
  32,
  64,
  128,
  0,
  32,
  32,
  0
};
13305
/* Per-shape operand count and element kinds, driving
   neon_select_shape below.  */
struct neon_shape_info
{
  unsigned els;
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
};

/* Paste the element letters into SE_* enumerators.  */
#define S2(A,B)		{ SE_##A, SE_##B }
#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

/* Shape details, indexed by enum neon_shape.  */
static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
13327
13328 /* Bit masks used in type checking given instructions.
13329 'N_EQK' means the type must be the same as (or based on in some way) the key
13330 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
13331 set, various other bits can be set as well in order to modify the meaning of
13332 the type constraint. */
13333
/* Bit masks used in type checking given instructions.  Note that the
   low-order modifier bits (N_DBL..N_SIZ) deliberately reuse the same
   values as the type bits; they are only interpreted as modifiers
   when N_EQK is also set.  */
enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_P64	 = 0x0200000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_P64
};

/* All of the N_EQK modifier bits together.  */
#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

/* Convenience groupings of the type bits.  */
#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_SUF_32   (N_SU_32 | N_F32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F32)
13385
13386 /* Select a "shape" for the current instruction (describing register types or
13387 sizes) from a list of alternatives. Return NS_NULL if the current instruction
13388 doesn't fit. For non-polymorphic shapes, checking is usually done as a
13389 function of operand parsing, so this function doesn't need to be called.
13390 Shapes should be listed in order of decreasing length. */
13391
/* Select a "shape" for the current instruction (describing register
   types or sizes) from the NS_NULL-terminated list of alternatives in
   the variadic arguments.  Return the first shape that matches the
   parsed operands, or NS_NULL (after reporting an error) if none fits.
   Shapes should be listed in order of decreasing length.  */

static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      /* Check each expected element kind against the parsed operand's
	 register class flags.  */
      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  switch (neon_shape_tab[shape].el[j])
	    {
	    case SE_F:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad))
		matches = 0;
	      break;

	    case SE_D:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_L:
	      /* Register lists are not checked further here.  */
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
	/* We've matched all the entries in the shape table, and we don't
	   have any left over operands which have not been matched.  */
	break;
    }

  va_end (ap);

  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
13483
13484 /* True if SHAPE is predominantly a quadword operation (most of the time, this
13485 means the Q bit should be set). */
13486
13487 static int
13488 neon_quad (enum neon_shape shape)
13489 {
13490 return neon_shape_class[shape] == SC_QUAD;
13491 }
13492
13493 static void
13494 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
13495 unsigned *g_size)
13496 {
13497 /* Allow modification to be made to types which are constrained to be
13498 based on the key element, based on bits set alongside N_EQK. */
13499 if ((typebits & N_EQK) != 0)
13500 {
13501 if ((typebits & N_HLF) != 0)
13502 *g_size /= 2;
13503 else if ((typebits & N_DBL) != 0)
13504 *g_size *= 2;
13505 if ((typebits & N_SGN) != 0)
13506 *g_type = NT_signed;
13507 else if ((typebits & N_UNS) != 0)
13508 *g_type = NT_unsigned;
13509 else if ((typebits & N_INT) != 0)
13510 *g_type = NT_integer;
13511 else if ((typebits & N_FLT) != 0)
13512 *g_type = NT_float;
13513 else if ((typebits & N_SIZ) != 0)
13514 *g_type = NT_untyped;
13515 }
13516 }
13517
13518 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
13519 operand type, i.e. the single type specified in a Neon instruction when it
13520 is the only one given. */
13521
13522 static struct neon_type_el
13523 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
13524 {
13525 struct neon_type_el dest = *key;
13526
13527 gas_assert ((thisarg & N_EQK) != 0);
13528
13529 neon_modify_type_size (thisarg, &dest.type, &dest.size);
13530
13531 return dest;
13532 }
13533
13534 /* Convert Neon type and size into compact bitmask representation. */
13535
13536 static enum neon_type_mask
13537 type_chk_of_el_type (enum neon_el_type type, unsigned size)
13538 {
13539 switch (type)
13540 {
13541 case NT_untyped:
13542 switch (size)
13543 {
13544 case 8: return N_8;
13545 case 16: return N_16;
13546 case 32: return N_32;
13547 case 64: return N_64;
13548 default: ;
13549 }
13550 break;
13551
13552 case NT_integer:
13553 switch (size)
13554 {
13555 case 8: return N_I8;
13556 case 16: return N_I16;
13557 case 32: return N_I32;
13558 case 64: return N_I64;
13559 default: ;
13560 }
13561 break;
13562
13563 case NT_float:
13564 switch (size)
13565 {
13566 case 16: return N_F16;
13567 case 32: return N_F32;
13568 case 64: return N_F64;
13569 default: ;
13570 }
13571 break;
13572
13573 case NT_poly:
13574 switch (size)
13575 {
13576 case 8: return N_P8;
13577 case 16: return N_P16;
13578 case 64: return N_P64;
13579 default: ;
13580 }
13581 break;
13582
13583 case NT_signed:
13584 switch (size)
13585 {
13586 case 8: return N_S8;
13587 case 16: return N_S16;
13588 case 32: return N_S32;
13589 case 64: return N_S64;
13590 default: ;
13591 }
13592 break;
13593
13594 case NT_unsigned:
13595 switch (size)
13596 {
13597 case 8: return N_U8;
13598 case 16: return N_U16;
13599 case 32: return N_U32;
13600 case 64: return N_U64;
13601 default: ;
13602 }
13603 break;
13604
13605 default: ;
13606 }
13607
13608 return N_UTYP;
13609 }
13610
13611 /* Convert compact Neon bitmask type representation to a type and size. Only
13612 handles the case where a single bit is set in the mask. */
13613
13614 static int
13615 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
13616 enum neon_type_mask mask)
13617 {
13618 if ((mask & N_EQK) != 0)
13619 return FAIL;
13620
13621 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
13622 *size = 8;
13623 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
13624 *size = 16;
13625 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
13626 *size = 32;
13627 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
13628 *size = 64;
13629 else
13630 return FAIL;
13631
13632 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
13633 *type = NT_signed;
13634 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
13635 *type = NT_unsigned;
13636 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
13637 *type = NT_integer;
13638 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
13639 *type = NT_untyped;
13640 else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
13641 *type = NT_poly;
13642 else if ((mask & (N_F16 | N_F32 | N_F64)) != 0)
13643 *type = NT_float;
13644 else
13645 return FAIL;
13646
13647 return SUCCESS;
13648 }
13649
13650 /* Modify a bitmask of allowed types. This is only needed for type
13651 relaxation. */
13652
13653 static unsigned
13654 modify_types_allowed (unsigned allowed, unsigned mods)
13655 {
13656 unsigned size;
13657 enum neon_el_type type;
13658 unsigned destmask;
13659 int i;
13660
13661 destmask = 0;
13662
13663 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
13664 {
13665 if (el_type_of_type_chk (&type, &size,
13666 (enum neon_type_mask) (allowed & i)) == SUCCESS)
13667 {
13668 neon_modify_type_size (mods, &type, &size);
13669 destmask |= type_chk_of_el_type (type, size);
13670 }
13671 }
13672
13673 return destmask;
13674 }
13675
/* Check type and return type classification.
   The manual states (paraphrase): If one datatype is given, it indicates the
   type given in:
    - the second operand, if there is one
    - the operand, if there is no second operand
    - the result, if there are no operands.
   This isn't quite good enough though, so we use a concept of a "key" datatype
   which is set on a per-instruction basis, which is the one which matters when
   only one data type is written.
   Note: this function has side-effects (e.g. filling in missing operands). All
   Neon instructions should call it before performing bit encoding.  */

static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  /* Returned on any error; callers test .type against NT_invtype.  */
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs: one allowed-types mask per operand.  The
     N_KEY bit marks the key operand.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      if (thisarg == N_IGNORE_TYPE)
	{
	  /* Caller requested no type checking at all; not an error.  */
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  /* Types may be given after the mnemonic or after each operand, never
     both.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand.  */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Pass 0 records the key operand's type, size and allowed mask; pass 1
     checks every operand against the (possibly key-derived) constraints.  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  /* On pass 1, N_EQK operands are checked against the key's allowed
	     mask as transformed by their modifier bits.  */
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if (((types_allowed & N_UNT) == 0)
	      && ((g_size == 8 && (types_allowed & N_8) != 0)
		  || (g_size == 16 && (types_allowed & N_16) != 0)
		  || (g_size == 32 && (types_allowed & N_32) != 0)
		  || (g_size == 64 && (types_allowed & N_64) != 0)))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape;
		  unsigned regwidth, match;

		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
		  if (ns == NS_NULL)
		    {
		      first_error (_("invalid instruction shape"));
		      return badtype;
		    }
		  regshape = neon_shape_tab[ns].el[i];
		  regwidth = neon_shape_el_size[regshape];

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  /* Independent operand: its concrete type must be one of
		     the allowed mask bits.  */
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (_("bad type in Neon instruction"));
		      return badtype;
		    }
		}
	      else
		{
		  /* Key-derived operand: must equal the key's type/size
		     after applying this operand's modifiers.  */
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}
13864
13865 /* Neon-style VFP instruction forwarding. */
13866
13867 /* Thumb VFP instructions have 0xE in the condition field. */
13868
13869 static void
13870 do_vfp_cond_or_thumb (void)
13871 {
13872 inst.is_neon = 1;
13873
13874 if (thumb_mode)
13875 inst.instruction |= 0xe0000000;
13876 else
13877 inst.instruction |= inst.cond << 28;
13878 }
13879
13880 /* Look up and encode a simple mnemonic, for use as a helper function for the
13881 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
13882 etc. It is assumed that operand parsing has already been done, and that the
13883 operands are in the form expected by the given opcode (this isn't necessarily
13884 the same as the form in which they were parsed, hence some massaging must
13885 take place before this function is called).
13886 Checks current arch version against that in the looked-up opcode. */
13887
13888 static void
13889 do_vfp_nsyn_opcode (const char *opname)
13890 {
13891 const struct asm_opcode *opcode;
13892
13893 opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
13894
13895 if (!opcode)
13896 abort ();
13897
13898 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
13899 thumb_mode ? *opcode->tvariant : *opcode->avariant),
13900 _(BAD_FPU));
13901
13902 inst.is_neon = 1;
13903
13904 if (thumb_mode)
13905 {
13906 inst.instruction = opcode->tvalue;
13907 opcode->tencode ();
13908 }
13909 else
13910 {
13911 inst.instruction = (inst.cond << 28) | opcode->avalue;
13912 opcode->aencode ();
13913 }
13914 }
13915
13916 static void
13917 do_vfp_nsyn_add_sub (enum neon_shape rs)
13918 {
13919 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
13920
13921 if (rs == NS_FFF)
13922 {
13923 if (is_add)
13924 do_vfp_nsyn_opcode ("fadds");
13925 else
13926 do_vfp_nsyn_opcode ("fsubs");
13927 }
13928 else
13929 {
13930 if (is_add)
13931 do_vfp_nsyn_opcode ("faddd");
13932 else
13933 do_vfp_nsyn_opcode ("fsubd");
13934 }
13935 }
13936
13937 /* Check operand types to see if this is a VFP instruction, and if so call
13938 PFN (). */
13939
13940 static int
13941 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
13942 {
13943 enum neon_shape rs;
13944 struct neon_type_el et;
13945
13946 switch (args)
13947 {
13948 case 2:
13949 rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13950 et = neon_check_type (2, rs,
13951 N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13952 break;
13953
13954 case 3:
13955 rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13956 et = neon_check_type (3, rs,
13957 N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13958 break;
13959
13960 default:
13961 abort ();
13962 }
13963
13964 if (et.type != NT_invtype)
13965 {
13966 pfn (rs);
13967 return SUCCESS;
13968 }
13969
13970 inst.error = NULL;
13971 return FAIL;
13972 }
13973
13974 static void
13975 do_vfp_nsyn_mla_mls (enum neon_shape rs)
13976 {
13977 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
13978
13979 if (rs == NS_FFF)
13980 {
13981 if (is_mla)
13982 do_vfp_nsyn_opcode ("fmacs");
13983 else
13984 do_vfp_nsyn_opcode ("fnmacs");
13985 }
13986 else
13987 {
13988 if (is_mla)
13989 do_vfp_nsyn_opcode ("fmacd");
13990 else
13991 do_vfp_nsyn_opcode ("fnmacd");
13992 }
13993 }
13994
13995 static void
13996 do_vfp_nsyn_fma_fms (enum neon_shape rs)
13997 {
13998 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
13999
14000 if (rs == NS_FFF)
14001 {
14002 if (is_fma)
14003 do_vfp_nsyn_opcode ("ffmas");
14004 else
14005 do_vfp_nsyn_opcode ("ffnmas");
14006 }
14007 else
14008 {
14009 if (is_fma)
14010 do_vfp_nsyn_opcode ("ffmad");
14011 else
14012 do_vfp_nsyn_opcode ("ffnmad");
14013 }
14014 }
14015
14016 static void
14017 do_vfp_nsyn_mul (enum neon_shape rs)
14018 {
14019 if (rs == NS_FFF)
14020 do_vfp_nsyn_opcode ("fmuls");
14021 else
14022 do_vfp_nsyn_opcode ("fmuld");
14023 }
14024
14025 static void
14026 do_vfp_nsyn_abs_neg (enum neon_shape rs)
14027 {
14028 int is_neg = (inst.instruction & 0x80) != 0;
14029 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
14030
14031 if (rs == NS_FF)
14032 {
14033 if (is_neg)
14034 do_vfp_nsyn_opcode ("fnegs");
14035 else
14036 do_vfp_nsyn_opcode ("fabss");
14037 }
14038 else
14039 {
14040 if (is_neg)
14041 do_vfp_nsyn_opcode ("fnegd");
14042 else
14043 do_vfp_nsyn_opcode ("fabsd");
14044 }
14045 }
14046
14047 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14048 insns belong to Neon, and are handled elsewhere. */
14049
14050 static void
14051 do_vfp_nsyn_ldm_stm (int is_dbmode)
14052 {
14053 int is_ldm = (inst.instruction & (1 << 20)) != 0;
14054 if (is_ldm)
14055 {
14056 if (is_dbmode)
14057 do_vfp_nsyn_opcode ("fldmdbs");
14058 else
14059 do_vfp_nsyn_opcode ("fldmias");
14060 }
14061 else
14062 {
14063 if (is_dbmode)
14064 do_vfp_nsyn_opcode ("fstmdbs");
14065 else
14066 do_vfp_nsyn_opcode ("fstmias");
14067 }
14068 }
14069
14070 static void
14071 do_vfp_nsyn_sqrt (void)
14072 {
14073 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
14074 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
14075
14076 if (rs == NS_FF)
14077 do_vfp_nsyn_opcode ("fsqrts");
14078 else
14079 do_vfp_nsyn_opcode ("fsqrtd");
14080 }
14081
14082 static void
14083 do_vfp_nsyn_div (void)
14084 {
14085 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
14086 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14087 N_F32 | N_F64 | N_KEY | N_VFP);
14088
14089 if (rs == NS_FFF)
14090 do_vfp_nsyn_opcode ("fdivs");
14091 else
14092 do_vfp_nsyn_opcode ("fdivd");
14093 }
14094
/* VNMUL in Neon-style VFP syntax: encoded directly with the dyadic VFP
   encoders rather than via a helper mnemonic lookup.  */

static void
do_vfp_nsyn_nmul (void)
{
  enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
  neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
		   N_F32 | N_F64 | N_KEY | N_VFP);

  if (rs == NS_FFF)
    {
      /* Single-precision form.  */
      NEON_ENCODE (SINGLE, inst);
      do_vfp_sp_dyadic ();
    }
  else
    {
      /* Double-precision form.  */
      NEON_ENCODE (DOUBLE, inst);
      do_vfp_dp_rd_rn_rm ();
    }
  do_vfp_cond_or_thumb ();
}
14114
/* VCMP/VCMPE in Neon-style VFP syntax.  Two forms: register-register
   compare, and compare against an immediate (presumably #0.0 — operand
   parsing is expected to have enforced that; not re-checked here).  */

static void
do_vfp_nsyn_cmp (void)
{
  if (inst.operands[1].isreg)
    {
      /* Register-register compare.  */
      enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);

      if (rs == NS_FF)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_monadic ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd_rm ();
	}
    }
  else
    {
      /* Compare-with-zero form.  */
      enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);

      /* Rewrite the template opcode to the corresponding *z pseudo-mnemonic
	 (vcmp -> vcmpz, vcmpe -> vcmpez).  */
      switch (inst.instruction & 0x0fffffff)
	{
	case N_MNEM_vcmp:
	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
	  break;
	case N_MNEM_vcmpe:
	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
	  break;
	default:
	  abort ();
	}

      if (rs == NS_FI)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_compare_z ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd ();
	}
    }
  do_vfp_cond_or_thumb ();
}
14164
14165 static void
14166 nsyn_insert_sp (void)
14167 {
14168 inst.operands[1] = inst.operands[0];
14169 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
14170 inst.operands[0].reg = REG_SP;
14171 inst.operands[0].isreg = 1;
14172 inst.operands[0].writeback = 1;
14173 inst.operands[0].present = 1;
14174 }
14175
14176 static void
14177 do_vfp_nsyn_push (void)
14178 {
14179 nsyn_insert_sp ();
14180 if (inst.operands[1].issingle)
14181 do_vfp_nsyn_opcode ("fstmdbs");
14182 else
14183 do_vfp_nsyn_opcode ("fstmdbd");
14184 }
14185
14186 static void
14187 do_vfp_nsyn_pop (void)
14188 {
14189 nsyn_insert_sp ();
14190 if (inst.operands[1].issingle)
14191 do_vfp_nsyn_opcode ("fldmias");
14192 else
14193 do_vfp_nsyn_opcode ("fldmiad");
14194 }
14195
14196 /* Fix up Neon data-processing instructions, ORing in the correct bits for
14197 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
14198
14199 static void
14200 neon_dp_fixup (struct arm_it* insn)
14201 {
14202 unsigned int i = insn->instruction;
14203 insn->is_neon = 1;
14204
14205 if (thumb_mode)
14206 {
14207 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
14208 if (i & (1 << 24))
14209 i |= 1 << 28;
14210
14211 i &= ~(1 << 24);
14212
14213 i |= 0xef000000;
14214 }
14215 else
14216 i |= 0xf2000000;
14217
14218 insn->instruction = i;
14219 }
14220
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  */

static unsigned
neon_logbits (unsigned x)
{
  /* ffs() is the 1-based index of the lowest set bit, so 8 -> 4;
     subtracting 4 maps 8/16/32/64 onto 0/1/2/3.  */
  return (unsigned) (ffs ((int) x) - 4);
}
14229
14230 #define LOW4(R) ((R) & 0xf)
14231 #define HI1(R) (((R) >> 4) & 1)
14232
/* Encode insns with bit pattern:

  |28/24|23|22 |21 20|19 16|15 12|11    8|7|6|5|4|3  0|
  |  U  |x |D  |size | Rn  | Rd  |x x x x|N|Q|M|x| Rm |

  SIZE is passed in bits. -1 means size field isn't changed, in case it has a
  different meaning for some instruction.  */

static void
neon_three_same (int isquad, int ubit, int size)
{
  /* Rd (destination): low 4 bits at 15:12, high bit (D) at 22.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  /* Rn (first source): low 4 bits at 19:16, high bit (N) at 7.  */
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  /* Rm (second source): low 4 bits at 3:0, high bit (M) at 5.  */
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  /* Q (quadword) and U bits; neon_dp_fixup relocates U in Thumb mode.  */
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
14257
/* Encode instructions of the form:

  |28/24|23|22|21 20|19 18|17 16|15 12|11... 7|6|5|4|3  0|
  |  U  |x |D |x  x |size |x  x | Rd  |x x x x|Q|M|x| Rm |

  Don't write size if SIZE == -1.  */

static void
neon_two_same (int qbit, int ubit, int size)
{
  /* Rd (destination): low 4 bits at 15:12, high bit (D) at 22.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  /* Rm (source): low 4 bits at 3:0, high bit (M) at 5.  */
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  /* Q (quadword) and U bits; neon_dp_fixup relocates U in Thumb mode.  */
  inst.instruction |= (qbit != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;

  /* SIZE == -1 means the size field carries a different meaning for this
     instruction; the caller writes it.  */
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 18;

  neon_dp_fixup (&inst);
}
14280
14281 /* Neon instruction encoders, in approximate order of appearance. */
14282
14283 static void
14284 do_neon_dyadic_i_su (void)
14285 {
14286 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14287 struct neon_type_el et = neon_check_type (3, rs,
14288 N_EQK, N_EQK, N_SU_32 | N_KEY);
14289 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14290 }
14291
14292 static void
14293 do_neon_dyadic_i64_su (void)
14294 {
14295 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14296 struct neon_type_el et = neon_check_type (3, rs,
14297 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14298 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14299 }
14300
/* Encode an immediate-shift instruction.  ET is the operation's element
   type; IMMBITS is the (already-biased) immediate field value.  The element
   size is encoded as et.size/8, split between bit 7 and bits 21:19.  Only
   some shifts have a U bit, hence WRITE_UBIT.  */

static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
		unsigned immbits)
{
  unsigned size = et.size >> 3;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= immbits << 16;
  /* Bit 3 of SIZE (i.e. 64-bit elements) goes to bit 7; the low three bits
     go to bits 21:19.  */
  inst.instruction |= (size >> 3) << 7;
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  neon_dp_fixup (&inst);
}
14319
14320 static void
14321 do_neon_shl_imm (void)
14322 {
14323 if (!inst.operands[2].isreg)
14324 {
14325 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14326 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
14327 int imm = inst.operands[2].imm;
14328
14329 constraint (imm < 0 || (unsigned)imm >= et.size,
14330 _("immediate out of range for shift"));
14331 NEON_ENCODE (IMMED, inst);
14332 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
14333 }
14334 else
14335 {
14336 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14337 struct neon_type_el et = neon_check_type (3, rs,
14338 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
14339 unsigned int tmp;
14340
14341 /* VSHL/VQSHL 3-register variants have syntax such as:
14342 vshl.xx Dd, Dm, Dn
14343 whereas other 3-register operations encoded by neon_three_same have
14344 syntax like:
14345 vadd.xx Dd, Dn, Dm
14346 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
14347 here. */
14348 tmp = inst.operands[2].reg;
14349 inst.operands[2].reg = inst.operands[1].reg;
14350 inst.operands[1].reg = tmp;
14351 NEON_ENCODE (INTEGER, inst);
14352 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14353 }
14354 }
14355
14356 static void
14357 do_neon_qshl_imm (void)
14358 {
14359 if (!inst.operands[2].isreg)
14360 {
14361 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14362 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
14363 int imm = inst.operands[2].imm;
14364
14365 constraint (imm < 0 || (unsigned)imm >= et.size,
14366 _("immediate out of range for shift"));
14367 NEON_ENCODE (IMMED, inst);
14368 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
14369 }
14370 else
14371 {
14372 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14373 struct neon_type_el et = neon_check_type (3, rs,
14374 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
14375 unsigned int tmp;
14376
14377 /* See note in do_neon_shl_imm. */
14378 tmp = inst.operands[2].reg;
14379 inst.operands[2].reg = inst.operands[1].reg;
14380 inst.operands[1].reg = tmp;
14381 NEON_ENCODE (INTEGER, inst);
14382 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14383 }
14384 }
14385
14386 static void
14387 do_neon_rshl (void)
14388 {
14389 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14390 struct neon_type_el et = neon_check_type (3, rs,
14391 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14392 unsigned int tmp;
14393
14394 tmp = inst.operands[2].reg;
14395 inst.operands[2].reg = inst.operands[1].reg;
14396 inst.operands[1].reg = tmp;
14397 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14398 }
14399
/* Return the "cmode" field value for IMMEDIATE when used in an immediate
   logic instruction (VBIC/VORR forms) with element SIZE, storing the 8-bit
   payload in *IMMBITS.  The encodable constants are a single non-zero byte
   within a 16- or 32-bit element.  Returns FAIL (after reporting an error)
   if IMMEDIATE cannot be represented.  */

static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Handle .I8 pseudo-instructions.  */
  if (size == 8)
    {
      /* Unfortunately, this will make everything apart from zero out-of-range.
	 FIXME is this the intended semantics? There doesn't seem much point in
	 accepting .I8 if so.  */
      immediate |= immediate << 8;
      size = 16;
    }

  if (size >= 32)
    {
      /* 32-bit element: a single byte in any of the four positions.  */
      if (immediate == (immediate & 0x000000ff))
	{
	  *immbits = immediate;
	  return 0x1;
	}
      else if (immediate == (immediate & 0x0000ff00))
	{
	  *immbits = immediate >> 8;
	  return 0x3;
	}
      else if (immediate == (immediate & 0x00ff0000))
	{
	  *immbits = immediate >> 16;
	  return 0x5;
	}
      else if (immediate == (immediate & 0xff000000))
	{
	  *immbits = immediate >> 24;
	  return 0x7;
	}
      /* Not encodable at 32 bits; it may still work as a repeating 16-bit
	 element pattern if both halves agree.  */
      if ((immediate & 0xffff) != (immediate >> 16))
	goto bad_immediate;
      immediate &= 0xffff;
    }

  /* 16-bit element: a byte in the low or high half.  */
  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return 0x9;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return 0xb;
    }

  bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}
14455
/* Encode the Neon bitwise-logic instructions (VAND/VBIC/VORR/VORN/VEOR).
   Three-register forms are size-agnostic; immediate forms only exist for
   VBIC and VORR, so VAND and VORN are implemented as pseudo-instructions
   on the inverted immediate.  */

static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      /* Three-register form.  */
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      /* Immediate form; the immediate may have been parsed as operand 1
	 (two-operand syntax) or operand 2 (three-operand syntax).  */
      const int three_ops_form = (inst.operands[2].present
				  && !inst.operands[2].isreg);
      const int immoperand = (three_ops_form ? 2 : 1);
      enum neon_shape rs = (three_ops_form
			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
      struct neon_type_el et = neon_check_type (2, rs,
	N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
	return;

      /* In the three-operand form the destination doubles as a source.  */
      if (three_ops_form)
	constraint (inst.operands[0].reg != inst.operands[1].reg,
		    _("first and second operands shall be the same register"));

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[immoperand].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern.  */
	  if (immbits != (inst.operands[immoperand].regisimm ?
			  inst.operands[immoperand].reg : 0))
	    {
	      /* Set immbits to an invalid constant.  */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      /* neon_cmode_for_logic_imm has already reported the error.  */
      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
14541
14542 static void
14543 do_neon_bitfield (void)
14544 {
14545 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14546 neon_check_type (3, rs, N_IGNORE_TYPE);
14547 neon_three_same (neon_quad (rs), 0, -1);
14548 }
14549
14550 static void
14551 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
14552 unsigned destbits)
14553 {
14554 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14555 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
14556 types | N_KEY);
14557 if (et.type == NT_float)
14558 {
14559 NEON_ENCODE (FLOAT, inst);
14560 neon_three_same (neon_quad (rs), 0, -1);
14561 }
14562 else
14563 {
14564 NEON_ENCODE (INTEGER, inst);
14565 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
14566 }
14567 }
14568
/* Dyadic op over signed/unsigned/float 32-bit-or-narrower elements;
   NT_unsigned selects the U bit.  */

static void
do_neon_dyadic_if_su (void)
{
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
14574
static void
do_neon_dyadic_if_su_d (void)
{
  /* This version only allow D registers, but that constraint is enforced during
     operand parsing so we don't need to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
14582
static void
do_neon_dyadic_if_i_d (void)
{
  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
14590
/* Flag bits for vfp_or_neon_is_neon(), selecting which checks to run.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,	/* Check/repair the condition field.  */
  NEON_CHECK_ARCH = 2,	/* Require the Neon v1 extension.  */
  NEON_CHECK_ARCH8 = 4	/* Require the ARMv8 Neon extension.  */
};
14597
/* Call this function if an instruction which may have belonged to the VFP or
   Neon instruction sets, but turned out to be a Neon instruction (due to the
   operand types involved, etc.). We have to check and/or fix-up a couple of
   things:

     - Make sure the user hasn't attempted to make a Neon instruction
       conditional.
     - Alter the value in the condition code field if necessary.
     - Make sure that the arch supports Neon instructions.

   Which of these operations take place depends on bits from enum
   vfp_or_neon_is_neon_bits.

   WARNING: This function has side effects! If NEON_CHECK_CC is used and the
   current instruction's condition is COND_ALWAYS, the condition field is
   changed to inst.uncond_value. This is necessary because instructions shared
   between VFP and Neon may be conditional for the VFP variants only, and the
   unconditional Neon version must have, e.g., 0xF in the condition field.  */

static int
vfp_or_neon_is_neon (unsigned check)
{
  /* Conditions are always legal in Thumb mode (IT blocks).  */
  if (!thumb_mode && (check & NEON_CHECK_CC))
    {
      if (inst.cond != COND_ALWAYS)
	{
	  first_error (_(BAD_COND));
	  return FAIL;
	}
      /* Patch in the mandatory unconditional encoding, if one is set.  */
      if (inst.uncond_value != -1)
	inst.instruction |= inst.uncond_value << 28;
    }

  /* mark_feature_used also records the feature for object attributes.  */
  if ((check & NEON_CHECK_ARCH)
      && !mark_feature_used (&fpu_neon_ext_v1))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  if ((check & NEON_CHECK_ARCH8)
      && !mark_feature_used (&fpu_neon_ext_armv8))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  return SUCCESS;
}
14648
14649 static void
14650 do_neon_addsub_if_i (void)
14651 {
14652 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
14653 return;
14654
14655 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14656 return;
14657
14658 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14659 affected if we specify unsigned args. */
14660 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
14661 }
14662
14663 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
14664 result to be:
14665 V<op> A,B (A is operand 0, B is operand 2)
14666 to mean:
14667 V<op> A,B,A
14668 not:
14669 V<op> A,B,B
14670 so handle that case specially. */
14671
14672 static void
14673 neon_exchange_operands (void)
14674 {
14675 void *scratch = alloca (sizeof (inst.operands[0]));
14676 if (inst.operands[1].present)
14677 {
14678 /* Swap operands[1] and operands[2]. */
14679 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
14680 inst.operands[1] = inst.operands[2];
14681 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
14682 }
14683 else
14684 {
14685 inst.operands[1] = inst.operands[2];
14686 inst.operands[2] = inst.operands[0];
14687 }
14688 }
14689
14690 static void
14691 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
14692 {
14693 if (inst.operands[2].isreg)
14694 {
14695 if (invert)
14696 neon_exchange_operands ();
14697 neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
14698 }
14699 else
14700 {
14701 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14702 struct neon_type_el et = neon_check_type (2, rs,
14703 N_EQK | N_SIZ, immtypes | N_KEY);
14704
14705 NEON_ENCODE (IMMED, inst);
14706 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14707 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14708 inst.instruction |= LOW4 (inst.operands[1].reg);
14709 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14710 inst.instruction |= neon_quad (rs) << 6;
14711 inst.instruction |= (et.type == NT_float) << 10;
14712 inst.instruction |= neon_logbits (et.size) << 18;
14713
14714 neon_dp_fixup (&inst);
14715 }
14716 }
14717
/* Non-inverted compare: S/U/F32 register forms, signed/F32 immediate
   forms.  */

static void
do_neon_cmp (void)
{
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
}
14723
/* Inverted compare: same types as do_neon_cmp, but the register form
   swaps its source operands (see neon_compare).  */

static void
do_neon_cmp_inv (void)
{
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
}
14729
/* VCEQ: equality compare, integer or F32 types for both register and
   immediate forms.  */

static void
do_neon_ceq (void)
{
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
14735
14736 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
14737 scalars, which are encoded in 5 bits, M : Rm.
14738 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
14739 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
14740 index in M. */
14741
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  /* 16-bit scalars: register in Rm[2:0], index in M:Rm[3].  */
  if (elsize == 16 && regno <= 7 && elno <= 3)
    return regno | (elno << 3);

  /* 32-bit scalars: register in Rm[3:0], index in M.  */
  if (elsize == 32 && regno <= 15 && elno <= 1)
    return regno | (elno << 4);

  /* Any other size, or an out-of-range register/index, is an error.  */
  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
14767
14768 /* Encode multiply / multiply-accumulate scalar instructions. */
14769
14770 static void
14771 neon_mul_mac (struct neon_type_el et, int ubit)
14772 {
14773 unsigned scalar;
14774
14775 /* Give a more helpful error message if we have an invalid type. */
14776 if (et.type == NT_invtype)
14777 return;
14778
14779 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
14780 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14781 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14782 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14783 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14784 inst.instruction |= LOW4 (scalar);
14785 inst.instruction |= HI1 (scalar) << 5;
14786 inst.instruction |= (et.type == NT_float) << 8;
14787 inst.instruction |= neon_logbits (et.size) << 20;
14788 inst.instruction |= (ubit != 0) << 24;
14789
14790 neon_dp_fixup (&inst);
14791 }
14792
14793 static void
14794 do_neon_mac_maybe_scalar (void)
14795 {
14796 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
14797 return;
14798
14799 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14800 return;
14801
14802 if (inst.operands[2].isscalar)
14803 {
14804 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
14805 struct neon_type_el et = neon_check_type (3, rs,
14806 N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
14807 NEON_ENCODE (SCALAR, inst);
14808 neon_mul_mac (et, neon_quad (rs));
14809 }
14810 else
14811 {
14812 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14813 affected if we specify unsigned args. */
14814 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
14815 }
14816 }
14817
14818 static void
14819 do_neon_fmac (void)
14820 {
14821 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
14822 return;
14823
14824 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14825 return;
14826
14827 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
14828 }
14829
14830 static void
14831 do_neon_tst (void)
14832 {
14833 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14834 struct neon_type_el et = neon_check_type (3, rs,
14835 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
14836 neon_three_same (neon_quad (rs), 0, et.size);
14837 }
14838
14839 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
14840 same types as the MAC equivalents. The polynomial type for this instruction
14841 is encoded the same as the integer type. */
14842
14843 static void
14844 do_neon_mul (void)
14845 {
14846 if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
14847 return;
14848
14849 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14850 return;
14851
14852 if (inst.operands[2].isscalar)
14853 do_neon_mac_maybe_scalar ();
14854 else
14855 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
14856 }
14857
14858 static void
14859 do_neon_qdmulh (void)
14860 {
14861 if (inst.operands[2].isscalar)
14862 {
14863 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
14864 struct neon_type_el et = neon_check_type (3, rs,
14865 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
14866 NEON_ENCODE (SCALAR, inst);
14867 neon_mul_mac (et, neon_quad (rs));
14868 }
14869 else
14870 {
14871 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14872 struct neon_type_el et = neon_check_type (3, rs,
14873 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
14874 NEON_ENCODE (INTEGER, inst);
14875 /* The U bit (rounding) comes from bit mask. */
14876 neon_three_same (neon_quad (rs), 0, et.size);
14877 }
14878 }
14879
14880 static void
14881 do_neon_fcmp_absolute (void)
14882 {
14883 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14884 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
14885 /* Size field comes from bit mask. */
14886 neon_three_same (neon_quad (rs), 1, -1);
14887 }
14888
/* Inverted absolute compare: swap the source operands, then encode as
   the non-inverted instruction.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
14895
14896 static void
14897 do_neon_step (void)
14898 {
14899 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14900 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
14901 neon_three_same (neon_quad (rs), 0, -1);
14902 }
14903
/* VABS/VNEG: two same-size registers, signed integer or F32 elements.
   Prefers the VFP encoding when the operands select it.  */

static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);

  /* Destination in D:Rd, source in M:Rm.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  /* Bit 10 selects the floating-point variant; bits 19-18 the size.  */
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  neon_dp_fixup (&inst);
}
14929
14930 static void
14931 do_neon_sli (void)
14932 {
14933 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14934 struct neon_type_el et = neon_check_type (2, rs,
14935 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
14936 int imm = inst.operands[2].imm;
14937 constraint (imm < 0 || (unsigned)imm >= et.size,
14938 _("immediate out of range for insert"));
14939 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
14940 }
14941
14942 static void
14943 do_neon_sri (void)
14944 {
14945 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14946 struct neon_type_el et = neon_check_type (2, rs,
14947 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
14948 int imm = inst.operands[2].imm;
14949 constraint (imm < 1 || (unsigned)imm > et.size,
14950 _("immediate out of range for insert"));
14951 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
14952 }
14953
/* VQSHLU (immediate): saturating shift left with signed input and
   unsigned result; the shift must be in 0..size-1.  */

static void
do_neon_qshlu_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
14970
14971 static void
14972 do_neon_qmovn (void)
14973 {
14974 struct neon_type_el et = neon_check_type (2, NS_DQ,
14975 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
14976 /* Saturating move where operands can be signed or unsigned, and the
14977 destination has the same signedness. */
14978 NEON_ENCODE (INTEGER, inst);
14979 if (et.type == NT_unsigned)
14980 inst.instruction |= 0xc0;
14981 else
14982 inst.instruction |= 0x80;
14983 neon_two_same (0, 1, et.size / 2);
14984 }
14985
14986 static void
14987 do_neon_qmovun (void)
14988 {
14989 struct neon_type_el et = neon_check_type (2, NS_DQ,
14990 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
14991 /* Saturating move with unsigned results. Operands must be signed. */
14992 NEON_ENCODE (INTEGER, inst);
14993 neon_two_same (0, 1, et.size / 2);
14994 }
14995
/* VQSHRN/VQRSHRN: saturating shift right and narrow.  A zero shift is
   rewritten as the equivalent VQMOVN.  */

static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      /* Drop the immediate operand and re-dispatch as VQMOVN.  */
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
15022
/* VQSHRUN/VQRSHRUN: saturating shift right and narrow with unsigned
   result.  A zero shift is rewritten as the equivalent VQMOVUN.  */

static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      /* Drop the immediate operand and re-dispatch as VQMOVUN.  */
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
15052
15053 static void
15054 do_neon_movn (void)
15055 {
15056 struct neon_type_el et = neon_check_type (2, NS_DQ,
15057 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
15058 NEON_ENCODE (INTEGER, inst);
15059 neon_two_same (0, 1, et.size / 2);
15060 }
15061
15062 static void
15063 do_neon_rshift_narrow (void)
15064 {
15065 struct neon_type_el et = neon_check_type (2, NS_DQI,
15066 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
15067 int imm = inst.operands[2].imm;
15068 /* This gets the bounds check, size encoding and immediate bits calculation
15069 right. */
15070 et.size /= 2;
15071
15072 /* If immediate is zero then we are a pseudo-instruction for
15073 VMOVN.I<size> <Dd>, <Qm> */
15074 if (imm == 0)
15075 {
15076 inst.operands[2].present = 0;
15077 inst.instruction = N_MNEM_vmovn;
15078 do_neon_movn ();
15079 return;
15080 }
15081
15082 constraint (imm < 1 || (unsigned)imm > et.size,
15083 _("immediate out of range for narrowing operation"));
15084 neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
15085 }
15086
/* VSHLL: shift left long.  A shift equal to the element size uses a
   distinct "maximum shift" encoding; smaller shifts use the ordinary
   immediate-shift encoding with stricter type checking.  */

static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.  */
      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
	N_EQK | N_DBL, N_SU_32 | N_KEY);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
15116
15117 /* Check the various types for the VCVT instruction, and return which version
15118 the current instruction is. */
15119
/* Table of all VCVT conversion "flavours".  Each CVT_VAR row gives: a
   name suffix (dest_source), destination type bits, source type bits, a
   register-class qualifier, and the VFP opcode names for the bitshift,
   plain and round-to-zero forms (NULL where that form does not exist).
   `whole_reg' and `key' are locals of get_neon_cvt_flavour, where this
   table is expanded for type checking.  */
#define CVT_FLAVOUR_VAR \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
  /* Half-precision conversions.  */ \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
  /* VFP instructions.  */ \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
  /* VFP instructions with bitshift.  */ \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)

/* First expansion of the table: generate one enumerator per row.  */
#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  /* Flavours from this point on use a VFP rather than a Neon encoding
     (see do_neon_cvt_1).  */
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
15157
/* Return the neon_cvt_flavour matching the current instruction's operand
   types for shape RS, or neon_cvt_flavour_invalid if none matches.  Each
   row of CVT_FLAVOUR_VAR is tried in turn via neon_check_type; a failed
   probe sets inst.error, which is cleared again on the first success.  */

static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
  if (et.type != NT_invtype)				\
    {							\
      inst.error = NULL;				\
      return (neon_cvt_flavour_##C);			\
    }

  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register.  Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}
15183
/* Rounding/operation mode selected by the VCVT{A,N,P,M,Z,X,R} mnemonic
   suffix; passed down to do_neon_cvt_1.  */
enum neon_cvt_mode
{
  neon_cvt_mode_a,
  neon_cvt_mode_n,
  neon_cvt_mode_p,
  neon_cvt_mode_m,
  neon_cvt_mode_z,
  neon_cvt_mode_x,
  neon_cvt_mode_r
};
15194
15195 /* Neon-syntax VFP conversions. */
15196
/* Encode a VFP conversion written in Neon syntax by dispatching to the
   legacy VFP mnemonic for FLAVOUR.  Shapes with an immediate use the
   bitshift-form opcode table; other shapes use the plain-form table.
   A NULL table entry (or an invalid flavour) silently encodes nothing;
   inst.error was already set by the failed type checks.  */

static void
do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
{
  const char *opname = 0;

  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
    {
      /* Conversions with immediate bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	{
	  opname = enc[flavour];
	  /* The bitshift form reads and writes the same register, with
	     the shift count as the remaining operand.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("operands 0 and 1 must be the same register"));
	  inst.operands[1] = inst.operands[2];
	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
	}
    }
  else
    {
      /* Conversions without bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	opname = enc[flavour];
    }

  if (opname)
    do_vfp_nsyn_opcode (opname);
}
15240
/* Encode the round-to-zero VFP conversion (ftosiz/ftouiz family) for
   the current operands, if the flavour has such a form.  */

static void
do_vfp_nsyn_cvtz (void)
{
  enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
  /* Round-to-zero opcode names, indexed by flavour; NULL where the
     flavour has no such form.  */
  const char *enc[] =
    {
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
      CVT_FLAVOUR_VAR
      NULL
#undef CVT_VAR
    };

  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
15257
/* Encode an ARMv8 VFP conversion with directed rounding (VCVTA/N/P/M).
   FLAVOUR selects the operand sizes and signedness (sz and op bits);
   MODE selects the rounding-mode field (rm).  */

static void
do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
		      enum neon_cvt_mode mode)
{
  int sz, op;
  int rm;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (flavour == neon_cvt_flavour_s32_f64
      || flavour == neon_cvt_flavour_u32_f64)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* These instructions are unconditional even in Thumb mode.  */
  set_it_insn_type (OUTSIDE_IT_INSN);

  /* sz: double-precision source; op: signed result.  */
  switch (flavour)
    {
    case neon_cvt_flavour_s32_f64:
      sz = 1;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f32:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_u32_f64:
      sz = 1;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f32:
      sz = 0;
      op = 0;
      break;
    default:
      first_error (_("invalid instruction shape"));
      return;
    }

  switch (mode)
    {
    case neon_cvt_mode_a: rm = 0; break;
    case neon_cvt_mode_n: rm = 1; break;
    case neon_cvt_mode_p: rm = 2; break;
    case neon_cvt_mode_m: rm = 3; break;
    default: first_error (_("invalid rounding mode")); return;
    }

  NEON_ENCODE (FPV8, inst);
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= sz << 8;
  inst.instruction |= op << 7;
  inst.instruction |= rm << 16;
  /* Unconditional encoding.  */
  inst.instruction |= 0xf0000000;
  inst.is_neon = TRUE;
}
15315
/* Worker for all the VCVT entry points.  Chooses between the VFP and
   Neon encodings of VCVT from the operand shape and element types, then
   encodes the selected form.  MODE is the rounding mode implied by the
   mnemonic suffix.  */

static void
do_neon_cvt_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
    NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

  /* PR11109: Handle round-to-zero for VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
      && (flavour == neon_cvt_flavour_s32_f32
	  || flavour == neon_cvt_flavour_u32_f32
	  || flavour == neon_cvt_flavour_s32_f64
	  || flavour == neon_cvt_flavour_u32_f64)
      && (rs == NS_FD || rs == NS_FF))
    {
      do_vfp_nsyn_cvtz ();
      return;
    }

  /* VFP rather than Neon conversions.  */
  if (flavour >= neon_cvt_flavour_first_fp)
    {
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);

      return;
    }

  switch (rs)
    {
    case NS_DDI:
    case NS_QQI:
      {
	/* Neon fixed-point conversion (immediate shift count).  */
	unsigned immbits;
	unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };

	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion.  */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	immbits = 32 - inst.operands[2].imm;
	NEON_ENCODE (IMMED, inst);
	if (flavour != neon_cvt_flavour_invalid)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	inst.instruction |= immbits << 16;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DD:
    case NS_QQ:
      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
	{
	  /* ARMv8 Neon VCVT with directed rounding.  */
	  NEON_ENCODE (FLOAT, inst);
	  set_it_insn_type (OUTSIDE_IT_INSN);

	  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	    return;

	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg);
	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	  inst.instruction |= neon_quad (rs) << 6;
	  inst.instruction |= (flavour == neon_cvt_flavour_u32_f32) << 7;
	  inst.instruction |= mode << 8;
	  if (thumb_mode)
	    inst.instruction |= 0xfc000000;
	  else
	    inst.instruction |= 0xf0000000;
	}
      else
	{
    int_encode:
	  {
	    /* Plain integer <-> float conversion; also reached by goto
	       from the fixed-point case when the shift count is #0.  */
	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };

	    NEON_ENCODE (INTEGER, inst);

	    if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	      return;

	    if (flavour != neon_cvt_flavour_invalid)
	      inst.instruction |= enctab[flavour];

	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	    inst.instruction |= LOW4 (inst.operands[1].reg);
	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	    inst.instruction |= neon_quad (rs) << 6;
	    inst.instruction |= 2 << 18;

	    neon_dp_fixup (&inst);
	  }
	}
      break;

    /* Half-precision conversions for Advanced SIMD -- neon.  */
    case NS_QD:
    case NS_DQ:

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if (rs == NS_DQ)
	inst.instruction = 0x3b60600;
      else
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
    }
}
15464
/* VCVTR: thin wrapper selecting mode X for do_neon_cvt_1.  */

static void
do_neon_cvtr (void)
{
  do_neon_cvt_1 (neon_cvt_mode_x);
}
15470
/* VCVT: thin wrapper selecting mode Z (round to zero) for
   do_neon_cvt_1.  */

static void
do_neon_cvt (void)
{
  do_neon_cvt_1 (neon_cvt_mode_z);
}
15476
/* VCVTA: thin wrapper selecting mode A for do_neon_cvt_1.  */

static void
do_neon_cvta (void)
{
  do_neon_cvt_1 (neon_cvt_mode_a);
}
15482
/* VCVTN: thin wrapper selecting mode N for do_neon_cvt_1.  */

static void
do_neon_cvtn (void)
{
  do_neon_cvt_1 (neon_cvt_mode_n);
}
15488
/* VCVTP: thin wrapper selecting mode P for do_neon_cvt_1.  */

static void
do_neon_cvtp (void)
{
  do_neon_cvt_1 (neon_cvt_mode_p);
}
15494
/* VCVTM: thin wrapper selecting mode M for do_neon_cvt_1.  */

static void
do_neon_cvtm (void)
{
  do_neon_cvt_1 (neon_cvt_mode_m);
}
15500
15501 static void
15502 do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
15503 {
15504 if (is_double)
15505 mark_feature_used (&fpu_vfp_ext_armv8);
15506
15507 encode_arm_vfp_reg (inst.operands[0].reg,
15508 (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
15509 encode_arm_vfp_reg (inst.operands[1].reg,
15510 (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
15511 inst.instruction |= to ? 0x10000 : 0;
15512 inst.instruction |= t ? 0x80 : 0;
15513 inst.instruction |= is_double ? 0x100 : 0;
15514 do_vfp_cond_or_thumb ();
15515 }
15516
/* Shared worker for VCVTB/VCVTT.  Probes the four possible type
   combinations (F16<->F32, F16<->F64) in turn; each failed probe sets
   inst.error, which is cleared before encoding the matching form.
   Falls through silently (leaving inst.error set) if none matches.  */

static void
do_neon_cvttb_1 (bfd_boolean t)
{
  enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_DF, NS_NULL);

  if (rs == NS_NULL)
    return;
  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
    {
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
    {
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
         don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
         don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
    }
  else
    return;
}
15557
/* VCVTB: bottom-half variant of the half-precision conversion.  */

static void
do_neon_cvtb (void)
{
  do_neon_cvttb_1 (FALSE);
}
15563
15564
/* VCVTT: top-half variant of the half-precision conversion.  */

static void
do_neon_cvtt (void)
{
  do_neon_cvttb_1 (TRUE);
}
15570
/* Encode the immediate form of VMOV/VMVN.  Finds a cmode/op encoding of
   the immediate, trying the inverted immediate with the opposite
   mnemonic if the direct encoding fails.  */

static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  /* A 64-bit immediate arrives split across imm (low) and reg (high).  */
  immlo = inst.operands[1].imm;
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV.  Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Install the (possibly flipped) op bit.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
15622
/* VMVN.  Register form is encoded directly here; the immediate form is
   delegated to neon_move_immediate (the opcode's bit 5 marks it as the
   MVN flavour there).  */

static void
do_neon_mvn (void)
{
  if (inst.operands[1].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);

      NEON_ENCODE (INTEGER, inst);
      /* Dd/Qd in bits 12-15 plus D bit 22; Dm/Qm in bits 0-3 plus M bit 5.  */
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
    }
  else
    {
      NEON_ENCODE (IMMED, inst);
      neon_move_immediate ();
    }

  neon_dp_fixup (&inst);
}
15645
15646 /* Encode instructions of form:
15647
15648 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
15649 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
15650
/* Shared encoder for long/wide/narrow three-operand instructions.  ET
   supplies the signedness (U bit, bit 24) and SIZE the element size
   (log2-encoded in bits 20-21).  Register fields follow the bitmask
   diagram above.  */

static void
neon_mixed_length (struct neon_type_el et, unsigned size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (et.type == NT_unsigned) << 24;
  inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
15665
15666 static void
15667 do_neon_dyadic_long (void)
15668 {
15669 /* FIXME: Type checking for lengthening op. */
15670 struct neon_type_el et = neon_check_type (3, NS_QDD,
15671 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
15672 neon_mixed_length (et, et.size);
15673 }
15674
15675 static void
15676 do_neon_abal (void)
15677 {
15678 struct neon_type_el et = neon_check_type (3, NS_QDD,
15679 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
15680 neon_mixed_length (et, et.size);
15681 }
15682
15683 static void
15684 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
15685 {
15686 if (inst.operands[2].isscalar)
15687 {
15688 struct neon_type_el et = neon_check_type (3, NS_QDS,
15689 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
15690 NEON_ENCODE (SCALAR, inst);
15691 neon_mul_mac (et, et.type == NT_unsigned);
15692 }
15693 else
15694 {
15695 struct neon_type_el et = neon_check_type (3, NS_QDD,
15696 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
15697 NEON_ENCODE (INTEGER, inst);
15698 neon_mixed_length (et, et.size);
15699 }
15700 }
15701
15702 static void
15703 do_neon_mac_maybe_scalar_long (void)
15704 {
15705 neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
15706 }
15707
15708 static void
15709 do_neon_dyadic_wide (void)
15710 {
15711 struct neon_type_el et = neon_check_type (3, NS_QQD,
15712 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
15713 neon_mixed_length (et, et.size);
15714 }
15715
15716 static void
15717 do_neon_dyadic_narrow (void)
15718 {
15719 struct neon_type_el et = neon_check_type (3, NS_QDD,
15720 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
15721 /* Operand sign is unimportant, and the U bit is part of the opcode,
15722 so force the operand type to integer. */
15723 et.type = NT_integer;
15724 neon_mixed_length (et, et.size / 2);
15725 }
15726
15727 static void
15728 do_neon_mul_sat_scalar_long (void)
15729 {
15730 neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
15731 }
15732
/* VMULL.  The scalar variant is handled by the generic long MAC path;
   the register variant additionally accepts polynomial types, with the
   64-bit polynomial form gated on the ARMv8 crypto extension.  */

static void
do_neon_vmull (void)
{
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);

      if (et.type == NT_poly)
	NEON_ENCODE (POLY, inst);
      else
	NEON_ENCODE (INTEGER, inst);

      /* For polynomial encoding the U bit must be zero, and the size must
	 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
	 obviously, as 0b10).  */
      if (et.size == 64)
	{
	  /* Check we're on the correct architecture.  */
	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
	    inst.error =
	      _("Instruction form not available on this architecture.");

	  /* neon_logbits (32) == 0b10, the size encoding for P64.  */
	  et.size = 32;
	}

      neon_mixed_length (et, et.size);
    }
}
15764
/* VEXT.  The immediate operand is a byte offset; the assembler-level
   immediate counts elements, so scale it by the element size first.  */

static void
do_neon_ext (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  /* Convert the element index into a byte offset.  */
  unsigned imm = (inst.operands[3].imm * et.size) / 8;

  /* The byte offset must lie within the (8- or 16-byte) source pair.  */
  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
	      _("shift out of range"));
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= imm << 8;

  neon_dp_fixup (&inst);
}
15786
/* VREV16/VREV32/VREV64.  The reversal-region width is implied by the
   opcode bits already placed in inst.instruction.  */

static void
do_neon_rev (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask. We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction.  */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
  gas_assert (elsize != 0);
  constraint (et.size >= elsize,
	      _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
15803
/* VDUP: duplicate either a scalar (Dm[x]) or an ARM core register into
   every lane of a D or Q register.  */

static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      /* Scalar source: VDUP.<size> <Dd>/<Qd>, <Dm[x]>.  */
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Index and size share the imm4 field: size one-hot below the index.  */
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* Core-register source: VDUP.<size> <Dd>/<Qd>, <Rm>.  */
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector.  */
      NEON_ENCODE (ARMREG, inst);
      /* Size is split across the b (bit 22) and e (bit 5) fields.  */
      switch (et.size)
	{
	case 8:  inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
15854
15855 /* VMOV has particularly many variations. It can be one of:
15856 0. VMOV<c><q> <Qd>, <Qm>
15857 1. VMOV<c><q> <Dd>, <Dm>
15858 (Register operations, which are VORR with Rm = Rn.)
15859 2. VMOV<c><q>.<dt> <Qd>, #<imm>
15860 3. VMOV<c><q>.<dt> <Dd>, #<imm>
15861 (Immediate loads.)
15862 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
15863 (ARM register to scalar.)
15864 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
15865 (Two ARM registers to vector.)
15866 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
15867 (Scalar to ARM register.)
15868 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
15869 (Vector to two ARM registers.)
15870 8. VMOV.F32 <Sd>, <Sm>
15871 9. VMOV.F64 <Dd>, <Dm>
15872 (VFP register moves.)
15873 10. VMOV.F32 <Sd>, #imm
15874 11. VMOV.F64 <Dd>, #imm
15875 (VFP float immediate load.)
15876 12. VMOV <Rd>, <Sm>
15877 (VFP single to ARM reg.)
15878 13. VMOV <Sd>, <Rm>
15879 (ARM reg to VFP single.)
15880 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
15881 (Two ARM regs to two VFP singles.)
15882 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
15883 (Two VFP singles to two ARM regs.)
15884
15885 These cases can be disambiguated using neon_select_shape, except cases 1/9
15886 and 3/11 which depend on the operand type too.
15887
15888 All the encoded bits are hardcoded by this function.
15889
15890 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
15891 Cases 5, 7 may be used with VFPv2 and above.
15892
15893 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
15894 can specify a type where it doesn't make sense to, and is ignored). */
15895
static void
do_neon_mov (void)
{
  /* Disambiguate the 16 VMOV variants listed in the comment above by
     operand shape, then hand off to the appropriate encoder.  */
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
    NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
    NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 9: VMOV.F64 is the VFP fcpyd.  */
	  do_vfp_nsyn_opcode ("fcpyd");
	  break;
	}
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;
	/* The architecture manual I have doesn't explicitly state which
	   value the U bit should have for register->register moves, but
	   the equivalent VORR instruction has U = 0, so do that.  */
	inst.instruction = 0x0200110;
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	/* VORR Dd, Dm, Dm: the source appears in both Rn and Rm slots.  */
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
	inst.instruction |= neon_quad (rs) << 6;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* Again, no type is not an error here.  */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 11 (fconstd).  */
	  ldconst = "fconstd";
	  goto encode_fconstd;
	}
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      neon_dp_fixup (&inst);
      break;

    case NS_SR:  /* case 4: ARM register to scalar.  */
      {
	unsigned bcdebits = 0;
	int logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

	/* .<size> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	/* 8- and 16-bit transfers need Neon; 32-bit works on plain VFP.  */
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Size selector for the opc1/opc2 fields; index bits are OR'd in
	   below.  */
	switch (et.size)
	  {
	  case 8:  bcdebits = 0x8; break;
	  case 16: bcdebits = 0x1; break;
	  case 32: bcdebits = 0x0; break;
	  default: ;
	  }

	bcdebits |= x << logsize;

	inst.instruction = 0xe000b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[1].reg << 12;
	inst.instruction |= (bcdebits & 3) << 5;
	inst.instruction |= (bcdebits >> 2) << 21;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6: scalar to ARM register.  */
      {
	unsigned logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
	unsigned abcdebits = 0;

	/* .<dt> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL,
	  N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Size and signedness (the extra 0x10 selects unsigned extend).  */
	switch (et.size)
	  {
	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
	  case 32: abcdebits = 0x00; break;
	  default: ;
	  }

	abcdebits |= x << logsize;
	inst.instruction = 0xe100b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[0].reg << 12;
	inst.instruction |= (abcdebits & 3) << 5;
	inst.instruction |= (abcdebits >> 2) << 21;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
    encode_fconstd:
      /* Cases 10/11: VFP float-immediate loads share this tail; only
	 quarter-precision-representable immediates can be encoded.  */
      if (is_quarter_float (inst.operands[1].imm))
	{
	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
	  do_vfp_nsyn_opcode (ldconst);
	}
      else
	first_error (_("immediate out of range"));
      break;

    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      break;

    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    case NS_NULL:
      /* neon_select_shape has determined that the instruction
	 shape is wrong and has already set the error message.  */
      break;

    default:
      abort ();
    }
}
16130
/* V{R}SHR by immediate.  The shift amount is encoded as et.size - imm;
   a zero shift degenerates to a plain VMOV.  */

static void
do_neon_rshift_round_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
  int imm = inst.operands[2].imm;

  /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      do_neon_mov ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for shift"));
  neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
		  et.size - imm);
}
16151
16152 static void
16153 do_neon_movl (void)
16154 {
16155 struct neon_type_el et = neon_check_type (2, NS_QD,
16156 N_EQK | N_DBL, N_SU_32 | N_KEY);
16157 unsigned sizebits = et.size >> 3;
16158 inst.instruction |= sizebits << 19;
16159 neon_two_same (0, et.type == NT_unsigned, -1);
16160 }
16161
16162 static void
16163 do_neon_trn (void)
16164 {
16165 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16166 struct neon_type_el et = neon_check_type (2, rs,
16167 N_EQK, N_8 | N_16 | N_32 | N_KEY);
16168 NEON_ENCODE (INTEGER, inst);
16169 neon_two_same (neon_quad (rs), 1, et.size);
16170 }
16171
/* VZIP/VUZP.  The 32-bit D-register forms are aliases of VTRN.32 and are
   re-dispatched accordingly.  */

static void
do_neon_zip_uzp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  if (rs == NS_DD && et.size == 32)
    {
      /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
      inst.instruction = N_MNEM_vtrn;
      do_neon_trn ();
      return;
    }
  neon_two_same (neon_quad (rs), 1, et.size);
}
16187
16188 static void
16189 do_neon_sat_abs_neg (void)
16190 {
16191 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16192 struct neon_type_el et = neon_check_type (2, rs,
16193 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16194 neon_two_same (neon_quad (rs), 1, et.size);
16195 }
16196
16197 static void
16198 do_neon_pair_long (void)
16199 {
16200 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16201 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
16202 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
16203 inst.instruction |= (et.type == NT_unsigned) << 7;
16204 neon_two_same (neon_quad (rs), 1, et.size);
16205 }
16206
16207 static void
16208 do_neon_recip_est (void)
16209 {
16210 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16211 struct neon_type_el et = neon_check_type (2, rs,
16212 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
16213 inst.instruction |= (et.type == NT_float) << 8;
16214 neon_two_same (neon_quad (rs), 1, et.size);
16215 }
16216
16217 static void
16218 do_neon_cls (void)
16219 {
16220 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16221 struct neon_type_el et = neon_check_type (2, rs,
16222 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16223 neon_two_same (neon_quad (rs), 1, et.size);
16224 }
16225
16226 static void
16227 do_neon_clz (void)
16228 {
16229 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16230 struct neon_type_el et = neon_check_type (2, rs,
16231 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
16232 neon_two_same (neon_quad (rs), 1, et.size);
16233 }
16234
16235 static void
16236 do_neon_cnt (void)
16237 {
16238 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16239 struct neon_type_el et = neon_check_type (2, rs,
16240 N_EQK | N_INT, N_8 | N_KEY);
16241 neon_two_same (neon_quad (rs), 1, et.size);
16242 }
16243
16244 static void
16245 do_neon_swp (void)
16246 {
16247 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16248 neon_two_same (neon_quad (rs), 1, -1);
16249 }
16250
/* VTBL/VTBX: table lookup using a list of 1-4 D registers.  The list
   length minus one goes in bits 8-9.  */

static void
do_neon_tbl_tbx (void)
{
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  listlenbits = inst.operands[1].imm - 1;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  neon_dp_fixup (&inst);
}
16274
/* VLDM/VSTM of double-precision registers; single-precision lists are
   delegated to the VFP encoder.  */

static void
do_neon_ldm_stm (void)
{
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* Each D register occupies two words in the transfer list.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
16304
16305 static void
16306 do_neon_ldr_str (void)
16307 {
16308 int is_ldr = (inst.instruction & (1 << 20)) != 0;
16309
16310 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
16311 And is UNPREDICTABLE in thumb mode. */
16312 if (!is_ldr
16313 && inst.operands[1].reg == REG_PC
16314 && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
16315 {
16316 if (thumb_mode)
16317 inst.error = _("Use of PC here is UNPREDICTABLE");
16318 else if (warn_on_deprecated)
16319 as_tsktsk (_("Use of PC here is deprecated"));
16320 }
16321
16322 if (inst.operands[0].issingle)
16323 {
16324 if (is_ldr)
16325 do_vfp_nsyn_opcode ("flds");
16326 else
16327 do_vfp_nsyn_opcode ("fsts");
16328 }
16329 else
16330 {
16331 if (is_ldr)
16332 do_vfp_nsyn_opcode ("fldd");
16333 else
16334 do_vfp_nsyn_opcode ("fstd");
16335 }
16336 }
16337
16338 /* "interleave" version also handles non-interleaving register VLD1/VST1
16339 instructions. */
16340
static void
do_neon_ld_st_interleave (void)
{
  /* Encode the interleaving VLD<n>/VST<n> forms (and plain register
     VLD1/VST1): validate the optional alignment and look up the "type"
     field from the register-list shape.  */
  struct neon_type_el et = neon_check_type (1, NS_NULL,
    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7,  -1, 0xa,  -1, 0x6,  -1, 0x2,  -1, /* VLD1 / VST1.  */
       -1,  -1, 0x8, 0x9,  -1,  -1, 0x3,  -1, /* VLD2 / VST2.  */
       -1,  -1,  -1,  -1, 0x4, 0x5,  -1,  -1, /* VLD3 / VST3.  */
       -1,  -1,  -1,  -1,  -1,  -1, 0x0, 0x1  /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* The alignment hint (in bits) lives in the top of operand 1's imm;
     which values are legal depends on the register-list length.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 2;
	break;
      case 256:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 3;
	break;
      default:
      bad_alignment:
	first_error (_("bad alignment"));
	return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6].  We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask.  Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
	| (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));
  /* 64-bit elements only exist for VLD1/VST1.  */
  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
	      _("bad element type for instruction"));

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
16406
16407 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
16408 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
16409 otherwise. The variable arguments are a list of pairs of legal (size, align)
16410 values, terminated with -1. */
16411
static int
neon_alignment_bit (int size, int align, int *do_align, ...)
{
  va_list ap;
  int result = FAIL, thissize, thisalign;

  /* No alignment specified at all: nothing to check, bit stays clear.  */
  if (!inst.operands[1].immisalign)
    {
      *do_align = 0;
      return SUCCESS;
    }

  va_start (ap, do_align);

  /* Walk the (size, align) pairs until the -1 sentinel or a match.  */
  do
    {
      thissize = va_arg (ap, int);
      if (thissize == -1)
	break;
      thisalign = va_arg (ap, int);

      if (size == thissize && align == thisalign)
	result = SUCCESS;
    }
  while (result != SUCCESS);

  va_end (ap);

  if (result == SUCCESS)
    *do_align = 1;
  else
    first_error (_("unsupported alignment for instruction"));

  return result;
}
16447
/* Encode single-lane VLD<n>/VST<n> forms: validate list length, lane
   index and alignment, then place the lane and size fields.  */

static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;
  int logsize = neon_logbits (et.size);
  int align = inst.operands[1].imm >> 8;
  /* <n> of VLD<n>/VST<n>, minus one, from bits 8-9 of the bitmask.  */
  int n = (inst.instruction >> 8) & 3;
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  /* Legal (size, alignment) pairs and the index_align encoding differ
     per <n>.  */
  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
				       32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
16532
16533 /* Encode single n-element structure to all lanes VLD<n> instructions. */
16534
static void
do_neon_ld_dup (void)
{
  /* Encode VLD<n> single-structure-to-all-lanes forms.  Per-<n> rules:
     list length must match <n>, alignment legality varies, a register
     stride of 2 sets bit 5.  */
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;

  if (et.type == NT_invtype)
    return;

  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_align, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      /* VLD1 to all lanes may name one or two D registers.  */
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_align, 8, 16, 16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* 32-bit elements with 128-bit alignment use the 0b11 size code.  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  /* Alignment flag is bit 4.  */
  inst.instruction |= do_align << 4;
}
16606
16607 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
 16608    apart from bits [11:4]).  */
16609
16610 static void
16611 do_neon_ldx_stx (void)
16612 {
16613 if (inst.operands[1].isreg)
16614 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
16615
16616 switch (NEON_LANE (inst.operands[0].imm))
16617 {
16618 case NEON_INTERLEAVE_LANES:
16619 NEON_ENCODE (INTERLV, inst);
16620 do_neon_ld_st_interleave ();
16621 break;
16622
16623 case NEON_ALL_LANES:
16624 NEON_ENCODE (DUP, inst);
16625 if (inst.instruction == N_INV)
16626 {
16627 first_error ("only loads support such operands");
16628 break;
16629 }
16630 do_neon_ld_dup ();
16631 break;
16632
16633 default:
16634 NEON_ENCODE (LANE, inst);
16635 do_neon_ld_st_lane ();
16636 }
16637
16638 /* L bit comes from bit mask. */
16639 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16640 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16641 inst.instruction |= inst.operands[1].reg << 16;
16642
16643 if (inst.operands[1].postind)
16644 {
16645 int postreg = inst.operands[1].imm & 0xf;
16646 constraint (!inst.operands[1].immisreg,
16647 _("post-index must be a register"));
16648 constraint (postreg == 0xd || postreg == 0xf,
16649 _("bad register for post-index"));
16650 inst.instruction |= postreg;
16651 }
16652 else
16653 {
16654 constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
16655 constraint (inst.reloc.exp.X_op != O_constant
16656 || inst.reloc.exp.X_add_number != 0,
16657 BAD_ADDR_MODE);
16658
16659 if (inst.operands[1].writeback)
16660 {
16661 inst.instruction |= 0xd;
16662 }
16663 else
16664 inst.instruction |= 0xf;
16665 }
16666
16667 if (thumb_mode)
16668 inst.instruction |= 0xf9000000;
16669 else
16670 inst.instruction |= 0xf4000000;
16671 }
16672
16673 /* FP v8. */
static void
do_vfp_nsyn_fpv8 (enum neon_shape rs)
{
  /* Encode a VFP FP v8 instruction; RS is expected to be NS_FFF
     (single-precision) or NS_DDD (double-precision) here.  */

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  NEON_ENCODE (FPV8, inst);

  if (rs == NS_FFF)
    do_vfp_sp_dyadic ();
  else
    do_vfp_dp_rd_rn_rm ();

  /* Bit 8 selects the double-precision variant.  */
  if (rs == NS_DDD)
    inst.instruction |= 0x100;

  /* Set the top nibble (FP v8 instructions use the 0xF "unconditional"
     encoding space).  */
  inst.instruction |= 0xf0000000;
}
16695
16696 static void
16697 do_vsel (void)
16698 {
16699 set_it_insn_type (OUTSIDE_IT_INSN);
16700
16701 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
16702 first_error (_("invalid instruction shape"));
16703 }
16704
16705 static void
16706 do_vmaxnm (void)
16707 {
16708 set_it_insn_type (OUTSIDE_IT_INSN);
16709
16710 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
16711 return;
16712
16713 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
16714 return;
16715
16716 neon_dyadic_misc (NT_untyped, N_F32, 0);
16717 }
16718
static void
do_vrint_1 (enum neon_cvt_mode mode)
{
  /* Assemble a VRINT* instruction with rounding behaviour MODE.  The
     operand shape decides between the scalar VFP encoding (NS_FF/NS_DD)
     and the vector Neon encoding (NS_QQ or F32 element types).  */
  enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et;

  if (rs == NS_NULL)
    return;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* First try interpreting the types as VFP F32/F64.  */
  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
  if (et.type != NT_invtype)
    {
      /* VFP encodings.  */
      /* The a/n/p/m modes are unconditional-only forms.  */
      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
	set_it_insn_type (OUTSIDE_IT_INSN);

      NEON_ENCODE (FPV8, inst);
      if (rs == NS_FF)
	do_vfp_sp_monadic ();
      else
	do_vfp_dp_rd_rm ();

      /* Fold the rounding mode into the opcode bits.  */
      switch (mode)
	{
	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
	default: abort ();
	}

      /* Bit 8 selects double precision.  */
      inst.instruction |= (rs == NS_DD) << 8;
      do_vfp_cond_or_thumb ();
    }
  else
    {
      /* Neon encodings (or something broken...).  */
      inst.error = NULL;
      et = neon_check_type (2, rs, N_EQK, N_F32 | N_KEY);

      if (et.type == NT_invtype)
	return;

      set_it_insn_type (OUTSIDE_IT_INSN);
      NEON_ENCODE (FLOAT, inst);

      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	return;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Rounding mode goes in bits [9:7]; mode 'r' has no Neon form.  */
      switch (mode)
	{
	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
	default: abort ();
	}

      if (thumb_mode)
	inst.instruction |= 0xfc000000;
      else
	inst.instruction |= 0xf0000000;
    }
}
16801
/* VRINTX (rounding mode 'x').  */
static void
do_vrintx (void)
{
  do_vrint_1 (neon_cvt_mode_x);
}

/* VRINTZ (rounding mode 'z', round towards zero).  */
static void
do_vrintz (void)
{
  do_vrint_1 (neon_cvt_mode_z);
}

/* VRINTR (rounding mode 'r'; VFP-only, see do_vrint_1).  */
static void
do_vrintr (void)
{
  do_vrint_1 (neon_cvt_mode_r);
}

/* VRINTA (rounding mode 'a').  */
static void
do_vrinta (void)
{
  do_vrint_1 (neon_cvt_mode_a);
}

/* VRINTN (rounding mode 'n').  */
static void
do_vrintn (void)
{
  do_vrint_1 (neon_cvt_mode_n);
}

/* VRINTP (rounding mode 'p').  */
static void
do_vrintp (void)
{
  do_vrint_1 (neon_cvt_mode_p);
}

/* VRINTM (rounding mode 'm').  */
static void
do_vrintm (void)
{
  do_vrint_1 (neon_cvt_mode_m);
}
16843
16844 /* Crypto v1 instructions. */
/* Encode a two-operand (Q, Q) crypto instruction.  ELTTYPE is the
   required element type (N_8 or N_32); OP is the value for bits [7:6],
   or -1 to leave them clear.  */
static void
do_crypto_2op_1 (unsigned elttype, int op)
{
  /* Crypto instructions may not appear inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
      == NT_invtype)
    return;

  /* Discard any delayed diagnostic left behind by the type check.  */
  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  if (op != -1)
    inst.instruction |= op << 6;

  if (thumb_mode)
    inst.instruction |= 0xfc000000;
  else
    inst.instruction |= 0xf0000000;
}
16869
/* Encode a three-operand (Q, Q, Q) crypto instruction with 32-bit
   elements.  U goes into the 'U' bit of the three-same encoding and OP
   selects the opcode via (8 << op).  */
static void
do_crypto_3op_1 (int u, int op)
{
  /* Crypto instructions may not appear inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
		       N_32 | N_UNT | N_KEY).type == NT_invtype)
    return;

  /* Discard any delayed diagnostic left behind by the type check.  */
  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  neon_three_same (1, u, 8 << op);
}
16884
/* AESE.8.  */
static void
do_aese (void)
{
  do_crypto_2op_1 (N_8, 0);
}

/* AESD.8.  */
static void
do_aesd (void)
{
  do_crypto_2op_1 (N_8, 1);
}

/* AESMC.8.  */
static void
do_aesmc (void)
{
  do_crypto_2op_1 (N_8, 2);
}

/* AESIMC.8.  */
static void
do_aesimc (void)
{
  do_crypto_2op_1 (N_8, 3);
}

/* SHA1C.32.  */
static void
do_sha1c (void)
{
  do_crypto_3op_1 (0, 0);
}

/* SHA1P.32.  */
static void
do_sha1p (void)
{
  do_crypto_3op_1 (0, 1);
}

/* SHA1M.32.  */
static void
do_sha1m (void)
{
  do_crypto_3op_1 (0, 2);
}

/* SHA1SU0.32.  */
static void
do_sha1su0 (void)
{
  do_crypto_3op_1 (0, 3);
}

/* SHA256H.32.  */
static void
do_sha256h (void)
{
  do_crypto_3op_1 (1, 0);
}

/* SHA256H2.32.  */
static void
do_sha256h2 (void)
{
  do_crypto_3op_1 (1, 1);
}

/* SHA256SU1.32.  */
static void
do_sha256su1 (void)
{
  do_crypto_3op_1 (1, 2);
}

/* SHA1H.32 (no opcode bits in [7:6]).  */
static void
do_sha1h (void)
{
  do_crypto_2op_1 (N_32, -1);
}

/* SHA1SU1.32.  */
static void
do_sha1su1 (void)
{
  do_crypto_2op_1 (N_32, 0);
}

/* SHA256SU0.32.  */
static void
do_sha256su0 (void)
{
  do_crypto_2op_1 (N_32, 1);
}
16968
/* Encode a CRC32 instruction.  POLY selects the polynomial variant
   (0: crc32*, 1: crc32c*) and SZ the operand size field (0: byte,
   1: halfword, 2: word).  The register and control fields live at
   different bit positions in the ARM and Thumb encodings, hence the
   thumb_mode-dependent shifts.  */
static void
do_crc32_1 (unsigned int poly, unsigned int sz)
{
  unsigned int Rd = inst.operands[0].reg;
  unsigned int Rn = inst.operands[1].reg;
  unsigned int Rm = inst.operands[2].reg;

  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
  inst.instruction |= LOW4 (Rn) << 16;
  inst.instruction |= LOW4 (Rm);
  inst.instruction |= sz << (thumb_mode ? 4 : 21);
  inst.instruction |= poly << (thumb_mode ? 20 : 9);

  /* PC (and, in Thumb, SP) operands are unpredictable — warn, don't
     reject.  */
  if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
    as_warn (UNPRED_REG ("r15"));
  if (thumb_mode && (Rd == REG_SP || Rn == REG_SP || Rm == REG_SP))
    as_warn (UNPRED_REG ("r13"));
}
16988
/* CRC32B: CRC-32 polynomial, byte operand.  */
static void
do_crc32b (void)
{
  do_crc32_1 (0, 0);
}

/* CRC32H: CRC-32 polynomial, halfword operand.  */
static void
do_crc32h (void)
{
  do_crc32_1 (0, 1);
}

/* CRC32W: CRC-32 polynomial, word operand.  */
static void
do_crc32w (void)
{
  do_crc32_1 (0, 2);
}

/* CRC32CB: CRC-32C polynomial, byte operand.  */
static void
do_crc32cb (void)
{
  do_crc32_1 (1, 0);
}

/* CRC32CH: CRC-32C polynomial, halfword operand.  */
static void
do_crc32ch (void)
{
  do_crc32_1 (1, 1);
}

/* CRC32CW: CRC-32C polynomial, word operand.  */
static void
do_crc32cw (void)
{
  do_crc32_1 (1, 2);
}
17024
17025 \f
17026 /* Overall per-instruction processing. */
17027
17028 /* We need to be able to fix up arbitrary expressions in some statements.
17029 This is so that we can handle symbols that are an arbitrary distance from
17030 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
17031 which returns part of an address in a form which will be valid for
17032 a data instruction. We do this by pushing the expression into a symbol
17033 in the expr_section, and creating a fix for that. */
17034
/* Create a fix for the expression EXP at FRAG+WHERE, SIZE bytes wide,
   using relocation RELOC (PC_REL non-zero for pc-relative).  Expressions
   that are not directly representable are pushed into a symbol first.  */
static void
fix_new_arm (fragS *	frag,
	     int	where,
	     short int	size,
	     expressionS * exp,
	     int	pc_rel,
	     int	reloc)
{
  fixS *	   new_fix;

  switch (exp->X_op)
    {
    case O_constant:
      if (pc_rel)
	{
	  /* Create an absolute valued symbol, so we have something to
	     refer to in the object file.  Unfortunately for us, gas's
	     generic expression parsing will already have folded out
	     any use of .set foo/.type foo %function that may have
	     been used to set type information of the target location,
	     that's being specified symbolically.  We have to presume
	     the user knows what they are doing.  */
	  char name[16 + 8];
	  symbolS *symbol;

	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);

	  symbol = symbol_find_or_make (name);
	  S_SET_SEGMENT (symbol, absolute_section);
	  symbol_set_frag (symbol, &zero_address_frag);
	  S_SET_VALUE (symbol, exp->X_add_number);
	  /* Rewrite the expression as symbol + 0 and fall through to the
	     O_symbol case.  */
	  exp->X_op = O_symbol;
	  exp->X_add_symbol = symbol;
	  exp->X_add_number = 0;
	}
      /* FALLTHROUGH */
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
			     (enum bfd_reloc_code_real) reloc);
      break;

    default:
      /* Anything more complex: wrap the whole expression in a symbol.  */
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
				  pc_rel, (enum bfd_reloc_code_real) reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
17088
 17089 /* Create a frag for an instruction requiring relaxation.  */
static void
output_relax_insn (void)
{
  /* Emit the current (Thumb) instruction into a variant frag so the
     relaxation machinery can later widen it from THUMB_SIZE up to
     INSN_SIZE if needed.  */
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  /* Reduce the relocation expression to the symbol + offset pair the
     variant frag carries.  */
  switch (inst.reloc.exp.X_op)
    {
    case O_symbol:
      sym = inst.reloc.exp.X_add_symbol;
      offset = inst.reloc.exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.reloc.exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.reloc.exp);
      offset = 0;
      break;
    }
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
17120
17121 /* Write a 32-bit thumb instruction to buf. */
17122 static void
17123 put_thumb32_insn (char * buf, unsigned long insn)
17124 {
17125 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
17126 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
17127 }
17128
/* Emit the assembled instruction held in INST: report any pending
   diagnostic, handle relaxable instructions, write the encoding into the
   current frag, and attach relocation/debug information.  STR is the
   original source line, used only in error messages.  */
static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  if (inst.relax)
    {
      output_relax_insn ();
      return;
    }
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* An ARM "instruction" wider than INSN_SIZE is the same word
	 emitted twice (e.g. for a double-word value).  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    fix_new_arm (frag_now, to - frag_now->fr_literal,
		 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
		 inst.reloc.type);

  dwarf2_emit_insn (inst.size);
}
17175
17176 static char *
17177 output_it_inst (int cond, int mask, char * to)
17178 {
17179 unsigned long instruction = 0xbf00;
17180
17181 mask &= 0xf;
17182 instruction |= mask;
17183 instruction |= cond << 4;
17184
17185 if (to == NULL)
17186 {
17187 to = frag_more (2);
17188 #ifdef OBJ_ELF
17189 dwarf2_emit_insn (2);
17190 #endif
17191 }
17192
17193 md_number_to_chars (to, instruction, 2);
17194
17195 return to;
17196 }
17197
17198 /* Tag values used in struct asm_opcode's tag field. */
/* NOTE: opcode_lookup compares tags against OT_odd_infix_0 and uses
   (tag - OT_odd_infix_0) as a character index, so OT_odd_infix_0 must
   remain the last enumerator.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs. */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
17231
17232 /* Subroutine of md_assemble, responsible for looking up the primary
17233 opcode from the mnemonic the user wrote. STR points to the
17234 beginning of the mnemonic.
17235
17236 This is not simply a hash table lookup, because of conditional
17237 variants. Most instructions have conditional variants, which are
17238 expressed with a _conditional affix_ to the mnemonic. If we were
17239 to encode each conditional variant as a literal string in the opcode
17240 table, it would have approximately 20,000 entries.
17241
17242 Most mnemonics take this affix as a suffix, and in unified syntax,
17243 'most' is upgraded to 'all'. However, in the divided syntax, some
17244 instructions take the affix as an infix, notably the s-variants of
17245 the arithmetic instructions. Of those instructions, all but six
17246 have the infix appear after the third character of the mnemonic.
17247
17248 Accordingly, the algorithm for looking up primary opcodes given
17249 an identifier is:
17250
17251 1. Look up the identifier in the opcode table.
17252 If we find a match, go to step U.
17253
17254 2. Look up the last two characters of the identifier in the
17255 conditions table. If we find a match, look up the first N-2
17256 characters of the identifier in the opcode table. If we
17257 find a match, go to step CE.
17258
17259 3. Look up the fourth and fifth characters of the identifier in
17260 the conditions table. If we find a match, extract those
17261 characters from the identifier, and look up the remaining
17262 characters in the opcode table. If we find a match, go
17263 to step CM.
17264
17265 4. Fail.
17266
17267 U. Examine the tag field of the opcode structure, in case this is
17268 one of the six instructions with its conditional infix in an
17269 unusual place. If it is, the tag tells us where to find the
17270 infix; look it up in the conditions table and set inst.cond
17271 accordingly. Otherwise, this is an unconditional instruction.
17272 Again set inst.cond accordingly. Return the opcode structure.
17273
17274 CE. Examine the tag field to make sure this is an instruction that
17275 should receive a conditional suffix. If it is not, fail.
17276 Otherwise, set inst.cond from the suffix we already looked up,
17277 and return the opcode structure.
17278
17279 CM. Examine the tag field to make sure this is an instruction that
17280 should receive a conditional infix after the third character.
17281 If it is not, fail. Otherwise, undo the edits to the current
17282 line of input and proceed as for case CE. */
17283
17284 static const struct asm_opcode *
17285 opcode_lookup (char **str)
17286 {
17287 char *end, *base;
17288 char *affix;
17289 const struct asm_opcode *opcode;
17290 const struct asm_cond *cond;
17291 char save[2];
17292
17293 /* Scan up to the end of the mnemonic, which must end in white space,
17294 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
17295 for (base = end = *str; *end != '\0'; end++)
17296 if (*end == ' ' || *end == '.')
17297 break;
17298
17299 if (end == base)
17300 return NULL;
17301
17302 /* Handle a possible width suffix and/or Neon type suffix. */
17303 if (end[0] == '.')
17304 {
17305 int offset = 2;
17306
17307 /* The .w and .n suffixes are only valid if the unified syntax is in
17308 use. */
17309 if (unified_syntax && end[1] == 'w')
17310 inst.size_req = 4;
17311 else if (unified_syntax && end[1] == 'n')
17312 inst.size_req = 2;
17313 else
17314 offset = 0;
17315
17316 inst.vectype.elems = 0;
17317
17318 *str = end + offset;
17319
17320 if (end[offset] == '.')
17321 {
17322 /* See if we have a Neon type suffix (possible in either unified or
17323 non-unified ARM syntax mode). */
17324 if (parse_neon_type (&inst.vectype, str) == FAIL)
17325 return NULL;
17326 }
17327 else if (end[offset] != '\0' && end[offset] != ' ')
17328 return NULL;
17329 }
17330 else
17331 *str = end;
17332
17333 /* Look for unaffixed or special-case affixed mnemonic. */
17334 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17335 end - base);
17336 if (opcode)
17337 {
17338 /* step U */
17339 if (opcode->tag < OT_odd_infix_0)
17340 {
17341 inst.cond = COND_ALWAYS;
17342 return opcode;
17343 }
17344
17345 if (warn_on_deprecated && unified_syntax)
17346 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17347 affix = base + (opcode->tag - OT_odd_infix_0);
17348 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17349 gas_assert (cond);
17350
17351 inst.cond = cond->value;
17352 return opcode;
17353 }
17354
17355 /* Cannot have a conditional suffix on a mnemonic of less than two
17356 characters. */
17357 if (end - base < 3)
17358 return NULL;
17359
17360 /* Look for suffixed mnemonic. */
17361 affix = end - 2;
17362 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17363 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17364 affix - base);
17365 if (opcode && cond)
17366 {
17367 /* step CE */
17368 switch (opcode->tag)
17369 {
17370 case OT_cinfix3_legacy:
17371 /* Ignore conditional suffixes matched on infix only mnemonics. */
17372 break;
17373
17374 case OT_cinfix3:
17375 case OT_cinfix3_deprecated:
17376 case OT_odd_infix_unc:
17377 if (!unified_syntax)
17378 return 0;
17379 /* else fall through */
17380
17381 case OT_csuffix:
17382 case OT_csuffixF:
17383 case OT_csuf_or_in3:
17384 inst.cond = cond->value;
17385 return opcode;
17386
17387 case OT_unconditional:
17388 case OT_unconditionalF:
17389 if (thumb_mode)
17390 inst.cond = cond->value;
17391 else
17392 {
17393 /* Delayed diagnostic. */
17394 inst.error = BAD_COND;
17395 inst.cond = COND_ALWAYS;
17396 }
17397 return opcode;
17398
17399 default:
17400 return NULL;
17401 }
17402 }
17403
17404 /* Cannot have a usual-position infix on a mnemonic of less than
17405 six characters (five would be a suffix). */
17406 if (end - base < 6)
17407 return NULL;
17408
17409 /* Look for infixed mnemonic in the usual position. */
17410 affix = base + 3;
17411 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17412 if (!cond)
17413 return NULL;
17414
17415 memcpy (save, affix, 2);
17416 memmove (affix, affix + 2, (end - affix) - 2);
17417 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17418 (end - base) - 2);
17419 memmove (affix + 2, affix, (end - affix) - 2);
17420 memcpy (affix, save, 2);
17421
17422 if (opcode
17423 && (opcode->tag == OT_cinfix3
17424 || opcode->tag == OT_cinfix3_deprecated
17425 || opcode->tag == OT_csuf_or_in3
17426 || opcode->tag == OT_cinfix3_legacy))
17427 {
17428 /* Step CM. */
17429 if (warn_on_deprecated && unified_syntax
17430 && (opcode->tag == OT_cinfix3
17431 || opcode->tag == OT_cinfix3_deprecated))
17432 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17433
17434 inst.cond = cond->value;
17435 return opcode;
17436 }
17437
17438 return NULL;
17439 }
17440
17441 /* This function generates an initial IT instruction, leaving its block
17442 virtually open for the new instructions. Eventually,
17443 the mask will be updated by now_it_add_mask () each time
17444 a new instruction needs to be included in the IT block.
17445 Finally, the block is closed with close_automatic_it_block ().
17446 The block closure can be requested either from md_assemble (),
17447 a tencode (), or due to a label hook. */
17448
static void
new_automatic_it_block (int cond)
{
  now_it.state = AUTOMATIC_IT_BLOCK;
  now_it.mask = 0x18;		/* Initial mask: one-instruction block.  */
  now_it.cc = cond;
  now_it.block_length = 1;
  mapping_state (MAP_THUMB);
  /* Emit the IT instruction now, and remember where it went so that
     now_it_add_mask () can rewrite its mask in place as the block
     grows.  */
  now_it.insn = output_it_inst (cond, now_it.mask, NULL);
  now_it.warn_deprecated = FALSE;
  now_it.insn_cond = TRUE;
}
17461
17462 /* Close an automatic IT block.
17463 See comments in new_automatic_it_block (). */
17464
static void
close_automatic_it_block (void)
{
  /* 0x10 is the "no slots remaining" mask value, so no further
     instructions can join this block.  */
  now_it.mask = 0x10;
  now_it.block_length = 0;
}
17471
17472 /* Update the mask of the current automatically-generated IT
17473 instruction. See comments in new_automatic_it_block (). */
17474
static void
now_it_add_mask (int cond)
{
#define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
#define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
					  | ((bitvalue) << (nbit)))
  /* The low bit of COND is the then/else bit recorded for this insn.  */
  const int resulting_bit = (cond & 1);

  now_it.mask &= 0xf;
  /* Store the then/else bit at the slot for the current block length,
     and move the trailing marker bit down one position.  */
  now_it.mask = SET_BIT_VALUE (now_it.mask,
			       resulting_bit,
			      (5 - now_it.block_length));
  now_it.mask = SET_BIT_VALUE (now_it.mask,
			       1,
			       ((5 - now_it.block_length) - 1) );
  /* Rewrite, in place, the IT instruction emitted when the block was
     opened (see new_automatic_it_block ()).  */
  output_it_inst (now_it.cc, now_it.mask, now_it.insn);

#undef CLEAR_BIT
#undef SET_BIT_VALUE
}
17495
17496 /* The IT blocks handling machinery is accessed through the these functions:
17497 it_fsm_pre_encode () from md_assemble ()
17498 set_it_insn_type () optional, from the tencode functions
17499 set_it_insn_type_last () ditto
17500 in_it_block () ditto
17501 it_fsm_post_encode () from md_assemble ()
 17502      force_automatic_it_block_close () from label handling functions
17503
17504 Rationale:
17505 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
17506 initializing the IT insn type with a generic initial value depending
17507 on the inst.condition.
17508 2) During the tencode function, two things may happen:
17509 a) The tencode function overrides the IT insn type by
17510 calling either set_it_insn_type (type) or set_it_insn_type_last ().
17511 b) The tencode function queries the IT block state by
17512 calling in_it_block () (i.e. to determine narrow/not narrow mode).
17513
17514 Both set_it_insn_type and in_it_block run the internal FSM state
17515 handling function (handle_it_state), because: a) setting the IT insn
17516 type may incur in an invalid state (exiting the function),
17517 and b) querying the state requires the FSM to be updated.
17518 Specifically we want to avoid creating an IT block for conditional
17519 branches, so it_fsm_pre_encode is actually a guess and we can't
17520 determine whether an IT block is required until the tencode () routine
 17521    has decided what type of instruction this actually is.
17522 Because of this, if set_it_insn_type and in_it_block have to be used,
17523 set_it_insn_type has to be called first.
17524
17525 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
17526 determines the insn IT type depending on the inst.cond code.
17527 When a tencode () routine encodes an instruction that can be
17528 either outside an IT block, or, in the case of being inside, has to be
17529 the last one, set_it_insn_type_last () will determine the proper
17530 IT instruction type based on the inst.cond code. Otherwise,
17531 set_it_insn_type can be called for overriding that logic or
17532 for covering other cases.
17533
17534 Calling handle_it_state () may not transition the IT block state to
 17535    OUTSIDE_IT_BLOCK immediately, since the (current) state could be
17536 still queried. Instead, if the FSM determines that the state should
17537 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
17538 after the tencode () function: that's what it_fsm_post_encode () does.
17539
17540 Since in_it_block () calls the state handling function to get an
17541 updated state, an error may occur (due to invalid insns combination).
17542 In that case, inst.error is set.
17543 Therefore, inst.error has to be checked after the execution of
17544 the tencode () routine.
17545
17546 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
17547 any pending state change (if any) that didn't take place in
17548 handle_it_state () as explained above. */
17549
17550 static void
17551 it_fsm_pre_encode (void)
17552 {
17553 if (inst.cond != COND_ALWAYS)
17554 inst.it_insn_type = INSIDE_IT_INSN;
17555 else
17556 inst.it_insn_type = OUTSIDE_IT_INSN;
17557
17558 now_it.state_handled = 0;
17559 }
17560
17561 /* IT state FSM handling function. */
17562
/* Run one step of the IT-block FSM for the current instruction.
   Returns SUCCESS, or FAIL with inst.error set on an invalid
   combination.  */
static int
handle_it_state (void)
{
  now_it.state_handled = 1;
  now_it.insn_cond = FALSE;

  switch (now_it.state)
    {
    case OUTSIDE_IT_BLOCK:
      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (thumb_mode == 0)
	    {
	      if (unified_syntax
		  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		as_tsktsk (_("Warning: conditional outside an IT block"\
			     " for Thumb."));
	    }
	  else
	    {
	      if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		{
		  /* Automatically generate the IT instruction.  */
		  new_automatic_it_block (inst.cond);
		  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
		    close_automatic_it_block ();
		}
	      else
		{
		  inst.error = BAD_OUT_IT;
		  return FAIL;
		}
	    }
	  break;

	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case IT_INSN:
	  /* An explicit IT instruction opens a manually-managed block.  */
	  now_it.state = MANUAL_IT_BLOCK;
	  now_it.block_length = 0;
	  break;
	}
      break;

    case AUTOMATIC_IT_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	 to incompatible conditions or
	 4 insns-length block reached).  */

      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_it_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_it.block_length++;

	  if (now_it.block_length > 4
	      || !now_it_compatible (inst.cond))
	    {
	      /* Case c): close and (unless this was a conditional-branch
		 style insn) open a fresh block for this insn.  */
	      force_automatic_it_block_close ();
	      if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    {
	      /* Case a): extend the open block's mask.  */
	      now_it.insn_cond = TRUE;
	      now_it_add_mask (inst.cond);
	    }

	  if (now_it.state == AUTOMATIC_IT_BLOCK
	      && (inst.it_insn_type == INSIDE_IT_LAST_INSN
		  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	case NEUTRAL_IT_INSN:
	  now_it.block_length++;
	  now_it.insn_cond = TRUE;

	  if (now_it.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    now_it_add_mask (now_it.cc & 1);
	  break;

	case IT_INSN:
	  close_automatic_it_block ();
	  now_it.state = MANUAL_IT_BLOCK;
	  break;
	}
      break;

    case MANUAL_IT_BLOCK:
      {
	/* Check conditional suffixes.  */
	const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
	int is_last;
	/* Consume one slot of the explicit IT mask.  */
	now_it.mask <<= 1;
	now_it.mask &= 0x1f;
	is_last = (now_it.mask == 0x10);
	now_it.insn_cond = TRUE;

	switch (inst.it_insn_type)
	  {
	  case OUTSIDE_IT_INSN:
	    inst.error = BAD_NOT_IT;
	    return FAIL;

	  case INSIDE_IT_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    break;

	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in an IT block.  */
	    break;

	  case IT_INSN:
	    inst.error = BAD_IT_IT;
	    return FAIL;
	  }
      }
      break;
    }

  return SUCCESS;
}
17724
/* A pattern/mask pair describing a class of 16-bit Thumb encodings,
   plus a human-readable name for it.  Used to warn about instruction
   classes that are deprecated inside an IT block on ARMv8.  */
struct depr_insn_mask
{
  unsigned long pattern;	/* Required bit values after masking.  */
  unsigned long mask;		/* Which encoding bits to compare.  */
  const char* description;	/* Translatable name of the class.  */
};
17731
/* List of 16-bit instruction patterns deprecated in an IT block in
   ARMv8.  An instruction matches an entry when
   (insn & mask) == pattern; the list ends with an all-zero sentinel.  */
static const struct depr_insn_mask depr_it_insns[] = {
  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
  { 0xa000, 0xb800, N_("ADR") },
  { 0x4800, 0xf800, N_("Literal loads") },
  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
     field in asm_opcode. 'tvalue' is used at the stage this check happen.  */
  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
  { 0, 0, NULL }
};
17746
17747 static void
17748 it_fsm_post_encode (void)
17749 {
17750 int is_last;
17751
17752 if (!now_it.state_handled)
17753 handle_it_state ();
17754
17755 if (now_it.insn_cond
17756 && !now_it.warn_deprecated
17757 && warn_on_deprecated
17758 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
17759 {
17760 if (inst.instruction >= 0x10000)
17761 {
17762 as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
17763 "deprecated in ARMv8"));
17764 now_it.warn_deprecated = TRUE;
17765 }
17766 else
17767 {
17768 const struct depr_insn_mask *p = depr_it_insns;
17769
17770 while (p->mask != 0)
17771 {
17772 if ((inst.instruction & p->mask) == p->pattern)
17773 {
17774 as_tsktsk (_("IT blocks containing 16-bit Thumb instructions "
17775 "of the following class are deprecated in ARMv8: "
17776 "%s"), p->description);
17777 now_it.warn_deprecated = TRUE;
17778 break;
17779 }
17780
17781 ++p;
17782 }
17783 }
17784
17785 if (now_it.block_length > 1)
17786 {
17787 as_tsktsk (_("IT blocks containing more than one conditional "
17788 "instruction are deprecated in ARMv8"));
17789 now_it.warn_deprecated = TRUE;
17790 }
17791 }
17792
17793 is_last = (now_it.mask == 0x10);
17794 if (is_last)
17795 {
17796 now_it.state = OUTSIDE_IT_BLOCK;
17797 now_it.mask = 0;
17798 }
17799 }
17800
17801 static void
17802 force_automatic_it_block_close (void)
17803 {
17804 if (now_it.state == AUTOMATIC_IT_BLOCK)
17805 {
17806 close_automatic_it_block ();
17807 now_it.state = OUTSIDE_IT_BLOCK;
17808 now_it.mask = 0;
17809 }
17810 }
17811
/* Return nonzero if the current instruction is inside an IT block of
   any kind.  Drives the IT state machine first if this instruction's
   state has not been handled yet, so the answer reflects the state
   *after* the current instruction is accounted for.  */
static int
in_it_block (void)
{
  if (!now_it.state_handled)
    handle_it_state ();

  return now_it.state != OUTSIDE_IT_BLOCK;
}
17820
17821 /* Whether OPCODE only has T32 encoding. Since this function is only used by
17822 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
17823 here, hence the "known" in the function name. */
17824
17825 static bfd_boolean
17826 known_t32_only_insn (const struct asm_opcode *opcode)
17827 {
17828 /* Original Thumb-1 wide instruction. */
17829 if (opcode->tencode == do_t_blx
17830 || opcode->tencode == do_t_branch23
17831 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
17832 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
17833 return TRUE;
17834
17835 /* Wide-only instruction added to ARMv8-M. */
17836 if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m)
17837 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
17838 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
17839 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
17840 return TRUE;
17841
17842 return FALSE;
17843 }
17844
17845 /* Whether wide instruction variant can be used if available for a valid OPCODE
17846 in ARCH. */
17847
17848 static bfd_boolean
17849 t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
17850 {
17851 if (known_t32_only_insn (opcode))
17852 return TRUE;
17853
17854 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
17855 of variant T3 of B.W is checked in do_t_branch. */
17856 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
17857 && opcode->tencode == do_t_branch)
17858 return TRUE;
17859
17860 /* Wide instruction variants of all instructions with narrow *and* wide
17861 variants become available with ARMv6t2. Other opcodes are either
17862 narrow-only or wide-only and are thus available if OPCODE is valid. */
17863 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
17864 return TRUE;
17865
17866 /* OPCODE with narrow only instruction variant or wide variant not
17867 available. */
17868 return FALSE;
17869 }
17870
/* Main entry point for assembling one statement: parse the mnemonic in
   STR, select the Thumb or ARM encoding path according to the current
   mode, encode the operands into the global INST, drive the IT-block
   state machine around the encoding, and finally emit the instruction
   with output_inst.  Errors are reported via as_bad and abort the
   statement.  */
void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed. */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Start each statement from a zeroed instruction record.  */
  memset (&inst, '\0', sizeof (inst));
  inst.reloc.type = BFD_RELOC_UNUSED;

  /* On success P is advanced past the mnemonic to the operands.  */
  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive. */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_tsktsk (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field. */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices. */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU. */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      /* Two things are addressed here:
	 1) Implicit require narrow instructions on Thumb-1.
	    This avoids relaxation accidentally introducing Thumb-2
	    instructions.
	 2) Reject wide instructions in non Thumb-2 cores.

	 Only instructions with narrow and wide variants need to be handled
	 but selecting all non wide-only instructions is easier. */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
	  && !t32_insn_ok (variant, opcode))
	{
	  if (inst.size_req == 0)
	    inst.size_req = 2;
	  else if (inst.size_req == 4)
	    {
	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
		as_bad (_("selected processor does not support 32bit wide "
			  "variant of instruction `%s'"), str);
	      else
		as_bad (_("selected processor does not support `%s' in "
			  "Thumb-2 mode"), str);
	      return;
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
	{
	  /* Prepare the it_insn_type for those encodings that don't set
	     it. */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  /* 16-bit encodings in the 0xe800-0xffff range would be
	     ambiguous with the first halfword of a 32-bit encoding.  */
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction. */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen. The impact
	 of relaxable instructions will be considered later after we finish all
	 relaxation. */
      if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
	variant = arm_arch_none;
      else
	variant = cpu_variant;
      if (inst.size == 4 && !t32_insn_ok (variant, opcode))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores. */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU. */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      /* Unconditional-F instructions carry 0xF in the condition field;
	 all others get the parsed condition.  */
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xFU << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core. */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  output_inst (str);
}
18060
/* End-of-assembly check: warn if any manually-opened IT block was left
   unfinished.  For ELF each section keeps its own IT state, so every
   section is checked; otherwise only the single global state exists.  */
static void
check_it_blocks_finished (void)
{
#ifdef OBJ_ELF
  asection *sect;

  for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
    if (seg_info (sect)->tc_segment_info_data.current_it.state
	== MANUAL_IT_BLOCK)
      {
	as_warn (_("section '%s' finished with an open IT block."),
		 sect->name);
      }
#else
  if (now_it.state == MANUAL_IT_BLOCK)
    as_warn (_("file finished with an open IT block."));
#endif
}
18079
18080 /* Various frobbings of labels and their addresses. */
18081
/* Hook run at the start of each input line: forget the label seen on
   the previous line so md_assemble only realigns labels defined on the
   current line.  */
void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
18087
/* Hook run whenever a label SYM is defined: remember it for alignment
   in md_assemble, tag it with the current Thumb/interwork state, close
   any automatic IT block, optionally mark it as a Thumb function, and
   emit a DWARF line entry for it.  */
void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  /* A label terminates any automatic IT block in progress.  */
  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions. This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls. Also, these labels can be used
     as part of a computed goto or switch statement. eg gcc
     can generate code that looks like this:

		ldr r2, [pc, .Laaa]
		lsl r3, r3, #2
		ldr r2, [r3, r2]
		mov pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:   .word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set. Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue. */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set. This will allow
	 interworking between Arm and Thumb functions to work
	 correctly. */

      THUMB_SET_FUNC (sym, 1);

      /* The flag is one-shot: it only applies to the next label.  */
      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
18146
18147 bfd_boolean
18148 arm_data_in_code (void)
18149 {
18150 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
18151 {
18152 *input_line_pointer = '/';
18153 input_line_pointer += 5;
18154 *input_line_pointer = 0;
18155 return TRUE;
18156 }
18157
18158 return FALSE;
18159 }
18160
18161 char *
18162 arm_canonicalize_symbol_name (char * name)
18163 {
18164 int len;
18165
18166 if (thumb_mode && (len = strlen (name)) > 5
18167 && streq (name + len - 5, "/data"))
18168 *(name + len - 5) = 0;
18169
18170 return name;
18171 }
18172 \f
18173 /* Table of all register names defined by default. The user can
18174 define additional names with .req. Note that all register names
18175 should appear in both upper and lowercase variants. Some registers
18176 also have mixed-case names. */
18177
/* REGDEF builds one reg_entry initializer: stringized name, register
   number, REG_TYPE_* type, builtin flag.  REGNUM glues a prefix and a
   number into the name; REGNUM2 doubles the number (used for Q
   registers, which alias D-register pairs — TODO confirm against the
   Neon parsing code).  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
/* Sixteen consecutive registers <p>0 .. <p>15.  */
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
/* The high half, <p>16 .. <p>31.  */
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
/* LR/SP/SPSR triple (upper- and lowercase) for one banked mode; the
   768|… and SPSR_BIT values encode the banked-register selector in the
   register number field — see the banked-register parsing code.  */
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
18203
/* Every register name known by default, in both cases (plus some
   mixed-case names).  Users can add more with .req.  */
static const struct reg_entry reg_names[] =
{
  /* ARM integer registers. */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms. */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases. */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Coprocessor numbers. */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers. The "cr" variants are for backward
     compatibility. */
  REGSET(c, CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* ARM banked registers.  The 512|(n<<16) values are the selector
     encodings consumed by the banked-register operand parser.  */
  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),

  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),

  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),

  /* FPA registers. */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers. */
  REGSET(s,VFS),  REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers. */
  REGSET(d,VFD),  REGSET(D,VFD),
  /* Extra Neon DP registers. */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers. */
  REGSET2(q,NQ),  REGSET2(Q,NQ),

  /* VFP control registers. */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),

  /* Maverick DSP coprocessor registers. */
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15. */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3. */
  REGDEF(wcid, 0,MMXWC),  REGDEF(wCID, 0,MMXWC),  REGDEF(WCID, 0,MMXWC),
  REGDEF(wcon, 1,MMXWC),  REGDEF(wCon, 1,MMXWC),  REGDEF(WCON, 1,MMXWC),
  REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers. */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
18318 #undef REGDEF
18319 #undef REGNUM
18320 #undef REGSET
18321
18322 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
18323 within psr_required_here. */
/* The table lists every ordering of the f/s/x/c flag letters, so the
   suffix lookup is insensitive to the order the user writes them in.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation. Note that "all" is no longer
     truly all possible PSR bits. */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags. */
  {"f",	   PSR_f},
  {"c",	   PSR_c},
  {"x",	   PSR_x},
  {"s",	   PSR_s},

  /* Combinations of flags. */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
18400
18401 /* Table of V7M psr names. */
/* V7M system register names, each in lower and upper case, mapped to
   their MRS/MSR SYSm encoding values.  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr",	  0 }, {"APSR",		0 },
  {"iapsr",	  1 }, {"IAPSR",	1 },
  {"eapsr",	  2 }, {"EAPSR",	2 },
  {"psr",	  3 }, {"PSR",		3 },
  {"xpsr",	  3 }, {"XPSR",		3 }, {"xPSR",	  3 },
  {"ipsr",	  5 }, {"IPSR",		5 },
  {"epsr",	  6 }, {"EPSR",		6 },
  {"iepsr",	  7 }, {"IEPSR",	7 },
  {"msp",	  8 }, {"MSP",		8 },
  {"psp",	  9 }, {"PSP",		9 },
  {"primask",	  16}, {"PRIMASK",	16},
  {"basepri",	  17}, {"BASEPRI",	17},
  {"basepri_max", 18}, {"BASEPRI_MAX",	18},
  {"basepri_max", 18}, {"BASEPRI_MASK", 18}, /* Typo, preserved for backwards compatibility. */
  {"faultmask",	  19}, {"FAULTMASK",	19},
  {"control",	  20}, {"CONTROL",	20}
};
18421
18422 /* Table of all shift-in-operand names. */
/* Table of all shift-in-operand names; "asl" is accepted as a synonym
   for LSL.  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
};
18432
18433 /* Table of all explicit relocation names. */
18434 #ifdef OBJ_ELF
/* Table of all explicit relocation names (the "(xxx)" operand
   modifiers), each in lower and upper case, mapped to BFD reloc
   codes.  */
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},	 { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32},  { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32},  { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},   { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},   { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
  { "got_prel", BFD_RELOC_ARM_GOT_PREL},  { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
	{ "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
	{ "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
	{ "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ}
};
18456 #endif
18457
18458 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
/* Condition name -> condition code value.  hs/cs, and cc/ul/lo, are
   synonym groups sharing one code.  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
18477
/* Emit one barrier option in both lower (L) and upper (U) case, with
   its 4-bit option CODE and the architecture feature that enables
   it.  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }

/* DMB/DSB/ISB barrier option names.  The LD-suffixed options were
   introduced with ARMv8; the rest require the base barrier
   extension.  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
};
18501
18502 #undef UL_BARRIER
18503
18504 /* Table of ARM-format instructions. */
18505
18506 /* Macros for gluing together operand strings. N.B. In all cases
18507 other than OPS0, the trailing OP_stop comes from default
18508 zero-initialization of the unspecified elements of the array. */
18509 #define OPS0() { OP_stop, }
18510 #define OPS1(a) { OP_##a, }
18511 #define OPS2(a,b) { OP_##a,OP_##b, }
18512 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
18513 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
18514 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
18515 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
18516
18517 /* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
18518 This is useful when mixing operands for ARM and THUMB, i.e. using the
18519 MIX_ARM_THUMB_OPERANDS macro.
18520 In order to use these macros, prefix the number of operands with _
18521 e.g. _3. */
18522 #define OPS_1(a) { a, }
18523 #define OPS_2(a,b) { a,b, }
18524 #define OPS_3(a,b,c) { a,b,c, }
18525 #define OPS_4(a,b,c,d) { a,b,c,d, }
18526 #define OPS_5(a,b,c,d,e) { a,b,c,d,e, }
18527 #define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
18528
18529 /* These macros abstract out the exact format of the mnemonic table and
18530 save some repeated characters. */
18531
18532 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
18533 #define TxCE(mnem, op, top, nops, ops, ae, te) \
18534 { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
18535 THUMB_VARIANT, do_##ae, do_##te }
18536
18537 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
18538 a T_MNEM_xyz enumerator. */
18539 #define TCE(mnem, aop, top, nops, ops, ae, te) \
18540 TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
18541 #define tCE(mnem, aop, top, nops, ops, ae, te) \
18542 TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)
18543
18544 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
18545 infix after the third character. */
18546 #define TxC3(mnem, op, top, nops, ops, ae, te) \
18547 { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
18548 THUMB_VARIANT, do_##ae, do_##te }
18549 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
18550 { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
18551 THUMB_VARIANT, do_##ae, do_##te }
18552 #define TC3(mnem, aop, top, nops, ops, ae, te) \
18553 TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
18554 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
18555 TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
18556 #define tC3(mnem, aop, top, nops, ops, ae, te) \
18557 TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
18558 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
18559 TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
18560
/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  Both opcode arguments
   are bare hex digits here (contrast TxCE, whose 'top' is a complete
   expression).  */
#define TUE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function (the
   single 'en' argument fills both encoder slots).  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field (OT_unconditionalF rather than
   OT_unconditional).  */
#define TUF(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
18580
/* ARM-only variants of all the above: the Thumb opcode slot is 0, the
   Thumb variant pointer is 0 and the Thumb encoder is NULL.  Note the
   asymmetry: C3 stringises its mnemonic (#mnem) and so is invoked with
   a bare identifier — e.g. C3(adrl, ...) below — whereas CE expects a
   string literal, matching how each is used in the table.  */
#define CE(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  The
   Thumb opcode is formed by pasting 0xe (condition 'always') in front
   of the same hex digits, and the ARM variant and encoder are reused
   for Thumb.  */
#define cCE(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
18610
/* Build one entry whose mnemonic is the literal string m1, the
   stringised condition token m2, then the literal string m3,
   concatenated at compile time.  When m2 is the empty token,
   sizeof (#m2) == 1 (just the NUL terminator) and the entry is tagged
   OT_odd_infix_unc (no condition present); otherwise the tag records
   where the infix sits: OT_odd_infix_0 + sizeof (m1) - 1, i.e. the
   length of the m1 string literal.  ARM-only: Thumb slots are 0/NULL.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae) \
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Expand to xCM_ entries for the bare mnemonic plus every condition
   spelling.  Note this list includes the aliases hs (for cs) and
   ul (alongside cc/lo — presumably an old "unsigned lower" synonym;
   verify against the assembler's condition-code docs), and al, on top
   of the sixteen-minus-nv architectural codes.  */
#define CM(m1, m2, op, nops, ops, ae) \
  xCM_ (m1, , m2, op, nops, ops, ae), \
  xCM_ (m1, eq, m2, op, nops, ops, ae), \
  xCM_ (m1, ne, m2, op, nops, ops, ae), \
  xCM_ (m1, cs, m2, op, nops, ops, ae), \
  xCM_ (m1, hs, m2, op, nops, ops, ae), \
  xCM_ (m1, cc, m2, op, nops, ops, ae), \
  xCM_ (m1, ul, m2, op, nops, ops, ae), \
  xCM_ (m1, lo, m2, op, nops, ops, ae), \
  xCM_ (m1, mi, m2, op, nops, ops, ae), \
  xCM_ (m1, pl, m2, op, nops, ops, ae), \
  xCM_ (m1, vs, m2, op, nops, ops, ae), \
  xCM_ (m1, vc, m2, op, nops, ops, ae), \
  xCM_ (m1, hi, m2, op, nops, ops, ae), \
  xCM_ (m1, ls, m2, op, nops, ops, ae), \
  xCM_ (m1, ge, m2, op, nops, ops, ae), \
  xCM_ (m1, lt, m2, op, nops, ops, ae), \
  xCM_ (m1, gt, m2, op, nops, ops, ae), \
  xCM_ (m1, le, m2, op, nops, ops, ae), \
  xCM_ (m1, al, m2, op, nops, ops, ae)
18636
/* ARM-only unconditional entries (no Thumb encoding).  UE keeps
   condition 0xE semantics (OT_unconditional); UF bears 0xF
   (OT_unconditionalF).  Both stringise the mnemonic, so they are
   invoked with bare identifiers, like C3.  */
#define UE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
18642
/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  The same hex digits fill
   both opcode slots.  */
#define NUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes: both opcode slots hold an
   N_MNEM_xyz enumerator rather than a literal encoding.  */
#define nUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  NCE_tag is the common core; NCE/NCEF pick the tag.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc) \
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc) \
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types
   (N_MNEM_xyz enumerators in the opcode slots, as for nUF).  */
#define nCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Lets a table entry pass 0 as its encoder name: do_##ae then expands
   to do_0, i.e. a null encoder pointer (used e.g. by the Thumb-only
   cbz/cbnz entries below).  */
#define do_0 0
18680
18681 static const struct asm_opcode insns[] =
18682 {
18683 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
18684 #define THUMB_VARIANT & arm_ext_v4t
18685 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
18686 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
18687 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
18688 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
18689 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
18690 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
18691 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
18692 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
18693 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
18694 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
18695 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
18696 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
18697 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
18698 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
18699 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
18700 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
18701
18702 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
18703 for setting PSR flag bits. They are obsolete in V6 and do not
18704 have Thumb equivalents. */
18705 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
18706 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
18707 CL("tstp", 110f000, 2, (RR, SH), cmp),
18708 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
18709 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
18710 CL("cmpp", 150f000, 2, (RR, SH), cmp),
18711 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
18712 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
18713 CL("cmnp", 170f000, 2, (RR, SH), cmp),
18714
18715 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
18716 tC3("movs", 1b00000, _movs, 2, (RR, SHG), mov, t_mov_cmp),
18717 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
18718 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
18719
18720 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
18721 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
18722 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
18723 OP_RRnpc),
18724 OP_ADDRGLDR),ldst, t_ldst),
18725 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
18726
18727 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18728 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18729 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18730 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18731 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18732 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18733
18734 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
18735 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
18736 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
18737 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
18738
18739 /* Pseudo ops. */
18740 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
18741 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
18742 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
18743 tCE("udf", 7f000f0, _udf, 1, (oIffffb), bkpt, t_udf),
18744
18745 /* Thumb-compatibility pseudo ops. */
18746 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
18747 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
18748 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
18749 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
18750 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
18751 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
18752 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
18753 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
18754 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
18755 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
18756 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
18757 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
18758
18759 /* These may simplify to neg. */
18760 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
18761 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
18762
18763 #undef THUMB_VARIANT
18764 #define THUMB_VARIANT & arm_ext_v6
18765
18766 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
18767
18768 /* V1 instructions with no Thumb analogue prior to V6T2. */
18769 #undef THUMB_VARIANT
18770 #define THUMB_VARIANT & arm_ext_v6t2
18771
18772 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
18773 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
18774 CL("teqp", 130f000, 2, (RR, SH), cmp),
18775
18776 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18777 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18778 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
18779 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18780
18781 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18782 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18783
18784 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18785 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18786
18787 /* V1 instructions with no Thumb analogue at all. */
18788 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
18789 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
18790
18791 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
18792 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
18793 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
18794 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
18795 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
18796 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
18797 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
18798 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
18799
18800 #undef ARM_VARIANT
18801 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
18802 #undef THUMB_VARIANT
18803 #define THUMB_VARIANT & arm_ext_v4t
18804
18805 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
18806 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
18807
18808 #undef THUMB_VARIANT
18809 #define THUMB_VARIANT & arm_ext_v6t2
18810
18811 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
18812 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
18813
18814 /* Generic coprocessor instructions. */
18815 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
18816 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18817 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18818 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18819 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18820 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18821 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
18822
18823 #undef ARM_VARIANT
18824 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
18825
18826 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
18827 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
18828
18829 #undef ARM_VARIANT
18830 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
18831 #undef THUMB_VARIANT
18832 #define THUMB_VARIANT & arm_ext_msr
18833
18834 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
18835 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
18836
18837 #undef ARM_VARIANT
18838 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
18839 #undef THUMB_VARIANT
18840 #define THUMB_VARIANT & arm_ext_v6t2
18841
18842 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18843 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18844 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18845 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18846 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18847 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18848 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18849 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18850
18851 #undef ARM_VARIANT
18852 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
18853 #undef THUMB_VARIANT
18854 #define THUMB_VARIANT & arm_ext_v4t
18855
18856 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18857 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18858 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18859 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18860 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18861 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18862
18863 #undef ARM_VARIANT
18864 #define ARM_VARIANT & arm_ext_v4t_5
18865
18866 /* ARM Architecture 4T. */
18867 /* Note: bx (and blx) are required on V5, even if the processor does
18868 not support Thumb. */
18869 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
18870
18871 #undef ARM_VARIANT
18872 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
18873 #undef THUMB_VARIANT
18874 #define THUMB_VARIANT & arm_ext_v5t
18875
18876 /* Note: blx has 2 variants; the .value coded here is for
18877 BLX(2). Only this variant has conditional execution. */
18878 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
18879 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
18880
18881 #undef THUMB_VARIANT
18882 #define THUMB_VARIANT & arm_ext_v6t2
18883
18884 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
18885 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18886 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18887 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18888 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18889 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
18890 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18891 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18892
18893 #undef ARM_VARIANT
18894 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
18895 #undef THUMB_VARIANT
18896 #define THUMB_VARIANT & arm_ext_v5exp
18897
18898 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18899 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18900 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18901 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18902
18903 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18904 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18905
18906 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18907 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18908 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18909 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18910
18911 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18912 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18913 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18914 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18915
18916 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18917 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18918
18919 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18920 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18921 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18922 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18923
18924 #undef ARM_VARIANT
18925 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
18926 #undef THUMB_VARIANT
18927 #define THUMB_VARIANT & arm_ext_v6t2
18928
18929 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
18930 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
18931 ldrd, t_ldstd),
18932 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
18933 ADDRGLDRS), ldrd, t_ldstd),
18934
18935 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18936 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18937
18938 #undef ARM_VARIANT
18939 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
18940
18941 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
18942
18943 #undef ARM_VARIANT
18944 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
18945 #undef THUMB_VARIANT
18946 #define THUMB_VARIANT & arm_ext_v6
18947
18948 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
18949 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
18950 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
18951 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
18952 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
18953 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18954 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18955 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18956 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18957 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
18958
18959 #undef THUMB_VARIANT
18960 #define THUMB_VARIANT & arm_ext_v6t2_v8m
18961
18962 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
18963 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
18964 strex, t_strex),
18965 #undef THUMB_VARIANT
18966 #define THUMB_VARIANT & arm_ext_v6t2
18967
18968 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18969 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18970
18971 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
18972 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
18973
18974 /* ARM V6 not included in V7M. */
18975 #undef THUMB_VARIANT
18976 #define THUMB_VARIANT & arm_ext_v6_notm
18977 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
18978 TUF("rfe", 8900a00, e990c000, 1, (RRw), rfe, rfe),
18979 UF(rfeib, 9900a00, 1, (RRw), rfe),
18980 UF(rfeda, 8100a00, 1, (RRw), rfe),
18981 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
18982 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
18983 UF(rfefa, 8100a00, 1, (RRw), rfe),
18984 TUF("rfeea", 9100a00, e810c000, 1, (RRw), rfe, rfe),
18985 UF(rfeed, 9900a00, 1, (RRw), rfe),
18986 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
18987 TUF("srs", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
18988 TUF("srsea", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
18989 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
18990 UF(srsfa, 9c00500, 2, (oRRw, I31w), srs),
18991 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
18992 UF(srsed, 8400500, 2, (oRRw, I31w), srs),
18993 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
18994 TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
18995 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
18996
18997 /* ARM V6 not included in V7M (eg. integer SIMD). */
18998 #undef THUMB_VARIANT
18999 #define THUMB_VARIANT & arm_ext_v6_dsp
19000 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
19001 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
19002 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19003 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19004 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19005 /* Old name for QASX. */
19006 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19007 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19008 /* Old name for QSAX. */
19009 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19010 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19011 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19012 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19013 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19014 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19015 /* Old name for SASX. */
19016 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19017 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19018 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19019 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19020 /* Old name for SHASX. */
19021 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19022 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19023 /* Old name for SHSAX. */
19024 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19025 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19026 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19027 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19028 /* Old name for SSAX. */
19029 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19030 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19031 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19032 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19033 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19034 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19035 /* Old name for UASX. */
19036 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19037 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19038 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19039 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19040 /* Old name for UHASX. */
19041 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19042 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19043 /* Old name for UHSAX. */
19044 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19045 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19046 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19047 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19048 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19049 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19050 /* Old name for UQASX. */
19051 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19052 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19053 /* Old name for UQSAX. */
19054 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19055 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19056 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19057 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19058 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19059 /* Old name for USAX. */
19060 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19061 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19062 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19063 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19064 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19065 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19066 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19067 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19068 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19069 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19070 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19071 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19072 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19073 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19074 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19075 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19076 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19077 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19078 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19079 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19080 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19081 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19082 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19083 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19084 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19085 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19086 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19087 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19088 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19089 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
19090 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
19091 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19092 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19093 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
19094
19095 #undef ARM_VARIANT
19096 #define ARM_VARIANT & arm_ext_v6k
19097 #undef THUMB_VARIANT
19098 #define THUMB_VARIANT & arm_ext_v6k
19099
19100 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
19101 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
19102 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
19103 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
19104
19105 #undef THUMB_VARIANT
19106 #define THUMB_VARIANT & arm_ext_v6_notm
19107 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
19108 ldrexd, t_ldrexd),
19109 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
19110 RRnpcb), strexd, t_strexd),
19111
19112 #undef THUMB_VARIANT
19113 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19114 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
19115 rd_rn, rd_rn),
19116 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
19117 rd_rn, rd_rn),
19118 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19119 strex, t_strexbh),
19120 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19121 strex, t_strexbh),
19122 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
19123
19124 #undef ARM_VARIANT
19125 #define ARM_VARIANT & arm_ext_sec
19126 #undef THUMB_VARIANT
19127 #define THUMB_VARIANT & arm_ext_sec
19128
19129 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
19130
19131 #undef ARM_VARIANT
19132 #define ARM_VARIANT & arm_ext_virt
19133 #undef THUMB_VARIANT
19134 #define THUMB_VARIANT & arm_ext_virt
19135
19136 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
19137 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
19138
19139 #undef ARM_VARIANT
19140 #define ARM_VARIANT & arm_ext_pan
19141 #undef THUMB_VARIANT
19142 #define THUMB_VARIANT & arm_ext_pan
19143
19144 TUF("setpan", 1100000, b610, 1, (I7), setpan, t_setpan),
19145
19146 #undef ARM_VARIANT
19147 #define ARM_VARIANT & arm_ext_v6t2
19148 #undef THUMB_VARIANT
19149 #define THUMB_VARIANT & arm_ext_v6t2
19150
19151 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
19152 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
19153 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
19154 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
19155
19156 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
19157 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
19158
19159 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19160 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19161 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19162 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19163
19164 #undef THUMB_VARIANT
19165 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19166 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
19167 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
19168
19169 /* Thumb-only instructions. */
19170 #undef ARM_VARIANT
19171 #define ARM_VARIANT NULL
19172 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
19173 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
19174
19175 /* ARM does not really have an IT instruction, so always allow it.
19176 The opcode is copied from Thumb in order to allow warnings in
19177 -mimplicit-it=[never | arm] modes. */
19178 #undef ARM_VARIANT
19179 #define ARM_VARIANT & arm_ext_v1
19180 #undef THUMB_VARIANT
19181 #define THUMB_VARIANT & arm_ext_v6t2
19182
19183 TUE("it", bf08, bf08, 1, (COND), it, t_it),
19184 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
19185 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
19186 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
19187 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
19188 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
19189 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
19190 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
19191 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
19192 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
19193 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
19194 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
19195 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
19196 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
19197 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
19198 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
19199 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
19200 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
19201
19202 /* Thumb2 only instructions. */
19203 #undef ARM_VARIANT
19204 #define ARM_VARIANT NULL
19205
19206 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
19207 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
19208 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
19209 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
19210 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
19211 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
19212
19213 /* Hardware division instructions. */
19214 #undef ARM_VARIANT
19215 #define ARM_VARIANT & arm_ext_adiv
19216 #undef THUMB_VARIANT
19217 #define THUMB_VARIANT & arm_ext_div
19218
19219 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
19220 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
19221
19222 /* ARM V6M/V7 instructions. */
19223 #undef ARM_VARIANT
19224 #define ARM_VARIANT & arm_ext_barrier
19225 #undef THUMB_VARIANT
19226 #define THUMB_VARIANT & arm_ext_barrier
19227
19228 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
19229 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
19230 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
19231
19232 /* ARM V7 instructions. */
19233 #undef ARM_VARIANT
19234 #define ARM_VARIANT & arm_ext_v7
19235 #undef THUMB_VARIANT
19236 #define THUMB_VARIANT & arm_ext_v7
19237
19238 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
19239 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
19240
19241 #undef ARM_VARIANT
19242 #define ARM_VARIANT & arm_ext_mp
19243 #undef THUMB_VARIANT
19244 #define THUMB_VARIANT & arm_ext_mp
19245
19246 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
19247
19248 /* AArchv8 instructions. */
19249 #undef ARM_VARIANT
19250 #define ARM_VARIANT & arm_ext_v8
19251
19252 /* Instructions shared between armv8-a and armv8-m. */
19253 #undef THUMB_VARIANT
19254 #define THUMB_VARIANT & arm_ext_atomics
19255
19256 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19257 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19258 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19259 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19260 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19261 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19262 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19263 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
19264 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19265 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
19266 stlex, t_stlex),
19267 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
19268 stlex, t_stlex),
19269 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
19270 stlex, t_stlex),
19271 #undef THUMB_VARIANT
19272 #define THUMB_VARIANT & arm_ext_v8
19273
19274 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
19275 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
19276 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
19277 ldrexd, t_ldrexd),
19278 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
19279 strexd, t_strexd),
19280 /* ARMv8 T32 only. */
19281 #undef ARM_VARIANT
19282 #define ARM_VARIANT NULL
19283 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
19284 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
19285 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
19286
19287 /* FP for ARMv8. */
19288 #undef ARM_VARIANT
19289 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
19290 #undef THUMB_VARIANT
19291 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
19292
19293 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
19294 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
19295 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
19296 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
19297 nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
19298 nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
19299 nUF(vcvta, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvta),
19300 nUF(vcvtn, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtn),
19301 nUF(vcvtp, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtp),
19302 nUF(vcvtm, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtm),
19303 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
19304 nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ), vrintz),
19305 nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ), vrintx),
19306 nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ), vrinta),
19307 nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ), vrintn),
19308 nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ), vrintp),
19309 nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ), vrintm),
19310
19311 /* Crypto v1 extensions. */
19312 #undef ARM_VARIANT
19313 #define ARM_VARIANT & fpu_crypto_ext_armv8
19314 #undef THUMB_VARIANT
19315 #define THUMB_VARIANT & fpu_crypto_ext_armv8
19316
19317 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
19318 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
19319 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
19320 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
19321 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
19322 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
19323 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
19324 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
19325 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
19326 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
19327 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
19328 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
19329 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
19330 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
19331
19332 #undef ARM_VARIANT
19333 #define ARM_VARIANT & crc_ext_armv8
19334 #undef THUMB_VARIANT
19335 #define THUMB_VARIANT & crc_ext_armv8
19336 TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
19337 TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
19338 TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
19339 TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
19340 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
19341 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
19342
19343 #undef ARM_VARIANT
19344 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
19345 #undef THUMB_VARIANT
19346 #define THUMB_VARIANT NULL
19347
19348 cCE("wfs", e200110, 1, (RR), rd),
19349 cCE("rfs", e300110, 1, (RR), rd),
19350 cCE("wfc", e400110, 1, (RR), rd),
19351 cCE("rfc", e500110, 1, (RR), rd),
19352
19353 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
19354 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
19355 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
19356 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
19357
19358 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
19359 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
19360 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
19361 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
19362
19363 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
19364 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
19365 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
19366 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
19367 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
19368 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
19369 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
19370 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
19371 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
19372 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
19373 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
19374 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
19375
19376 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
19377 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
19378 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
19379 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
19380 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
19381 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
19382 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
19383 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
19384 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
19385 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
19386 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
19387 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
19388
19389 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
19390 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
19391 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
19392 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
19393 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
19394 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
19395 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
19396 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
19397 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
19398 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
19399 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
19400 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
19401
19402 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
19403 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
19404 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
19405 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
19406 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
19407 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
19408 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
19409 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
19410 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
19411 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
19412 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
19413 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
19414
19415 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
19416 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
19417 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
19418 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
19419 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
19420 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
19421 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
19422 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
19423 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
19424 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
19425 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
19426 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
19427
19428 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
19429 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
19430 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
19431 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
19432 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
19433 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
19434 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
19435 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
19436 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
19437 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
19438 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
19439 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
19440
19441 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
19442 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
19443 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
19444 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
19445 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
19446 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
19447 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
19448 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
19449 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
19450 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
19451 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
19452 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
19453
19454 cCL("exps", e708100, 2, (RF, RF_IF), rd_rm),
19455 cCL("expsp", e708120, 2, (RF, RF_IF), rd_rm),
19456 cCL("expsm", e708140, 2, (RF, RF_IF), rd_rm),
19457 cCL("expsz", e708160, 2, (RF, RF_IF), rd_rm),
19458 cCL("expd", e708180, 2, (RF, RF_IF), rd_rm),
19459 cCL("expdp", e7081a0, 2, (RF, RF_IF), rd_rm),
19460 cCL("expdm", e7081c0, 2, (RF, RF_IF), rd_rm),
19461 cCL("expdz", e7081e0, 2, (RF, RF_IF), rd_rm),
19462 cCL("expe", e788100, 2, (RF, RF_IF), rd_rm),
19463 cCL("expep", e788120, 2, (RF, RF_IF), rd_rm),
19464 cCL("expem", e788140, 2, (RF, RF_IF), rd_rm),
19465 cCL("expdz", e788160, 2, (RF, RF_IF), rd_rm),
19466
19467 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
19468 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
19469 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
19470 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
19471 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
19472 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
19473 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
19474 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
19475 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
19476 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
19477 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
19478 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
19479
19480 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
19481 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
19482 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
19483 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
19484 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
19485 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
19486 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
19487 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
19488 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
19489 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
19490 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
19491 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
19492
19493 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
19494 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
19495 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
19496 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
19497 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
19498 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
19499 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
19500 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
19501 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
19502 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
19503 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
19504 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
19505
19506 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
19507 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
19508 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
19509 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
19510 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
19511 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
19512 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
19513 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
19514 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
19515 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
19516 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
19517 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
19518
19519 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
19520 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
19521 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
19522 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
19523 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
19524 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
19525 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
19526 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
19527 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
19528 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
19529 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
19530 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
19531
19532 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
19533 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
19534 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
19535 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
19536 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
19537 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
19538 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
19539 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
19540 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
19541 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
19542 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
19543 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
19544
19545 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
19546 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
19547 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
19548 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
19549 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
19550 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
19551 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
19552 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
19553 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
19554 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
19555 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
19556 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
19557
19558 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
19559 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
19560 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
19561 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
19562 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
19563 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
19564 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
19565 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
19566 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
19567 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
19568 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
19569 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
19570
19571 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
19572 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
19573 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
19574 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
19575 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
19576 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19577 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19578 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19579 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
19580 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
19581 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
19582 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
19583
19584 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
19585 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
19586 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
19587 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
19588 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
19589 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19590 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19591 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19592 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
19593 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
19594 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
19595 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
19596
19597 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
19598 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
19599 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
19600 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
19601 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
19602 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19603 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19604 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19605 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
19606 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
19607 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
19608 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
19609
19610 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
19611 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
19612 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
19613 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
19614 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
19615 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19616 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19617 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19618 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
19619 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
19620 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
19621 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
19622
19623 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
19624 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
19625 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
19626 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
19627 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
19628 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19629 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19630 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19631 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
19632 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
19633 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
19634 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
19635
19636 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
19637 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
19638 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
19639 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
19640 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
19641 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19642 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19643 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19644 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
19645 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
19646 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
19647 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
19648
19649 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
19650 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
19651 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
19652 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
19653 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
19654 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19655 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19656 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19657 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
19658 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
19659 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
19660 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
19661
19662 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
19663 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
19664 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
19665 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
19666 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
19667 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19668 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19669 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19670 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
19671 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
19672 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
19673 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
19674
19675 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
19676 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
19677 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
19678 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
19679 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
19680 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19681 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19682 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19683 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
19684 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
19685 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
19686 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
19687
19688 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
19689 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
19690 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
19691 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
19692 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
19693 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19694 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19695 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19696 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
19697 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
19698 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
19699 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
19700
19701 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19702 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19703 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19704 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19705 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19706 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19707 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19708 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19709 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19710 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19711 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19712 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19713
19714 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19715 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19716 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19717 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19718 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19719 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19720 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19721 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19722 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19723 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19724 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19725 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19726
19727 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19728 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19729 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19730 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19731 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19732 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19733 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19734 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19735 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19736 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19737 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19738 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19739
19740 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
19741 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
19742 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
19743 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
19744
19745 cCL("flts", e000110, 2, (RF, RR), rn_rd),
19746 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
19747 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
19748 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
19749 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
19750 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
19751 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
19752 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
19753 cCL("flte", e080110, 2, (RF, RR), rn_rd),
19754 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
19755 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
19756 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
19757
19758 /* The implementation of the FIX instruction is broken on some
19759 assemblers, in that it accepts a precision specifier as well as a
19760 rounding specifier, despite the fact that this is meaningless.
19761 To be more compatible, we accept it as well, though of course it
19762 does not set any bits. */
19763 cCE("fix", e100110, 2, (RR, RF), rd_rm),
19764 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
19765 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
19766 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
19767 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
19768 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
19769 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
19770 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
19771 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
19772 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
19773 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
19774 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
19775 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
19776
19777 /* Instructions that were new with the real FPA, call them V2. */
19778 #undef ARM_VARIANT
19779 #define ARM_VARIANT & fpu_fpa_ext_v2
19780
19781 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19782 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19783 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19784 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19785 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19786 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19787
19788 #undef ARM_VARIANT
19789 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
19790
19791 /* Moves and type conversions. */
19792 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
19793 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
19794 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
19795 cCE("fmstat", ef1fa10, 0, (), noargs),
19796 cCE("vmrs", ef00a10, 2, (APSR_RR, RVC), vmrs),
19797 cCE("vmsr", ee00a10, 2, (RVC, RR), vmsr),
19798 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
19799 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
19800 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
19801 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
19802 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
19803 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
19804 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
19805 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
19806
19807 /* Memory operations. */
19808 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
19809 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
19810 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19811 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19812 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19813 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19814 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19815 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19816 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19817 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19818 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19819 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19820 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19821 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19822 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19823 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19824 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19825 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19826
19827 /* Monadic operations. */
19828 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
19829 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
19830 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
19831
19832 /* Dyadic operations. */
19833 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19834 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19835 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19836 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19837 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19838 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19839 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19840 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19841 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19842
19843 /* Comparisons. */
19844 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
19845 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
19846 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
19847 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
19848
19849 /* Double precision load/store are still present on single precision
19850 implementations. */
19851 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
19852 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
19853 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19854 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19855 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19856 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19857 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19858 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19859 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19860 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19861
19862 #undef ARM_VARIANT
19863 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
19864
19865 /* Moves and type conversions. */
19866 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19867 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
19868 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19869 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
19870 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
19871 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
19872 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
19873 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
19874 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
19875 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
19876 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19877 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
19878 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19879
19880 /* Monadic operations. */
19881 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19882 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19883 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19884
19885 /* Dyadic operations. */
19886 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19887 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19888 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19889 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19890 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19891 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19892 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19893 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19894 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19895
19896 /* Comparisons. */
19897 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19898 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
19899 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19900 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
19901
19902 #undef ARM_VARIANT
19903 #define ARM_VARIANT & fpu_vfp_ext_v2
19904
19905 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
19906 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
19907 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
19908 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
19909
19910 /* Instructions which may belong to either the Neon or VFP instruction sets.
19911 Individual encoder functions perform additional architecture checks. */
19912 #undef ARM_VARIANT
19913 #define ARM_VARIANT & fpu_vfp_ext_v1xd
19914 #undef THUMB_VARIANT
19915 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
19916
19917 /* These mnemonics are unique to VFP. */
19918 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
19919 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
19920 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19921 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19922 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19923 nCE(vcmp, _vcmp, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
19924 nCE(vcmpe, _vcmpe, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
19925 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
19926 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
19927 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
19928
19929 /* Mnemonics shared by Neon and VFP. */
19930 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
19931 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
19932 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
19933
19934 nCEF(vadd, _vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
19935 nCEF(vsub, _vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
19936
19937 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
19938 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
19939
19940 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19941 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19942 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19943 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19944 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19945 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19946 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
19947 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
19948
19949 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
19950 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
19951 NCEF(vcvtb, eb20a40, 2, (RVSD, RVSD), neon_cvtb),
19952 NCEF(vcvtt, eb20a40, 2, (RVSD, RVSD), neon_cvtt),
19953
19954
19955 /* NOTE: All VMOV encoding is special-cased! */
19956 NCE(vmov, 0, 1, (VMOV), neon_mov),
19957 NCE(vmovq, 0, 1, (VMOV), neon_mov),
19958
19959 #undef THUMB_VARIANT
19960 #define THUMB_VARIANT & fpu_neon_ext_v1
19961 #undef ARM_VARIANT
19962 #define ARM_VARIANT & fpu_neon_ext_v1
19963
19964 /* Data processing with three registers of the same length. */
19965 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
19966 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
19967 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
19968 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19969 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
19970 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19971 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
19972 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19973 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
19974 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
19975 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
19976 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
19977 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
19978 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
19979 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
19980 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
19981 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
19982 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
19983 /* If not immediate, fall back to neon_dyadic_i64_su.
19984 shl_imm should accept I8 I16 I32 I64,
19985 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
19986 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
19987 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
19988 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
19989 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
19990 /* Logic ops, types optional & ignored. */
19991 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19992 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19993 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19994 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19995 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19996 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19997 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19998 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19999 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
20000 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
20001 /* Bitfield ops, untyped. */
20002 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20003 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20004 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20005 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20006 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20007 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20008 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
20009 nUF(vabd, _vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20010 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20011 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20012 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20013 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20014 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20015 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
20016 back to neon_dyadic_if_su. */
20017 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20018 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
20019 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20020 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
20021 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20022 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
20023 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20024 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
20025 /* Comparison. Type I8 I16 I32 F32. */
20026 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
20027 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
20028 /* As above, D registers only. */
20029 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
20030 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
20031 /* Int and float variants, signedness unimportant. */
20032 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
20033 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
20034 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
20035 /* Add/sub take types I8 I16 I32 I64 F32. */
20036 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
20037 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
20038 /* vtst takes sizes 8, 16, 32. */
20039 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
20040 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
20041 /* VMUL takes I8 I16 I32 F32 P8. */
20042 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
20043 /* VQD{R}MULH takes S16 S32. */
20044 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20045 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20046 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20047 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20048 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20049 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
20050 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20051 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
20052 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20053 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
20054 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20055 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
20056 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
20057 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
20058 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
20059 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
20060 /* ARM v8.1 extension. */
20061 nUF(vqrdmlah, _vqrdmlah, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20062 nUF(vqrdmlahq, _vqrdmlah, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20063 nUF(vqrdmlsh, _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20064 nUF(vqrdmlshq, _vqrdmlsh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20065
20066 /* Two address, int/float. Types S8 S16 S32 F32. */
20067 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
20068 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
20069
20070 /* Data processing with two registers and a shift amount. */
20071 /* Right shifts, and variants with rounding.
20072 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
20073 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
20074 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
20075 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
20076 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
20077 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
20078 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
20079 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
20080 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
20081 /* Shift and insert. Sizes accepted 8 16 32 64. */
20082 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
20083 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
20084 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
20085 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
20086 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
20087 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
20088 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
20089 /* Right shift immediate, saturating & narrowing, with rounding variants.
20090 Types accepted S16 S32 S64 U16 U32 U64. */
20091 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
20092 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
20093 /* As above, unsigned. Types accepted S16 S32 S64. */
20094 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
20095 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
20096 /* Right shift narrowing. Types accepted I16 I32 I64. */
20097 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
20098 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
20099 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
20100 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
20101 /* CVT with optional immediate for fixed-point variant. */
20102 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
20103
20104 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
20105 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
20106
20107 /* Data processing, three registers of different lengths. */
20108 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
20109 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
20110 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
20111 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
20112 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
20113 /* If not scalar, fall back to neon_dyadic_long.
20114 Vector types as above, scalar types S16 S32 U16 U32. */
20115 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
20116 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
20117 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
20118 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
20119 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
20120 /* Dyadic, narrowing insns. Types I16 I32 I64. */
20121 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20122 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20123 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20124 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20125 /* Saturating doubling multiplies. Types S16 S32. */
20126 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20127 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20128 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20129 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
20130 S16 S32 U16 U32. */
20131 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
20132
20133 /* Extract. Size 8. */
20134 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
20135 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
20136
20137 /* Two registers, miscellaneous. */
20138 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
20139 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
20140 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
20141 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
20142 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
20143 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
20144 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
20145 /* Vector replicate. Sizes 8 16 32. */
20146 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
20147 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
20148 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
20149 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
20150 /* VMOVN. Types I16 I32 I64. */
20151 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
20152 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
20153 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
20154 /* VQMOVUN. Types S16 S32 S64. */
20155 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
20156 /* VZIP / VUZP. Sizes 8 16 32. */
20157 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
20158 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
20159 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
20160 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
20161 /* VQABS / VQNEG. Types S8 S16 S32. */
20162 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
20163 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
20164 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
20165 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
20166 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
20167 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
20168 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
20169 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
20170 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
20171 /* Reciprocal estimates. Types U32 F32. */
20172 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
20173 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
20174 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
20175 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
20176 /* VCLS. Types S8 S16 S32. */
20177 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
20178 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
20179 /* VCLZ. Types I8 I16 I32. */
20180 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
20181 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
20182 /* VCNT. Size 8. */
20183 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
20184 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
20185 /* Two address, untyped. */
20186 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
20187 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
20188 /* VTRN. Sizes 8 16 32. */
20189 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
20190 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
20191
20192 /* Table lookup. Size 8. */
20193 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
20194 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
20195
20196 #undef THUMB_VARIANT
20197 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
20198 #undef ARM_VARIANT
20199 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
20200
20201 /* Neon element/structure load/store. */
20202 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
20203 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
20204 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
20205 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
20206 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
20207 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
20208 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
20209 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
20210
20211 #undef THUMB_VARIANT
20212 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
20213 #undef ARM_VARIANT
20214 #define ARM_VARIANT & fpu_vfp_ext_v3xd
20215 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
20216 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20217 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20218 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20219 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20220 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20221 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20222 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20223 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20224
20225 #undef THUMB_VARIANT
20226 #define THUMB_VARIANT & fpu_vfp_ext_v3
20227 #undef ARM_VARIANT
20228 #define ARM_VARIANT & fpu_vfp_ext_v3
20229
20230 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
20231 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20232 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20233 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20234 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20235 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20236 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20237 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20238 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20239
20240 #undef ARM_VARIANT
20241 #define ARM_VARIANT & fpu_vfp_ext_fma
20242 #undef THUMB_VARIANT
20243 #define THUMB_VARIANT & fpu_vfp_ext_fma
20244 /* Mnemonics shared by Neon and VFP. These are included in the
20245 VFP FMA variant; NEON and VFP FMA always includes the NEON
20246 FMA instructions. */
20247 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
20248 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
20249 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
20250 the v form should always be used. */
20251 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20252 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20253 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20254 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20255 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20256 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20257
20258 #undef THUMB_VARIANT
20259 #undef ARM_VARIANT
20260 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
20261
20262 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20263 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20264 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20265 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20266 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20267 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20268 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
20269 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
20270
20271 #undef ARM_VARIANT
20272 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
20273
20274 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
20275 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
20276 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
20277 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
20278 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
20279 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
20280 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
20281 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
20282 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
20283 cCE("textrmub",e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20284 cCE("textrmuh",e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20285 cCE("textrmuw",e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20286 cCE("textrmsb",e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20287 cCE("textrmsh",e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20288 cCE("textrmsw",e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20289 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20290 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20291 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20292 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
20293 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
20294 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20295 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20296 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20297 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20298 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20299 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20300 cCE("tmovmskb",e100030, 2, (RR, RIWR), rd_rn),
20301 cCE("tmovmskh",e500030, 2, (RR, RIWR), rd_rn),
20302 cCE("tmovmskw",e900030, 2, (RR, RIWR), rd_rn),
20303 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
20304 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
20305 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
20306 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
20307 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
20308 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
20309 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
20310 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
20311 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20312 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20313 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20314 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20315 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20316 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20317 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20318 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20319 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20320 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
20321 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20322 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20323 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20324 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20325 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20326 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20327 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20328 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20329 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20330 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20331 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20332 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20333 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20334 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20335 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20336 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20337 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20338 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20339 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20340 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20341 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20342 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
20343 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
20344 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20345 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20346 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20347 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20348 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20349 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20350 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20351 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20352 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20353 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20354 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20355 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20356 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20357 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20358 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20359 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20360 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20361 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20362 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
20363 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20364 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20365 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20366 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20367 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20368 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20369 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20370 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20371 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20372 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20373 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20374 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20375 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20376 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20377 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20378 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20379 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20380 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20381 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20382 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20383 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20384 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
20385 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20386 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20387 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20388 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20389 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20390 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20391 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20392 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20393 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20394 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20395 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20396 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20397 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20398 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20399 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20400 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20401 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20402 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20403 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20404 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20405 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
20406 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
20407 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20408 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20409 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20410 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20411 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20412 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20413 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20414 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20415 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20416 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
20417 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
20418 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
20419 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
20420 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
20421 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
20422 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20423 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20424 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20425 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
20426 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
20427 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
20428 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
20429 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
20430 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
20431 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20432 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20433 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20434 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20435 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
20436
20437 #undef ARM_VARIANT
20438 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
20439
20440 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
20441 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
20442 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
20443 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
20444 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
20445 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
20446 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20447 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20448 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20449 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20450 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20451 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20452 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20453 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20454 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20455 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20456 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20457 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20458 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20459 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20460 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
20461 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20462 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20463 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20464 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20465 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20466 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20467 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20468 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20469 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20470 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20471 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20472 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20473 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20474 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20475 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20476 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20477 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20478 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20479 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20480 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20481 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20482 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20483 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20484 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20485 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20486 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20487 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20488 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20489 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20490 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20491 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20492 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20493 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20494 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20495 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20496 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20497
20498 #undef ARM_VARIANT
20499 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
20500
20501 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
20502 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
20503 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
20504 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
20505 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
20506 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
20507 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
20508 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
20509 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
20510 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
20511 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
20512 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
20513 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
20514 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
20515 cCE("cfmv64lr",e000510, 2, (RMDX, RR), rn_rd),
20516 cCE("cfmvr64l",e100510, 2, (RR, RMDX), rd_rn),
20517 cCE("cfmv64hr",e000530, 2, (RMDX, RR), rn_rd),
20518 cCE("cfmvr64h",e100530, 2, (RR, RMDX), rd_rn),
20519 cCE("cfmval32",e200440, 2, (RMAX, RMFX), rd_rn),
20520 cCE("cfmv32al",e100440, 2, (RMFX, RMAX), rd_rn),
20521 cCE("cfmvam32",e200460, 2, (RMAX, RMFX), rd_rn),
20522 cCE("cfmv32am",e100460, 2, (RMFX, RMAX), rd_rn),
20523 cCE("cfmvah32",e200480, 2, (RMAX, RMFX), rd_rn),
20524 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX), rd_rn),
20525 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
20526 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
20527 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
20528 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
20529 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX), mav_dspsc),
20530 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS), rd),
20531 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
20532 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
20533 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
20534 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
20535 cCE("cfcvt32s",e000480, 2, (RMF, RMFX), rd_rn),
20536 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX), rd_rn),
20537 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX), rd_rn),
20538 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX), rd_rn),
20539 cCE("cfcvts32",e100580, 2, (RMFX, RMF), rd_rn),
20540 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD), rd_rn),
20541 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
20542 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
20543 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR), mav_triple),
20544 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR), mav_triple),
20545 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
20546 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
20547 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
20548 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
20549 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
20550 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
20551 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
20552 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
20553 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
20554 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
20555 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
20556 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
20557 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
20558 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
20559 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
20560 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
20561 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
20562 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
20563 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
20564 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
20565 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20566 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20567 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20568 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20569 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20570 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20571 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20572 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20573 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
20574 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
20575 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
20576 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
20577
20578 #undef ARM_VARIANT
20579 #define ARM_VARIANT NULL
20580 #undef THUMB_VARIANT
20581 #define THUMB_VARIANT & arm_ext_v8m
20582 TUE("tt", 0, e840f000, 2, (RRnpc, RRnpc), 0, tt),
20583 TUE("ttt", 0, e840f040, 2, (RRnpc, RRnpc), 0, tt),
20584 };
20585 #undef ARM_VARIANT
20586 #undef THUMB_VARIANT
20587 #undef TCE
20588 #undef TUE
20589 #undef TUF
20590 #undef TCC
20591 #undef cCE
20592 #undef cCL
20593 #undef C3E
20594 #undef CE
20595 #undef CM
20596 #undef UE
20597 #undef UF
20598 #undef UT
20599 #undef NUF
20600 #undef nUF
20601 #undef NCE
20602 #undef nCE
20603 #undef OPS0
20604 #undef OPS1
20605 #undef OPS2
20606 #undef OPS3
20607 #undef OPS4
20608 #undef OPS5
20609 #undef OPS6
20610 #undef do_0
20611 \f
20612 /* MD interface: bits in the object file. */
20613
20614 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
20615 for use in the a.out file, and stores them in the array pointed to by buf.
20616 This knows about the endian-ness of the target machine and does
20617 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
20618 2 (short) and 4 (long) Floating numbers are put out as a series of
20619 LITTLENUMS (shorts, here at least). */
20620
20621 void
20622 md_number_to_chars (char * buf, valueT val, int n)
20623 {
20624 if (target_big_endian)
20625 number_to_chars_bigendian (buf, val, n);
20626 else
20627 number_to_chars_littleendian (buf, val, n);
20628 }
20629
20630 static valueT
20631 md_chars_to_number (char * buf, int n)
20632 {
20633 valueT result = 0;
20634 unsigned char * where = (unsigned char *) buf;
20635
20636 if (target_big_endian)
20637 {
20638 while (n--)
20639 {
20640 result <<= 8;
20641 result |= (*where++ & 255);
20642 }
20643 }
20644 else
20645 {
20646 while (n--)
20647 {
20648 result <<= 8;
20649 result |= (where[n] & 255);
20650 }
20651 }
20652
20653 return result;
20654 }
20655
20656 /* MD interface: Sections. */
20657
20658 /* Calculate the maximum variable size (i.e., excluding fr_fix)
20659 that an rs_machine_dependent frag may reach. */
20660
20661 unsigned int
20662 arm_frag_max_var (fragS *fragp)
20663 {
20664 /* We only use rs_machine_dependent for variable-size Thumb instructions,
20665 which are either THUMB_SIZE (2) or INSN_SIZE (4).
20666
20667 Note that we generate relaxable instructions even for cases that don't
20668 really need it, like an immediate that's a trivial constant. So we're
20669 overestimating the instruction size for some of those cases. Rather
20670 than putting more intelligence here, it would probably be better to
20671 avoid generating a relaxation frag in the first place when it can be
20672 determined up front that a short instruction will suffice. */
20673
20674 gas_assert (fragp->fr_type == rs_machine_dependent);
20675 return INSN_SIZE;
20676 }
20677
20678 /* Estimate the size of a frag before relaxing. Assume everything fits in
20679 2 bytes. */
20680
20681 int
20682 md_estimate_size_before_relax (fragS * fragp,
20683 segT segtype ATTRIBUTE_UNUSED)
20684 {
20685 fragp->fr_var = 2;
20686 return 2;
20687 }
20688
/* Convert a machine dependent frag.  Called once relaxation has fixed
   the final size (fragp->fr_var: 2 for a narrow encoding, 4 for a wide
   Thumb-2 encoding): rewrite the instruction bytes for the wide form
   where needed, and emit the fixup matching the chosen encoding.  */

void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* The relaxable instruction sits at the end of the fixed part.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  /* The narrow (16-bit) encoding currently in the frag; its register
     fields are transplanted into the wide encoding below.  */
  old_op = bfd_get_16(abfd, buf);
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  /* fr_subtype holds the Thumb mnemonic id (T_MNEM_*).  */
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Forms whose top nibble is 4 or 9 carry only one register
	     field (bits 8-10); the others carry two (bits 0-2 and
	     3-5).  Move them into the wide encoding's positions.  */
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Of these, only the pc-relative load is pc-relative.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* The narrow form's addend is relative to the aligned pc,
	     which is 4 ahead of the instruction.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* mov/movs place the destination register at bit 8;
	     cmp/cmn place theirs at bit 16 (0x700 << 8).  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Carry the condition code over into the wide encoding.  */
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Choose the reloc according to bit 20 of the new encoding
	     (the S bit in T32 data-processing forms).  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort ();
    }
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;

  /* Set whether we use thumb-2 ISA based on final relaxation results.  */
  if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
      && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
    ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
}
20862
20863 /* Return the size of a relaxable immediate operand instruction.
20864 SHIFT and SIZE specify the form of the allowable immediate. */
20865 static int
20866 relax_immediate (fragS *fragp, int size, int shift)
20867 {
20868 offsetT offset;
20869 offsetT mask;
20870 offsetT low;
20871
20872 /* ??? Should be able to do better than this. */
20873 if (fragp->fr_symbol)
20874 return 4;
20875
20876 low = (1 << shift) - 1;
20877 mask = (1 << (shift + size)) - (1 << shift);
20878 offset = fragp->fr_offset;
20879 /* Force misaligned offsets to 32-bit variant. */
20880 if (offset & low)
20881 return 4;
20882 if (offset & ~mask)
20883 return 4;
20884 return 2;
20885 }
20886
/* Get the address of a symbol during relaxation: the symbol's value
   plus the frag's fr_offset, adjusted by STRETCH when the symbol's
   frag has not yet been visited on this relaxation pass.  */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* Round the stretch towards zero to the boundary this
		 alignment frag enforces (fr_offset is the log2
		 alignment); the alignment absorbs the rest.  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      /* Fully absorbed: no adjustment left to apply.  */
	      if (stretch == 0)
		break;
	    }
	}
      /* f == NULL means sym_frag was not found ahead of fragp, i.e.
	 the symbol's frag precedes us and was already relaxed, so no
	 adjustment is applied in that case.  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
20936
20937 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
20938 load. */
20939 static int
20940 relax_adr (fragS *fragp, asection *sec, long stretch)
20941 {
20942 addressT addr;
20943 offsetT val;
20944
20945 /* Assume worst case for symbols not known to be in the same section. */
20946 if (fragp->fr_symbol == NULL
20947 || !S_IS_DEFINED (fragp->fr_symbol)
20948 || sec != S_GET_SEGMENT (fragp->fr_symbol)
20949 || S_IS_WEAK (fragp->fr_symbol))
20950 return 4;
20951
20952 val = relaxed_symbol_addr (fragp, stretch);
20953 addr = fragp->fr_address + fragp->fr_fix;
20954 addr = (addr + 4) & ~3;
20955 /* Force misaligned targets to 32-bit variant. */
20956 if (val & 3)
20957 return 4;
20958 val -= addr;
20959 if (val < 0 || val > 1020)
20960 return 4;
20961 return 2;
20962 }
20963
20964 /* Return the size of a relaxable add/sub immediate instruction. */
20965 static int
20966 relax_addsub (fragS *fragp, asection *sec)
20967 {
20968 char *buf;
20969 int op;
20970
20971 buf = fragp->fr_literal + fragp->fr_fix;
20972 op = bfd_get_16(sec->owner, buf);
20973 if ((op & 0xf) == ((op >> 4) & 0xf))
20974 return relax_immediate (fragp, 8, 0);
20975 else
20976 return relax_immediate (fragp, 3, 0);
20977 }
20978
20979 /* Return TRUE iff the definition of symbol S could be pre-empted
20980 (overridden) at link or load time. */
20981 static bfd_boolean
20982 symbol_preemptible (symbolS *s)
20983 {
20984 /* Weak symbols can always be pre-empted. */
20985 if (S_IS_WEAK (s))
20986 return TRUE;
20987
20988 /* Non-global symbols cannot be pre-empted. */
20989 if (! S_IS_EXTERNAL (s))
20990 return FALSE;
20991
20992 #ifdef OBJ_ELF
20993 /* In ELF, a global symbol can be marked protected, or private. In that
20994 case it can't be pre-empted (other definitions in the same link unit
20995 would violate the ODR). */
20996 if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
20997 return FALSE;
20998 #endif
20999
21000 /* Other global symbols might be pre-empted. */
21001 return TRUE;
21002 }
21003
21004 /* Return the size of a relaxable branch instruction. BITS is the
21005 size of the offset field in the narrow instruction. */
21006
21007 static int
21008 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
21009 {
21010 addressT addr;
21011 offsetT val;
21012 offsetT limit;
21013
21014 /* Assume worst case for symbols not known to be in the same section. */
21015 if (!S_IS_DEFINED (fragp->fr_symbol)
21016 || sec != S_GET_SEGMENT (fragp->fr_symbol)
21017 || S_IS_WEAK (fragp->fr_symbol))
21018 return 4;
21019
21020 #ifdef OBJ_ELF
21021 /* A branch to a function in ARM state will require interworking. */
21022 if (S_IS_DEFINED (fragp->fr_symbol)
21023 && ARM_IS_FUNC (fragp->fr_symbol))
21024 return 4;
21025 #endif
21026
21027 if (symbol_preemptible (fragp->fr_symbol))
21028 return 4;
21029
21030 val = relaxed_symbol_addr (fragp, stretch);
21031 addr = fragp->fr_address + fragp->fr_fix + 4;
21032 val -= addr;
21033
21034 /* Offset is a signed value *2 */
21035 limit = 1 << bits;
21036 if (val >= limit || val < -limit)
21037 return 4;
21038 return 2;
21039 }
21040
21041
/* Relax a machine dependent frag.  This returns the amount by which
   the current size of the frag should change.  Dispatches on the
   Thumb mnemonic stored in fr_subtype; each helper returns the new
   encoding size (2 or 4 bytes).  The (size, shift) arguments below
   describe the narrow form's immediate: SIZE bits, scaled by
   1 << SHIFT.  */

int
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      /* 8-bit immediate, word-scaled.  */
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      /* 5-bit immediate, word-scaled.  */
      newsize = relax_immediate (fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      /* 5-bit immediate, halfword-scaled.  */
      newsize = relax_immediate (fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      /* 5-bit immediate, byte-scaled.  */
      newsize = relax_immediate (fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      /* 8-bit immediate, unscaled.  */
      newsize = relax_immediate (fragp, 8, 0);
      break;
    case T_MNEM_b:
      /* Unconditional branch: 11-bit offset field.  */
      newsize = relax_branch (fragp, sec, 11, stretch);
      break;
    case T_MNEM_bcond:
      /* Conditional branch: 8-bit offset field.  */
      newsize = relax_branch (fragp, sec, 8, stretch);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      newsize = relax_immediate (fragp, 7, 2);
      break;
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      newsize = relax_addsub (fragp, sec);
      break;
    default:
      abort ();
    }

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
    {
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane (fragp);
    }

  return newsize - oldsize;
}
21120
/* Round up a section size to the appropriate boundary.  For a.out
   flavours the size is rounded up to the section's alignment;
   everywhere else it is returned unchanged (BFD handles it).  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED,
		  valueT size)
{
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
  if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
    {
      /* For a.out, force the section size to be aligned.  If we don't do
	 this, BFD will align it for us, but it will not write out the
	 final bytes of the section.  This may be a bug in BFD, but it is
	 easier to fix it here since that is how the other a.out targets
	 work.  */
      int align;

      align = bfd_get_section_alignment (stdoutput, segment);
      size = ((size + (1 << align) - 1) & (-((valueT) 1 << align)));
    }
#endif

  return size;
}
21144
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment: pad with no-op instructions matching
   the ARM/Thumb mode recorded in the frag.  Any leading padding
   smaller than one instruction is zero-filled and, on ELF, covered by
   a data mapping symbol.  */

void
arm_handle_align (fragS * fragP)
{
  /* No-op encodings, indexed by [ISA variant][target_big_endian].  */
  static char const arm_noop[2][2][4] =
    {
      { /* ARMv1 */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      { /* ARMv6k */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  static char const thumb_noop[2][2][2] =
    {
      { /* Thumb-1 */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      { /* Thumb-2 */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  static char const wide_thumb_noop[2][4] =
    { /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const char * noop;
  const char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  /* Number of padding bytes this frag must supply.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  /* NOTE(review): the '&=' assumes MAX_MEM_FOR_RS_ALIGN_CODE has the
     form 2**n - 1, making this a cheap modulus.  */
  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      /* Thumb code.  When Thumb-2 is available, pad with wide no-ops
	 plus at most one narrow one; otherwise narrow no-ops only.  */
      if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
	{
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      /* ARM code: use the v6k no-op when the CPU has it, otherwise
	 the generic ARMv1 one.  */
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
					   ? selected_cpu : arm_arch_none,
					   arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  if (bytes & (noop_size - 1))
    {
      /* Zero-fill up to the next instruction boundary.  */
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      /* Record a data mapping symbol over the zero fill.  */
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
21264
21265 /* Called from md_do_align. Used to create an alignment
21266 frag in a code section. */
21267
21268 void
21269 arm_frag_align_code (int n, int max)
21270 {
21271 char * p;
21272
21273 /* We assume that there will never be a requirement
21274 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
21275 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
21276 {
21277 char err_msg[128];
21278
21279 sprintf (err_msg,
21280 _("alignments greater than %d bytes not supported in .text sections."),
21281 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
21282 as_fatal ("%s", err_msg);
21283 }
21284
21285 p = frag_var (rs_align_code,
21286 MAX_MEM_FOR_RS_ALIGN_CODE,
21287 1,
21288 (relax_substateT) max,
21289 (symbolS *) NULL,
21290 (offsetT) n,
21291 (char *) NULL);
21292 *p = 0;
21293 }
21294
21295 /* Perform target specific initialisation of a frag.
21296 Note - despite the name this initialisation is not done when the frag
21297 is created, but only when its type is assigned. A frag can be created
21298 and used a long time before its type is set, so beware of assuming that
21299 this initialisationis performed first. */
21300
21301 #ifndef OBJ_ELF
/* Non-ELF variant: just record the current ARM/Thumb mode in the
   frag; MAX_CHARS is unused.  */
void
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
{
  /* Record whether this frag is in an ARM or a THUMB area.  */
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
}
21308
21309 #else /* OBJ_ELF is defined. */
21310 void
21311 arm_init_frag (fragS * fragP, int max_chars)
21312 {
21313 int frag_thumb_mode;
21314
21315 /* If the current ARM vs THUMB mode has not already
21316 been recorded into this frag then do so now. */
21317 if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
21318 fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
21319
21320 frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED;
21321
21322 /* Record a mapping symbol for alignment frags. We will delete this
21323 later if the alignment ends up empty. */
21324 switch (fragP->fr_type)
21325 {
21326 case rs_align:
21327 case rs_align_test:
21328 case rs_fill:
21329 mapping_state_2 (MAP_DATA, max_chars);
21330 break;
21331 case rs_align_code:
21332 mapping_state_2 (frag_thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
21333 break;
21334 default:
21335 break;
21336 }
21337 }
21338
21339 /* When we change sections we need to issue a new mapping symbol. */
21340
21341 void
21342 arm_elf_change_section (void)
21343 {
21344 /* Link an unlinked unwind index table section to the .text section. */
21345 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
21346 && elf_linked_to_section (now_seg) == NULL)
21347 elf_linked_to_section (now_seg) = text_section;
21348 }
21349
21350 int
21351 arm_elf_section_type (const char * str, size_t len)
21352 {
21353 if (len == 5 && strncmp (str, "exidx", 5) == 0)
21354 return SHT_ARM_EXIDX;
21355
21356 return -1;
21357 }
21358 \f
21359 /* Code to deal with unwinding tables. */
21360
21361 static void add_unwind_adjustsp (offsetT);
21362
21363 /* Generate any deferred unwind frame offset. */
21364
21365 static void
21366 flush_pending_unwind (void)
21367 {
21368 offsetT offset;
21369
21370 offset = unwind.pending_offset;
21371 unwind.pending_offset = 0;
21372 if (offset != 0)
21373 add_unwind_adjustsp (offset);
21374 }
21375
21376 /* Add an opcode to this list for this function. Two-byte opcodes should
21377 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
21378 order. */
21379
21380 static void
21381 add_unwind_opcode (valueT op, int length)
21382 {
21383 /* Add any deferred stack adjustment. */
21384 if (unwind.pending_offset)
21385 flush_pending_unwind ();
21386
21387 unwind.sp_restored = 0;
21388
21389 if (unwind.opcode_count + length > unwind.opcode_alloc)
21390 {
21391 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
21392 if (unwind.opcodes)
21393 unwind.opcodes = (unsigned char *) xrealloc (unwind.opcodes,
21394 unwind.opcode_alloc);
21395 else
21396 unwind.opcodes = (unsigned char *) xmalloc (unwind.opcode_alloc);
21397 }
21398 while (length > 0)
21399 {
21400 length--;
21401 unwind.opcodes[unwind.opcode_count] = op & 0xff;
21402 op >>= 8;
21403 unwind.opcode_count++;
21404 }
21405 }
21406
21407 /* Add unwind opcodes to adjust the stack pointer. */
21408
static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      if (o == 0)
	/* A zero operand still needs one explicit uleb128 byte.  */
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  if (o)
	    bytes[n] |= 0x80;	/* Continuation bit: more bytes follow.  */
	  n++;
	}
      /* Add the insn.  Since the list is stored in reverse, emit the
	 uleb128 bytes last-first, then the 0xb2 opcode byte itself.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes: the first (0x3f) covers the maximum 0x100,
	 the second covers the remainder.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode: (offset - 4) / 4 fits in the low six bits.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Negative adjustment: emit maximal 0x7f chunks until the
	 remainder fits a single short opcode in the 0x40..0x7f range.  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
21468
21469 /* Finish the list of unwind opcodes for this function. */
21470 static void
21471 finish_unwind_opcodes (void)
21472 {
21473 valueT op;
21474
21475 if (unwind.fp_used)
21476 {
21477 /* Adjust sp as necessary. */
21478 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
21479 flush_pending_unwind ();
21480
21481 /* After restoring sp from the frame pointer. */
21482 op = 0x90 | unwind.fp_reg;
21483 add_unwind_opcode (op, 1);
21484 }
21485 else
21486 flush_pending_unwind ();
21487 }
21488
21489
21490 /* Start an exception table entry. If idx is nonzero this is an index table
21491 entry. */
21492
static void
start_unwind_section (const segT text_seg, int idx)
{
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  const char * group_name;
  size_t prefix_len;
  size_t text_len;
  char * sec_name;
  size_t sec_name_len;
  int type;
  int flags;
  int linkonce;

  if (idx)
    {
      /* Index table entry: gets its own section type so tools can
	 recognise it.  */
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
    }
  else
    {
      /* Unwind table data: plain PROGBITS.  */
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
    }

  /* The unwind section name is derived from the text section name;
     plain ".text" contributes no suffix at all.  */
  text_name = segment_name (text_seg);
  if (streq (text_name, ".text"))
    text_name = "";

  if (strncmp (text_name, ".gnu.linkonce.t.",
	       strlen (".gnu.linkonce.t.")) == 0)
    {
      /* For linkonce text sections, use the "once" prefix and key the
	 name on the linkonce suffix only.  */
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
    }

  /* Build "<prefix><text_name>" in a fresh buffer.  */
  prefix_len = strlen (prefix);
  text_len = strlen (text_name);
  sec_name_len = prefix_len + text_len;
  sec_name = (char *) xmalloc (sec_name_len + 1);
  memcpy (sec_name, prefix, prefix_len);
  memcpy (sec_name + prefix_len, text_name, text_len);
  sec_name[prefix_len + text_len] = '\0';

  flags = SHF_ALLOC;
  linkonce = 0;
  group_name = 0;

  /* Handle COMDAT group.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
    {
      group_name = elf_group_name (text_seg);
      if (group_name == NULL)
	{
	  as_bad (_("Group section `%s' has no group signature"),
		  segment_name (text_seg));
	  ignore_rest_of_line ();
	  return;
	}
      flags |= SHF_GROUP;
      linkonce = 1;
    }

  obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);

  /* Set the section link for index tables.  */
  if (idx)
    elf_linked_to_section (now_seg) = text_seg;
}
21565
21566
21567 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
21568 personality routine data. Returns zero, or the index table value for
21569 an inline entry. */
21570
static valueT
create_unwind_entry (int have_data)
{
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.  */
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
	{
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table.  Pack the
		 (at most 3) opcodes into the low bytes, MSB first.  */
	      data = 0x80;
	      n = 3;
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.  */
	size = unwind.opcode_count - 2;
    }
  else
    {
      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
      if (unwind.personality_index != -1)
	{
	  as_bad (_("attempt to recreate an unwind entry"));
	  return 1;
	}

      /* An extra byte is required for the opcode count.  */
      size = unwind.opcode_count + 1;
    }

  /* Round up to a whole number of 32-bit words.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  /* PR 13449: Zero the table entries in case some of them are not used.  */
  memset (ptr, 0, (size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.  */
      data = size > 0 ? size - 1 : 0;
      n = 3;
      break;

      /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.  */
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
21735
21736
21737 /* Initialize the DWARF-2 unwind information for this procedure. */
21738
void
tc_arm_frame_initial_instructions (void)
{
  /* On entry to a function the canonical frame address is simply the
     stack pointer with no offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
21744 #endif /* OBJ_ELF */
21745
21746 /* Convert REGNAME to a DWARF-2 register number. */
21747
21748 int
21749 tc_arm_regname_to_dw2regnum (char *regname)
21750 {
21751 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
21752 if (reg != FAIL)
21753 return reg;
21754
21755 /* PR 16694: Allow VFP registers as well. */
21756 reg = arm_reg_parse (&regname, REG_TYPE_VFS);
21757 if (reg != FAIL)
21758 return 64 + reg;
21759
21760 reg = arm_reg_parse (&regname, REG_TYPE_VFD);
21761 if (reg != FAIL)
21762 return reg + 256;
21763
21764 return -1;
21765 }
21766
21767 #ifdef TE_PE
21768 void
21769 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
21770 {
21771 expressionS exp;
21772
21773 exp.X_op = O_secrel;
21774 exp.X_add_symbol = symbol;
21775 exp.X_add_number = 0;
21776 emit_expr (&exp, size);
21777 }
21778 #endif
21779
21780 /* MD interface: Symbol and relocation handling. */
21781
21782 /* Return the address within the segment that a PC-relative fixup is
21783 relative to. For ARM, PC-relative fixups applied to instructions
21784 are generally relative to the location of the fixup plus 8 bytes.
21785 Thumb branches are offset by 4, and Thumb loads relative to PC
21786 require special handling. */
21787
long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      /* Pipeline offset of +4, then the forced word alignment.  */
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* NOTE(review): for a same-section, non-forced branch to an
	 ARM-mode function on a v5t+ core the real (unbiased) address is
	 restored — presumably because the BL may be converted to BLX;
	 confirm against the relaxation code.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
21909
21910 static bfd_boolean flag_warn_syms = TRUE;
21911
21912 bfd_boolean
21913 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
21914 {
21915 /* PR 18347 - Warn if the user attempts to create a symbol with the same
21916 name as an ARM instruction. Whilst strictly speaking it is allowed, it
21917 does mean that the resulting code might be very confusing to the reader.
21918 Also this warning can be triggered if the user omits an operand before
21919 an immediate address, eg:
21920
21921 LDR =foo
21922
21923 GAS treats this as an assignment of the value of the symbol foo to a
21924 symbol LDR, and so (without this code) it will not issue any kind of
21925 warning or error message.
21926
21927 Note - ARM instructions are case-insensitive but the strings in the hash
21928 table are all stored in lower case, so we must first ensure that name is
21929 lower case too. */
21930 if (flag_warn_syms && arm_ops_hsh)
21931 {
21932 char * nbuf = strdup (name);
21933 char * p;
21934
21935 for (p = nbuf; *p; p++)
21936 *p = TOLOWER (*p);
21937 if (hash_find (arm_ops_hsh, nbuf) != NULL)
21938 {
21939 static struct hash_control * already_warned = NULL;
21940
21941 if (already_warned == NULL)
21942 already_warned = hash_new ();
21943 /* Only warn about the symbol once. To keep the code
21944 simple we let hash_insert do the lookup for us. */
21945 if (hash_insert (already_warned, name, NULL) == NULL)
21946 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
21947 }
21948 else
21949 free (nbuf);
21950 }
21951
21952 return FALSE;
21953 }
21954
21955 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
21956 Otherwise we have no need to default values of symbols. */
21957
21958 symbolS *
21959 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
21960 {
21961 #ifdef OBJ_ELF
21962 if (name[0] == '_' && name[1] == 'G'
21963 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
21964 {
21965 if (!GOT_symbol)
21966 {
21967 if (symbol_find (name))
21968 as_bad (_("GOT already in the symbol table"));
21969
21970 GOT_symbol = symbol_new (name, undefined_section,
21971 (valueT) 0, & zero_address_frag);
21972 }
21973
21974 return GOT_symbol;
21975 }
21976 #endif
21977
21978 return NULL;
21979 }
21980
21981 /* Subroutine of md_apply_fix. Check to see if an immediate can be
21982 computed as two separate immediate values, added together. We
21983 already know that this value cannot be computed by just one ARM
21984 instruction. */
21985
21986 static unsigned int
21987 validate_immediate_twopart (unsigned int val,
21988 unsigned int * highpart)
21989 {
21990 unsigned int a;
21991 unsigned int i;
21992
21993 for (i = 0; i < 32; i += 2)
21994 if (((a = rotate_left (val, i)) & 0xff) != 0)
21995 {
21996 if (a & 0xff00)
21997 {
21998 if (a & ~ 0xffff)
21999 continue;
22000 * highpart = (a >> 8) | ((i + 24) << 7);
22001 }
22002 else if (a & 0xff0000)
22003 {
22004 if (a & 0xff000000)
22005 continue;
22006 * highpart = (a >> 16) | ((i + 16) << 7);
22007 }
22008 else
22009 {
22010 gas_assert (a & 0xff000000);
22011 * highpart = (a >> 24) | ((i + 8) << 7);
22012 }
22013
22014 return (a & 0xff) | (i << 7);
22015 }
22016
22017 return FAIL;
22018 }
22019
22020 static int
22021 validate_offset_imm (unsigned int val, int hwse)
22022 {
22023 if ((hwse && val > 255) || val > 4095)
22024 return FAIL;
22025 return val;
22026 }
22027
22028 /* Subroutine of md_apply_fix. Do those data_ops which can take a
22029 negative immediate constant by altering the instruction. A bit of
22030 a hack really.
22031 MOV <-> MVN
22032 AND <-> BIC
22033 ADC <-> SBC
22034 by inverting the second operand, and
22035 ADD <-> SUB
22036 CMP <-> CMN
22037 by negating the second operand. */
22038
22039 static int
22040 negate_data_op (unsigned long * instruction,
22041 unsigned long value)
22042 {
22043 int op, new_inst;
22044 unsigned long negated, inverted;
22045
22046 negated = encode_arm_immediate (-value);
22047 inverted = encode_arm_immediate (~value);
22048
22049 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
22050 switch (op)
22051 {
22052 /* First negates. */
22053 case OPCODE_SUB: /* ADD <-> SUB */
22054 new_inst = OPCODE_ADD;
22055 value = negated;
22056 break;
22057
22058 case OPCODE_ADD:
22059 new_inst = OPCODE_SUB;
22060 value = negated;
22061 break;
22062
22063 case OPCODE_CMP: /* CMP <-> CMN */
22064 new_inst = OPCODE_CMN;
22065 value = negated;
22066 break;
22067
22068 case OPCODE_CMN:
22069 new_inst = OPCODE_CMP;
22070 value = negated;
22071 break;
22072
22073 /* Now Inverted ops. */
22074 case OPCODE_MOV: /* MOV <-> MVN */
22075 new_inst = OPCODE_MVN;
22076 value = inverted;
22077 break;
22078
22079 case OPCODE_MVN:
22080 new_inst = OPCODE_MOV;
22081 value = inverted;
22082 break;
22083
22084 case OPCODE_AND: /* AND <-> BIC */
22085 new_inst = OPCODE_BIC;
22086 value = inverted;
22087 break;
22088
22089 case OPCODE_BIC:
22090 new_inst = OPCODE_AND;
22091 value = inverted;
22092 break;
22093
22094 case OPCODE_ADC: /* ADC <-> SBC */
22095 new_inst = OPCODE_SBC;
22096 value = inverted;
22097 break;
22098
22099 case OPCODE_SBC:
22100 new_inst = OPCODE_ADC;
22101 value = inverted;
22102 break;
22103
22104 /* We cannot do anything. */
22105 default:
22106 return FAIL;
22107 }
22108
22109 if (value == (unsigned) FAIL)
22110 return FAIL;
22111
22112 *instruction &= OPCODE_MASK;
22113 *instruction |= new_inst << DATA_OP_SHIFT;
22114 return value;
22115 }
22116
22117 /* Like negate_data_op, but for Thumb-2. */
22118
static unsigned int
thumb32_negate_data_op (offsetT *instruction, unsigned int value)
{
  int op, new_inst;
  int rd;
  unsigned int negated, inverted;

  /* Pre-compute both candidate immediate encodings; the switch below
     selects which one the substitute opcode needs.  */
  negated = encode_thumb32_immediate (-value);
  inverted = encode_thumb32_immediate (~value);

  rd = (*instruction >> 8) & 0xf;
  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* ADD <-> SUB.  Includes CMP <-> CMN.  */
    case T2_OPCODE_SUB:
      new_inst = T2_OPCODE_ADD;
      value = negated;
      break;

    case T2_OPCODE_ADD:
      new_inst = T2_OPCODE_SUB;
      value = negated;
      break;

      /* ORR <-> ORN.  Includes MOV <-> MVN.  */
    case T2_OPCODE_ORR:
      new_inst = T2_OPCODE_ORN;
      value = inverted;
      break;

    case T2_OPCODE_ORN:
      new_inst = T2_OPCODE_ORR;
      value = inverted;
      break;

      /* AND <-> BIC.  TST has no inverted equivalent.  */
    case T2_OPCODE_AND:
      new_inst = T2_OPCODE_BIC;
      /* rd == 15 distinguishes the TST form, which cannot be negated.  */
      if (rd == 15)
	value = FAIL;
      else
	value = inverted;
      break;

    case T2_OPCODE_BIC:
      new_inst = T2_OPCODE_AND;
      value = inverted;
      break;

      /* ADC <-> SBC  */
    case T2_OPCODE_ADC:
      new_inst = T2_OPCODE_SBC;
      value = inverted;
      break;

    case T2_OPCODE_SBC:
      new_inst = T2_OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  if (value == (unsigned int)FAIL)
    return FAIL;

  /* Patch the opcode field with the substitute instruction.  */
  *instruction &= T2_OPCODE_MASK;
  *instruction |= new_inst << T2_DATA_OP_SHIFT;
  return value;
}
22192
22193 /* Read a 32-bit thumb instruction from buf. */
22194 static unsigned long
22195 get_thumb32_insn (char * buf)
22196 {
22197 unsigned long insn;
22198 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
22199 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22200
22201 return insn;
22202 }
22203
22204
22205 /* We usually want to set the low bit on the address of thumb function
22206 symbols. In particular .word foo - . should have the low bit set.
22207 Generic code tries to fold the difference of two symbols to
22208 a constant. Prevent this and force a relocation when the first symbols
22209 is a thumb function. */
22210
22211 bfd_boolean
22212 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
22213 {
22214 if (op == O_subtract
22215 && l->X_op == O_symbol
22216 && r->X_op == O_symbol
22217 && THUMB_IS_FUNC (l->X_add_symbol))
22218 {
22219 l->X_op = O_subtract;
22220 l->X_op_symbol = r->X_add_symbol;
22221 l->X_add_number -= r->X_add_number;
22222 return TRUE;
22223 }
22224
22225 /* Process as normal. */
22226 return FALSE;
22227 }
22228
22229 /* Encode Thumb2 unconditional branches and calls. The encoding
22230 for the 2 are identical for the immediate values. */
22231
static void
encode_thumb2_b_bl_offset (char * buf, offsetT value)
{
#define T2I1I2MASK  ((1 << 13) | (1 << 11))
  offsetT newval;
  offsetT newval2;
  addressT S, I1, I2, lo, hi;

  /* Split the offset into the fields of the 32-bit encoding: sign bit
     S, the two bits below it (I1/I2), a 10-bit high immediate and an
     11-bit low immediate.  Bit 0 of VALUE is discarded (halfword
     aligned target).  */
  S = (value >> 24) & 0x01;
  I1 = (value >> 23) & 0x01;
  I2 = (value >> 22) & 0x01;
  hi = (value >> 12) & 0x3ff;
  lo = (value >> 1) & 0x7ff;
  newval = md_chars_to_number (buf, THUMB_SIZE);
  newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
  newval |= (S << 10) | hi;
  newval2 &= ~T2I1I2MASK;
  /* The J1/J2 bits hold ~(I1 ^ S) and ~(I2 ^ S); the trailing XOR with
     T2I1I2MASK performs the inversion.  */
  newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
  md_number_to_chars (buf, newval, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
}
22253
22254 void
22255 md_apply_fix (fixS * fixP,
22256 valueT * valP,
22257 segT seg)
22258 {
22259 offsetT value = * valP;
22260 offsetT newval;
22261 unsigned int newimm;
22262 unsigned long temp;
22263 int sign;
22264 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
22265
22266 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
22267
22268 /* Note whether this will delete the relocation. */
22269
22270 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
22271 fixP->fx_done = 1;
22272
22273 /* On a 64-bit host, silently truncate 'value' to 32 bits for
22274 consistency with the behaviour on 32-bit hosts. Remember value
22275 for emit_reloc. */
22276 value &= 0xffffffff;
22277 value ^= 0x80000000;
22278 value -= 0x80000000;
22279
22280 *valP = value;
22281 fixP->fx_addnumber = value;
22282
22283 /* Same treatment for fixP->fx_offset. */
22284 fixP->fx_offset &= 0xffffffff;
22285 fixP->fx_offset ^= 0x80000000;
22286 fixP->fx_offset -= 0x80000000;
22287
22288 switch (fixP->fx_r_type)
22289 {
22290 case BFD_RELOC_NONE:
22291 /* This will need to go in the object file. */
22292 fixP->fx_done = 0;
22293 break;
22294
22295 case BFD_RELOC_ARM_IMMEDIATE:
22296 /* We claim that this fixup has been processed here,
22297 even if in fact we generate an error because we do
22298 not have a reloc for it, so tc_gen_reloc will reject it. */
22299 fixP->fx_done = 1;
22300
22301 if (fixP->fx_addsy)
22302 {
22303 const char *msg = 0;
22304
22305 if (! S_IS_DEFINED (fixP->fx_addsy))
22306 msg = _("undefined symbol %s used as an immediate value");
22307 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
22308 msg = _("symbol %s is in a different section");
22309 else if (S_IS_WEAK (fixP->fx_addsy))
22310 msg = _("symbol %s is weak and may be overridden later");
22311
22312 if (msg)
22313 {
22314 as_bad_where (fixP->fx_file, fixP->fx_line,
22315 msg, S_GET_NAME (fixP->fx_addsy));
22316 break;
22317 }
22318 }
22319
22320 temp = md_chars_to_number (buf, INSN_SIZE);
22321
22322 /* If the offset is negative, we should use encoding A2 for ADR. */
22323 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
22324 newimm = negate_data_op (&temp, value);
22325 else
22326 {
22327 newimm = encode_arm_immediate (value);
22328
22329 /* If the instruction will fail, see if we can fix things up by
22330 changing the opcode. */
22331 if (newimm == (unsigned int) FAIL)
22332 newimm = negate_data_op (&temp, value);
22333 }
22334
22335 if (newimm == (unsigned int) FAIL)
22336 {
22337 as_bad_where (fixP->fx_file, fixP->fx_line,
22338 _("invalid constant (%lx) after fixup"),
22339 (unsigned long) value);
22340 break;
22341 }
22342
22343 newimm |= (temp & 0xfffff000);
22344 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
22345 break;
22346
22347 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
22348 {
22349 unsigned int highpart = 0;
22350 unsigned int newinsn = 0xe1a00000; /* nop. */
22351
22352 if (fixP->fx_addsy)
22353 {
22354 const char *msg = 0;
22355
22356 if (! S_IS_DEFINED (fixP->fx_addsy))
22357 msg = _("undefined symbol %s used as an immediate value");
22358 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
22359 msg = _("symbol %s is in a different section");
22360 else if (S_IS_WEAK (fixP->fx_addsy))
22361 msg = _("symbol %s is weak and may be overridden later");
22362
22363 if (msg)
22364 {
22365 as_bad_where (fixP->fx_file, fixP->fx_line,
22366 msg, S_GET_NAME (fixP->fx_addsy));
22367 break;
22368 }
22369 }
22370
22371 newimm = encode_arm_immediate (value);
22372 temp = md_chars_to_number (buf, INSN_SIZE);
22373
22374 /* If the instruction will fail, see if we can fix things up by
22375 changing the opcode. */
22376 if (newimm == (unsigned int) FAIL
22377 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
22378 {
22379 /* No ? OK - try using two ADD instructions to generate
22380 the value. */
22381 newimm = validate_immediate_twopart (value, & highpart);
22382
22383 /* Yes - then make sure that the second instruction is
22384 also an add. */
22385 if (newimm != (unsigned int) FAIL)
22386 newinsn = temp;
22387 /* Still No ? Try using a negated value. */
22388 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
22389 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
22390 /* Otherwise - give up. */
22391 else
22392 {
22393 as_bad_where (fixP->fx_file, fixP->fx_line,
22394 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
22395 (long) value);
22396 break;
22397 }
22398
22399 /* Replace the first operand in the 2nd instruction (which
22400 is the PC) with the destination register. We have
22401 already added in the PC in the first instruction and we
22402 do not want to do it again. */
22403 newinsn &= ~ 0xf0000;
22404 newinsn |= ((newinsn & 0x0f000) << 4);
22405 }
22406
22407 newimm |= (temp & 0xfffff000);
22408 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
22409
22410 highpart |= (newinsn & 0xfffff000);
22411 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
22412 }
22413 break;
22414
22415 case BFD_RELOC_ARM_OFFSET_IMM:
22416 if (!fixP->fx_done && seg->use_rela_p)
22417 value = 0;
22418
22419 case BFD_RELOC_ARM_LITERAL:
22420 sign = value > 0;
22421
22422 if (value < 0)
22423 value = - value;
22424
22425 if (validate_offset_imm (value, 0) == FAIL)
22426 {
22427 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
22428 as_bad_where (fixP->fx_file, fixP->fx_line,
22429 _("invalid literal constant: pool needs to be closer"));
22430 else
22431 as_bad_where (fixP->fx_file, fixP->fx_line,
22432 _("bad immediate value for offset (%ld)"),
22433 (long) value);
22434 break;
22435 }
22436
22437 newval = md_chars_to_number (buf, INSN_SIZE);
22438 if (value == 0)
22439 newval &= 0xfffff000;
22440 else
22441 {
22442 newval &= 0xff7ff000;
22443 newval |= value | (sign ? INDEX_UP : 0);
22444 }
22445 md_number_to_chars (buf, newval, INSN_SIZE);
22446 break;
22447
22448 case BFD_RELOC_ARM_OFFSET_IMM8:
22449 case BFD_RELOC_ARM_HWLITERAL:
22450 sign = value > 0;
22451
22452 if (value < 0)
22453 value = - value;
22454
22455 if (validate_offset_imm (value, 1) == FAIL)
22456 {
22457 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
22458 as_bad_where (fixP->fx_file, fixP->fx_line,
22459 _("invalid literal constant: pool needs to be closer"));
22460 else
22461 as_bad_where (fixP->fx_file, fixP->fx_line,
22462 _("bad immediate value for 8-bit offset (%ld)"),
22463 (long) value);
22464 break;
22465 }
22466
22467 newval = md_chars_to_number (buf, INSN_SIZE);
22468 if (value == 0)
22469 newval &= 0xfffff0f0;
22470 else
22471 {
22472 newval &= 0xff7ff0f0;
22473 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
22474 }
22475 md_number_to_chars (buf, newval, INSN_SIZE);
22476 break;
22477
22478 case BFD_RELOC_ARM_T32_OFFSET_U8:
22479 if (value < 0 || value > 1020 || value % 4 != 0)
22480 as_bad_where (fixP->fx_file, fixP->fx_line,
22481 _("bad immediate value for offset (%ld)"), (long) value);
22482 value /= 4;
22483
22484 newval = md_chars_to_number (buf+2, THUMB_SIZE);
22485 newval |= value;
22486 md_number_to_chars (buf+2, newval, THUMB_SIZE);
22487 break;
22488
22489 case BFD_RELOC_ARM_T32_OFFSET_IMM:
22490 /* This is a complicated relocation used for all varieties of Thumb32
22491 load/store instruction with immediate offset:
22492
22493 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
22494 *4, optional writeback(W)
22495 (doubleword load/store)
22496
22497 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
22498 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
22499 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
22500 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
22501 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
22502
22503 Uppercase letters indicate bits that are already encoded at
22504 this point. Lowercase letters are our problem. For the
22505 second block of instructions, the secondary opcode nybble
22506 (bits 8..11) is present, and bit 23 is zero, even if this is
22507 a PC-relative operation. */
22508 newval = md_chars_to_number (buf, THUMB_SIZE);
22509 newval <<= 16;
22510 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
22511
22512 if ((newval & 0xf0000000) == 0xe0000000)
22513 {
22514 /* Doubleword load/store: 8-bit offset, scaled by 4. */
22515 if (value >= 0)
22516 newval |= (1 << 23);
22517 else
22518 value = -value;
22519 if (value % 4 != 0)
22520 {
22521 as_bad_where (fixP->fx_file, fixP->fx_line,
22522 _("offset not a multiple of 4"));
22523 break;
22524 }
22525 value /= 4;
22526 if (value > 0xff)
22527 {
22528 as_bad_where (fixP->fx_file, fixP->fx_line,
22529 _("offset out of range"));
22530 break;
22531 }
22532 newval &= ~0xff;
22533 }
22534 else if ((newval & 0x000f0000) == 0x000f0000)
22535 {
22536 /* PC-relative, 12-bit offset. */
22537 if (value >= 0)
22538 newval |= (1 << 23);
22539 else
22540 value = -value;
22541 if (value > 0xfff)
22542 {
22543 as_bad_where (fixP->fx_file, fixP->fx_line,
22544 _("offset out of range"));
22545 break;
22546 }
22547 newval &= ~0xfff;
22548 }
22549 else if ((newval & 0x00000100) == 0x00000100)
22550 {
22551 /* Writeback: 8-bit, +/- offset. */
22552 if (value >= 0)
22553 newval |= (1 << 9);
22554 else
22555 value = -value;
22556 if (value > 0xff)
22557 {
22558 as_bad_where (fixP->fx_file, fixP->fx_line,
22559 _("offset out of range"));
22560 break;
22561 }
22562 newval &= ~0xff;
22563 }
22564 else if ((newval & 0x00000f00) == 0x00000e00)
22565 {
22566 /* T-instruction: positive 8-bit offset. */
22567 if (value < 0 || value > 0xff)
22568 {
22569 as_bad_where (fixP->fx_file, fixP->fx_line,
22570 _("offset out of range"));
22571 break;
22572 }
22573 newval &= ~0xff;
22574 newval |= value;
22575 }
22576 else
22577 {
22578 /* Positive 12-bit or negative 8-bit offset. */
22579 int limit;
22580 if (value >= 0)
22581 {
22582 newval |= (1 << 23);
22583 limit = 0xfff;
22584 }
22585 else
22586 {
22587 value = -value;
22588 limit = 0xff;
22589 }
22590 if (value > limit)
22591 {
22592 as_bad_where (fixP->fx_file, fixP->fx_line,
22593 _("offset out of range"));
22594 break;
22595 }
22596 newval &= ~limit;
22597 }
22598
22599 newval |= value;
22600 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
22601 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
22602 break;
22603
22604 case BFD_RELOC_ARM_SHIFT_IMM:
22605 newval = md_chars_to_number (buf, INSN_SIZE);
22606 if (((unsigned long) value) > 32
22607 || (value == 32
22608 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
22609 {
22610 as_bad_where (fixP->fx_file, fixP->fx_line,
22611 _("shift expression is too large"));
22612 break;
22613 }
22614
22615 if (value == 0)
22616 /* Shifts of zero must be done as lsl. */
22617 newval &= ~0x60;
22618 else if (value == 32)
22619 value = 0;
22620 newval &= 0xfffff07f;
22621 newval |= (value & 0x1f) << 7;
22622 md_number_to_chars (buf, newval, INSN_SIZE);
22623 break;
22624
    case BFD_RELOC_ARM_T32_IMMEDIATE:
    case BFD_RELOC_ARM_T32_ADD_IMM:
    case BFD_RELOC_ARM_T32_IMM12:
    case BFD_RELOC_ARM_T32_ADD_PC12:
      /* We claim that this fixup has been processed here,
	 even if in fact we generate an error because we do
	 not have a reloc for it, so tc_gen_reloc will reject it.  */
      fixP->fx_done = 1;

      if (fixP->fx_addsy
	  && ! S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  break;
	}

      /* Fetch the two halfwords of the Thumb32 instruction into one word.  */
      newval = md_chars_to_number (buf, THUMB_SIZE);
      newval <<= 16;
      newval |= md_chars_to_number (buf+2, THUMB_SIZE);

      newimm = FAIL;
      if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
	  || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
	{
	  /* First try the Thumb-2 modified (rotated 8-bit) immediate
	     encoding; failing that, try flipping the data-processing
	     opcode (e.g. ADD -> SUB) with the negated value.  */
	  newimm = encode_thumb32_immediate (value);
	  if (newimm == (unsigned int) FAIL)
	    newimm = thumb32_negate_data_op (&newval, value);
	}
      if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
	  && newimm == (unsigned int) FAIL)
	{
	  /* Turn add/sub into addw/subw.  */
	  if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
	    newval = (newval & 0xfeffffff) | 0x02000000;
	  /* No flat 12-bit imm encoding for addsw/subsw.  */
	  if ((newval & 0x00100000) == 0)
	    {
	      /* 12 bit immediate for addw/subw.  */
	      if (value < 0)
		{
		  /* Negative: negate the value and swap ADDW/SUBW.  */
		  value = -value;
		  newval ^= 0x00a00000;
		}
	      if (value > 0xfff)
		newimm = (unsigned int) FAIL;
	      else
		newimm = value;
	    }
	}

      if (newimm == (unsigned int)FAIL)
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("invalid constant (%lx) after fixup"),
			(unsigned long) value);
	  break;
	}

      /* Re-insert the immediate's i:imm3:imm8 fields into the insn.  */
      newval |= (newimm & 0x800) << 15;
      newval |= (newimm & 0x700) << 4;
      newval |= (newimm & 0x0ff);

      md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
      md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
      break;
22692
22693 case BFD_RELOC_ARM_SMC:
22694 if (((unsigned long) value) > 0xffff)
22695 as_bad_where (fixP->fx_file, fixP->fx_line,
22696 _("invalid smc expression"));
22697 newval = md_chars_to_number (buf, INSN_SIZE);
22698 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
22699 md_number_to_chars (buf, newval, INSN_SIZE);
22700 break;
22701
22702 case BFD_RELOC_ARM_HVC:
22703 if (((unsigned long) value) > 0xffff)
22704 as_bad_where (fixP->fx_file, fixP->fx_line,
22705 _("invalid hvc expression"));
22706 newval = md_chars_to_number (buf, INSN_SIZE);
22707 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
22708 md_number_to_chars (buf, newval, INSN_SIZE);
22709 break;
22710
22711 case BFD_RELOC_ARM_SWI:
22712 if (fixP->tc_fix_data != 0)
22713 {
22714 if (((unsigned long) value) > 0xff)
22715 as_bad_where (fixP->fx_file, fixP->fx_line,
22716 _("invalid swi expression"));
22717 newval = md_chars_to_number (buf, THUMB_SIZE);
22718 newval |= value;
22719 md_number_to_chars (buf, newval, THUMB_SIZE);
22720 }
22721 else
22722 {
22723 if (((unsigned long) value) > 0x00ffffff)
22724 as_bad_where (fixP->fx_file, fixP->fx_line,
22725 _("invalid swi expression"));
22726 newval = md_chars_to_number (buf, INSN_SIZE);
22727 newval |= value;
22728 md_number_to_chars (buf, newval, INSN_SIZE);
22729 }
22730 break;
22731
22732 case BFD_RELOC_ARM_MULTI:
22733 if (((unsigned long) value) > 0xffff)
22734 as_bad_where (fixP->fx_file, fixP->fx_line,
22735 _("invalid expression in load/store multiple"));
22736 newval = value | md_chars_to_number (buf, INSN_SIZE);
22737 md_number_to_chars (buf, newval, INSN_SIZE);
22738 break;
22739
22740 #ifdef OBJ_ELF
22741 case BFD_RELOC_ARM_PCREL_CALL:
22742
22743 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
22744 && fixP->fx_addsy
22745 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22746 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22747 && THUMB_IS_FUNC (fixP->fx_addsy))
22748 /* Flip the bl to blx. This is a simple flip
22749 bit here because we generate PCREL_CALL for
22750 unconditional bls. */
22751 {
22752 newval = md_chars_to_number (buf, INSN_SIZE);
22753 newval = newval | 0x10000000;
22754 md_number_to_chars (buf, newval, INSN_SIZE);
22755 temp = 1;
22756 fixP->fx_done = 1;
22757 }
22758 else
22759 temp = 3;
22760 goto arm_branch_common;
22761
    case BFD_RELOC_ARM_PCREL_JUMP:
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
	  && fixP->fx_addsy
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && THUMB_IS_FUNC (fixP->fx_addsy))
	{
	  /* This would map to a bl<cond>, b<cond>,
	     b<always> to a Thumb function.  We
	     need to force a relocation for this particular
	     case.  */
	  newval = md_chars_to_number (buf, INSN_SIZE);
	  fixP->fx_done = 0;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_PLT32:
#endif
    case BFD_RELOC_ARM_PCREL_BRANCH:
      /* temp encodes the low-bit alignment requirement checked at
	 arm_branch_common: 3 for B/BL (word aligned), 1 for BLX.  */
      temp = 3;
      goto arm_branch_common;
22782
22783 case BFD_RELOC_ARM_PCREL_BLX:
22784
22785 temp = 1;
22786 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
22787 && fixP->fx_addsy
22788 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22789 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22790 && ARM_IS_FUNC (fixP->fx_addsy))
22791 {
22792 /* Flip the blx to a bl and warn. */
22793 const char *name = S_GET_NAME (fixP->fx_addsy);
22794 newval = 0xeb000000;
22795 as_warn_where (fixP->fx_file, fixP->fx_line,
22796 _("blx to '%s' an ARM ISA state function changed to bl"),
22797 name);
22798 md_number_to_chars (buf, newval, INSN_SIZE);
22799 temp = 3;
22800 fixP->fx_done = 1;
22801 }
22802
22803 #ifdef OBJ_ELF
22804 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
22805 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
22806 #endif
22807
22808 arm_branch_common:
      /* We are going to store value (shifted right by two) in the
	 instruction, in a 24 bit, signed field.  Bits 26 through 32 either
	 all clear or all set and bit 0 must be clear.  For B/BL bit 1 must
	 also be clear.  */
22813 if (value & temp)
22814 as_bad_where (fixP->fx_file, fixP->fx_line,
22815 _("misaligned branch destination"));
22816 if ((value & (offsetT)0xfe000000) != (offsetT)0
22817 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
22818 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22819
22820 if (fixP->fx_done || !seg->use_rela_p)
22821 {
22822 newval = md_chars_to_number (buf, INSN_SIZE);
22823 newval |= (value >> 2) & 0x00ffffff;
22824 /* Set the H bit on BLX instructions. */
22825 if (temp == 1)
22826 {
22827 if (value & 2)
22828 newval |= 0x01000000;
22829 else
22830 newval &= ~0x01000000;
22831 }
22832 md_number_to_chars (buf, newval, INSN_SIZE);
22833 }
22834 break;
22835
22836 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
22837 /* CBZ can only branch forward. */
22838
22839 /* Attempts to use CBZ to branch to the next instruction
22840 (which, strictly speaking, are prohibited) will be turned into
22841 no-ops.
22842
22843 FIXME: It may be better to remove the instruction completely and
22844 perform relaxation. */
22845 if (value == -2)
22846 {
22847 newval = md_chars_to_number (buf, THUMB_SIZE);
22848 newval = 0xbf00; /* NOP encoding T1 */
22849 md_number_to_chars (buf, newval, THUMB_SIZE);
22850 }
22851 else
22852 {
22853 if (value & ~0x7e)
22854 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22855
22856 if (fixP->fx_done || !seg->use_rela_p)
22857 {
22858 newval = md_chars_to_number (buf, THUMB_SIZE);
22859 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
22860 md_number_to_chars (buf, newval, THUMB_SIZE);
22861 }
22862 }
22863 break;
22864
22865 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
22866 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
22867 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22868
22869 if (fixP->fx_done || !seg->use_rela_p)
22870 {
22871 newval = md_chars_to_number (buf, THUMB_SIZE);
22872 newval |= (value & 0x1ff) >> 1;
22873 md_number_to_chars (buf, newval, THUMB_SIZE);
22874 }
22875 break;
22876
22877 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
22878 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
22879 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22880
22881 if (fixP->fx_done || !seg->use_rela_p)
22882 {
22883 newval = md_chars_to_number (buf, THUMB_SIZE);
22884 newval |= (value & 0xfff) >> 1;
22885 md_number_to_chars (buf, newval, THUMB_SIZE);
22886 }
22887 break;
22888
22889 case BFD_RELOC_THUMB_PCREL_BRANCH20:
22890 if (fixP->fx_addsy
22891 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22892 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22893 && ARM_IS_FUNC (fixP->fx_addsy)
22894 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
22895 {
22896 /* Force a relocation for a branch 20 bits wide. */
22897 fixP->fx_done = 0;
22898 }
22899 if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
22900 as_bad_where (fixP->fx_file, fixP->fx_line,
22901 _("conditional branch out of range"));
22902
22903 if (fixP->fx_done || !seg->use_rela_p)
22904 {
22905 offsetT newval2;
22906 addressT S, J1, J2, lo, hi;
22907
22908 S = (value & 0x00100000) >> 20;
22909 J2 = (value & 0x00080000) >> 19;
22910 J1 = (value & 0x00040000) >> 18;
22911 hi = (value & 0x0003f000) >> 12;
22912 lo = (value & 0x00000ffe) >> 1;
22913
22914 newval = md_chars_to_number (buf, THUMB_SIZE);
22915 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22916 newval |= (S << 10) | hi;
22917 newval2 |= (J1 << 13) | (J2 << 11) | lo;
22918 md_number_to_chars (buf, newval, THUMB_SIZE);
22919 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
22920 }
22921 break;
22922
22923 case BFD_RELOC_THUMB_PCREL_BLX:
22924 /* If there is a blx from a thumb state function to
22925 another thumb function flip this to a bl and warn
22926 about it. */
22927
22928 if (fixP->fx_addsy
22929 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22930 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22931 && THUMB_IS_FUNC (fixP->fx_addsy))
22932 {
22933 const char *name = S_GET_NAME (fixP->fx_addsy);
22934 as_warn_where (fixP->fx_file, fixP->fx_line,
22935 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
22936 name);
22937 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22938 newval = newval | 0x1000;
22939 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
22940 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
22941 fixP->fx_done = 1;
22942 }
22943
22944
22945 goto thumb_bl_common;
22946
22947 case BFD_RELOC_THUMB_PCREL_BRANCH23:
22948 /* A bl from Thumb state ISA to an internal ARM state function
22949 is converted to a blx. */
22950 if (fixP->fx_addsy
22951 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22952 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22953 && ARM_IS_FUNC (fixP->fx_addsy)
22954 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
22955 {
22956 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22957 newval = newval & ~0x1000;
22958 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
22959 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
22960 fixP->fx_done = 1;
22961 }
22962
22963 thumb_bl_common:
22964
22965 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
22966 /* For a BLX instruction, make sure that the relocation is rounded up
22967 to a word boundary. This follows the semantics of the instruction
22968 which specifies that bit 1 of the target address will come from bit
22969 1 of the base address. */
22970 value = (value + 3) & ~ 3;
22971
22972 #ifdef OBJ_ELF
22973 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
22974 && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
22975 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
22976 #endif
22977
22978 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
22979 {
22980 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)))
22981 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22982 else if ((value & ~0x1ffffff)
22983 && ((value & ~0x1ffffff) != ~0x1ffffff))
22984 as_bad_where (fixP->fx_file, fixP->fx_line,
22985 _("Thumb2 branch out of range"));
22986 }
22987
22988 if (fixP->fx_done || !seg->use_rela_p)
22989 encode_thumb2_b_bl_offset (buf, value);
22990
22991 break;
22992
22993 case BFD_RELOC_THUMB_PCREL_BRANCH25:
22994 if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
22995 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22996
22997 if (fixP->fx_done || !seg->use_rela_p)
22998 encode_thumb2_b_bl_offset (buf, value);
22999
23000 break;
23001
23002 case BFD_RELOC_8:
23003 if (fixP->fx_done || !seg->use_rela_p)
23004 *buf = value;
23005 break;
23006
23007 case BFD_RELOC_16:
23008 if (fixP->fx_done || !seg->use_rela_p)
23009 md_number_to_chars (buf, value, 2);
23010 break;
23011
23012 #ifdef OBJ_ELF
23013 case BFD_RELOC_ARM_TLS_CALL:
23014 case BFD_RELOC_ARM_THM_TLS_CALL:
23015 case BFD_RELOC_ARM_TLS_DESCSEQ:
23016 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
23017 case BFD_RELOC_ARM_TLS_GOTDESC:
23018 case BFD_RELOC_ARM_TLS_GD32:
23019 case BFD_RELOC_ARM_TLS_LE32:
23020 case BFD_RELOC_ARM_TLS_IE32:
23021 case BFD_RELOC_ARM_TLS_LDM32:
23022 case BFD_RELOC_ARM_TLS_LDO32:
23023 S_SET_THREAD_LOCAL (fixP->fx_addsy);
23024 break;
23025
23026 case BFD_RELOC_ARM_GOT32:
23027 case BFD_RELOC_ARM_GOTOFF:
23028 break;
23029
23030 case BFD_RELOC_ARM_GOT_PREL:
23031 if (fixP->fx_done || !seg->use_rela_p)
23032 md_number_to_chars (buf, value, 4);
23033 break;
23034
23035 case BFD_RELOC_ARM_TARGET2:
23036 /* TARGET2 is not partial-inplace, so we need to write the
23037 addend here for REL targets, because it won't be written out
23038 during reloc processing later. */
23039 if (fixP->fx_done || !seg->use_rela_p)
23040 md_number_to_chars (buf, fixP->fx_offset, 4);
23041 break;
23042 #endif
23043
23044 case BFD_RELOC_RVA:
23045 case BFD_RELOC_32:
23046 case BFD_RELOC_ARM_TARGET1:
23047 case BFD_RELOC_ARM_ROSEGREL32:
23048 case BFD_RELOC_ARM_SBREL32:
23049 case BFD_RELOC_32_PCREL:
23050 #ifdef TE_PE
23051 case BFD_RELOC_32_SECREL:
23052 #endif
23053 if (fixP->fx_done || !seg->use_rela_p)
23054 #ifdef TE_WINCE
23055 /* For WinCE we only do this for pcrel fixups. */
23056 if (fixP->fx_done || fixP->fx_pcrel)
23057 #endif
23058 md_number_to_chars (buf, value, 4);
23059 break;
23060
23061 #ifdef OBJ_ELF
23062 case BFD_RELOC_ARM_PREL31:
23063 if (fixP->fx_done || !seg->use_rela_p)
23064 {
23065 newval = md_chars_to_number (buf, 4) & 0x80000000;
23066 if ((value ^ (value >> 1)) & 0x40000000)
23067 {
23068 as_bad_where (fixP->fx_file, fixP->fx_line,
23069 _("rel31 relocation overflow"));
23070 }
23071 newval |= value & 0x7fffffff;
23072 md_number_to_chars (buf, newval, 4);
23073 }
23074 break;
23075 #endif
23076
23077 case BFD_RELOC_ARM_CP_OFF_IMM:
23078 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
23079 if (value < -1023 || value > 1023 || (value & 3))
23080 as_bad_where (fixP->fx_file, fixP->fx_line,
23081 _("co-processor offset out of range"));
23082 cp_off_common:
23083 sign = value > 0;
23084 if (value < 0)
23085 value = -value;
23086 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
23087 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
23088 newval = md_chars_to_number (buf, INSN_SIZE);
23089 else
23090 newval = get_thumb32_insn (buf);
23091 if (value == 0)
23092 newval &= 0xffffff00;
23093 else
23094 {
23095 newval &= 0xff7fff00;
23096 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
23097 }
23098 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
23099 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
23100 md_number_to_chars (buf, newval, INSN_SIZE);
23101 else
23102 put_thumb32_insn (buf, newval);
23103 break;
23104
23105 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
23106 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
23107 if (value < -255 || value > 255)
23108 as_bad_where (fixP->fx_file, fixP->fx_line,
23109 _("co-processor offset out of range"));
23110 value *= 4;
23111 goto cp_off_common;
23112
23113 case BFD_RELOC_ARM_THUMB_OFFSET:
23114 newval = md_chars_to_number (buf, THUMB_SIZE);
23115 /* Exactly what ranges, and where the offset is inserted depends
23116 on the type of instruction, we can establish this from the
23117 top 4 bits. */
23118 switch (newval >> 12)
23119 {
23120 case 4: /* PC load. */
23121 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
23122 forced to zero for these loads; md_pcrel_from has already
23123 compensated for this. */
23124 if (value & 3)
23125 as_bad_where (fixP->fx_file, fixP->fx_line,
23126 _("invalid offset, target not word aligned (0x%08lX)"),
23127 (((unsigned long) fixP->fx_frag->fr_address
23128 + (unsigned long) fixP->fx_where) & ~3)
23129 + (unsigned long) value);
23130
23131 if (value & ~0x3fc)
23132 as_bad_where (fixP->fx_file, fixP->fx_line,
23133 _("invalid offset, value too big (0x%08lX)"),
23134 (long) value);
23135
23136 newval |= value >> 2;
23137 break;
23138
23139 case 9: /* SP load/store. */
23140 if (value & ~0x3fc)
23141 as_bad_where (fixP->fx_file, fixP->fx_line,
23142 _("invalid offset, value too big (0x%08lX)"),
23143 (long) value);
23144 newval |= value >> 2;
23145 break;
23146
23147 case 6: /* Word load/store. */
23148 if (value & ~0x7c)
23149 as_bad_where (fixP->fx_file, fixP->fx_line,
23150 _("invalid offset, value too big (0x%08lX)"),
23151 (long) value);
23152 newval |= value << 4; /* 6 - 2. */
23153 break;
23154
23155 case 7: /* Byte load/store. */
23156 if (value & ~0x1f)
23157 as_bad_where (fixP->fx_file, fixP->fx_line,
23158 _("invalid offset, value too big (0x%08lX)"),
23159 (long) value);
23160 newval |= value << 6;
23161 break;
23162
23163 case 8: /* Halfword load/store. */
23164 if (value & ~0x3e)
23165 as_bad_where (fixP->fx_file, fixP->fx_line,
23166 _("invalid offset, value too big (0x%08lX)"),
23167 (long) value);
23168 newval |= value << 5; /* 6 - 1. */
23169 break;
23170
23171 default:
23172 as_bad_where (fixP->fx_file, fixP->fx_line,
23173 "Unable to process relocation for thumb opcode: %lx",
23174 (unsigned long) newval);
23175 break;
23176 }
23177 md_number_to_chars (buf, newval, THUMB_SIZE);
23178 break;
23179
23180 case BFD_RELOC_ARM_THUMB_ADD:
23181 /* This is a complicated relocation, since we use it for all of
23182 the following immediate relocations:
23183
23184 3bit ADD/SUB
23185 8bit ADD/SUB
23186 9bit ADD/SUB SP word-aligned
23187 10bit ADD PC/SP word-aligned
23188
23189 The type of instruction being processed is encoded in the
23190 instruction field:
23191
23192 0x8000 SUB
23193 0x00F0 Rd
23194 0x000F Rs
23195 */
23196 newval = md_chars_to_number (buf, THUMB_SIZE);
23197 {
23198 int rd = (newval >> 4) & 0xf;
23199 int rs = newval & 0xf;
23200 int subtract = !!(newval & 0x8000);
23201
23202 /* Check for HI regs, only very restricted cases allowed:
23203 Adjusting SP, and using PC or SP to get an address. */
23204 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
23205 || (rs > 7 && rs != REG_SP && rs != REG_PC))
23206 as_bad_where (fixP->fx_file, fixP->fx_line,
23207 _("invalid Hi register with immediate"));
23208
23209 /* If value is negative, choose the opposite instruction. */
23210 if (value < 0)
23211 {
23212 value = -value;
23213 subtract = !subtract;
23214 if (value < 0)
23215 as_bad_where (fixP->fx_file, fixP->fx_line,
23216 _("immediate value out of range"));
23217 }
23218
23219 if (rd == REG_SP)
23220 {
23221 if (value & ~0x1fc)
23222 as_bad_where (fixP->fx_file, fixP->fx_line,
23223 _("invalid immediate for stack address calculation"));
23224 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
23225 newval |= value >> 2;
23226 }
23227 else if (rs == REG_PC || rs == REG_SP)
23228 {
23229 /* PR gas/18541. If the addition is for a defined symbol
23230 within range of an ADR instruction then accept it. */
23231 if (subtract
23232 && value == 4
23233 && fixP->fx_addsy != NULL)
23234 {
23235 subtract = 0;
23236
23237 if (! S_IS_DEFINED (fixP->fx_addsy)
23238 || S_GET_SEGMENT (fixP->fx_addsy) != seg
23239 || S_IS_WEAK (fixP->fx_addsy))
23240 {
23241 as_bad_where (fixP->fx_file, fixP->fx_line,
23242 _("address calculation needs a strongly defined nearby symbol"));
23243 }
23244 else
23245 {
23246 offsetT v = fixP->fx_where + fixP->fx_frag->fr_address;
23247
23248 /* Round up to the next 4-byte boundary. */
23249 if (v & 3)
23250 v = (v + 3) & ~ 3;
23251 else
23252 v += 4;
23253 v = S_GET_VALUE (fixP->fx_addsy) - v;
23254
23255 if (v & ~0x3fc)
23256 {
23257 as_bad_where (fixP->fx_file, fixP->fx_line,
23258 _("symbol too far away"));
23259 }
23260 else
23261 {
23262 fixP->fx_done = 1;
23263 value = v;
23264 }
23265 }
23266 }
23267
23268 if (subtract || value & ~0x3fc)
23269 as_bad_where (fixP->fx_file, fixP->fx_line,
23270 _("invalid immediate for address calculation (value = 0x%08lX)"),
23271 (unsigned long) (subtract ? - value : value));
23272 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
23273 newval |= rd << 8;
23274 newval |= value >> 2;
23275 }
23276 else if (rs == rd)
23277 {
23278 if (value & ~0xff)
23279 as_bad_where (fixP->fx_file, fixP->fx_line,
23280 _("immediate value out of range"));
23281 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
23282 newval |= (rd << 8) | value;
23283 }
23284 else
23285 {
23286 if (value & ~0x7)
23287 as_bad_where (fixP->fx_file, fixP->fx_line,
23288 _("immediate value out of range"));
23289 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
23290 newval |= rd | (rs << 3) | (value << 6);
23291 }
23292 }
23293 md_number_to_chars (buf, newval, THUMB_SIZE);
23294 break;
23295
23296 case BFD_RELOC_ARM_THUMB_IMM:
23297 newval = md_chars_to_number (buf, THUMB_SIZE);
23298 if (value < 0 || value > 255)
23299 as_bad_where (fixP->fx_file, fixP->fx_line,
23300 _("invalid immediate: %ld is out of range"),
23301 (long) value);
23302 newval |= value;
23303 md_number_to_chars (buf, newval, THUMB_SIZE);
23304 break;
23305
23306 case BFD_RELOC_ARM_THUMB_SHIFT:
23307 /* 5bit shift value (0..32). LSL cannot take 32. */
23308 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
23309 temp = newval & 0xf800;
23310 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
23311 as_bad_where (fixP->fx_file, fixP->fx_line,
23312 _("invalid shift value: %ld"), (long) value);
23313 /* Shifts of zero must be encoded as LSL. */
23314 if (value == 0)
23315 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
23316 /* Shifts of 32 are encoded as zero. */
23317 else if (value == 32)
23318 value = 0;
23319 newval |= value << 6;
23320 md_number_to_chars (buf, newval, THUMB_SIZE);
23321 break;
23322
23323 case BFD_RELOC_VTABLE_INHERIT:
23324 case BFD_RELOC_VTABLE_ENTRY:
23325 fixP->fx_done = 0;
23326 return;
23327
23328 case BFD_RELOC_ARM_MOVW:
23329 case BFD_RELOC_ARM_MOVT:
23330 case BFD_RELOC_ARM_THUMB_MOVW:
23331 case BFD_RELOC_ARM_THUMB_MOVT:
23332 if (fixP->fx_done || !seg->use_rela_p)
23333 {
23334 /* REL format relocations are limited to a 16-bit addend. */
23335 if (!fixP->fx_done)
23336 {
23337 if (value < -0x8000 || value > 0x7fff)
23338 as_bad_where (fixP->fx_file, fixP->fx_line,
23339 _("offset out of range"));
23340 }
23341 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
23342 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
23343 {
23344 value >>= 16;
23345 }
23346
23347 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
23348 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
23349 {
23350 newval = get_thumb32_insn (buf);
23351 newval &= 0xfbf08f00;
23352 newval |= (value & 0xf000) << 4;
23353 newval |= (value & 0x0800) << 15;
23354 newval |= (value & 0x0700) << 4;
23355 newval |= (value & 0x00ff);
23356 put_thumb32_insn (buf, newval);
23357 }
23358 else
23359 {
23360 newval = md_chars_to_number (buf, 4);
23361 newval &= 0xfff0f000;
23362 newval |= value & 0x0fff;
23363 newval |= (value & 0xf000) << 4;
23364 md_number_to_chars (buf, newval, 4);
23365 }
23366 }
23367 return;
23368
23369 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
23370 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
23371 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
23372 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
23373 gas_assert (!fixP->fx_done);
23374 {
23375 bfd_vma insn;
23376 bfd_boolean is_mov;
23377 bfd_vma encoded_addend = value;
23378
23379 /* Check that addend can be encoded in instruction. */
23380 if (!seg->use_rela_p && (value < 0 || value > 255))
23381 as_bad_where (fixP->fx_file, fixP->fx_line,
23382 _("the offset 0x%08lX is not representable"),
23383 (unsigned long) encoded_addend);
23384
23385 /* Extract the instruction. */
23386 insn = md_chars_to_number (buf, THUMB_SIZE);
23387 is_mov = (insn & 0xf800) == 0x2000;
23388
23389 /* Encode insn. */
23390 if (is_mov)
23391 {
23392 if (!seg->use_rela_p)
23393 insn |= encoded_addend;
23394 }
23395 else
23396 {
23397 int rd, rs;
23398
23399 /* Extract the instruction. */
23400 /* Encoding is the following
23401 0x8000 SUB
23402 0x00F0 Rd
23403 0x000F Rs
23404 */
23405 /* The following conditions must be true :
23406 - ADD
23407 - Rd == Rs
23408 - Rd <= 7
23409 */
23410 rd = (insn >> 4) & 0xf;
23411 rs = insn & 0xf;
23412 if ((insn & 0x8000) || (rd != rs) || rd > 7)
23413 as_bad_where (fixP->fx_file, fixP->fx_line,
23414 _("Unable to process relocation for thumb opcode: %lx"),
23415 (unsigned long) insn);
23416
23417 /* Encode as ADD immediate8 thumb 1 code. */
23418 insn = 0x3000 | (rd << 8);
23419
23420 /* Place the encoded addend into the first 8 bits of the
23421 instruction. */
23422 if (!seg->use_rela_p)
23423 insn |= encoded_addend;
23424 }
23425
23426 /* Update the instruction. */
23427 md_number_to_chars (buf, insn, THUMB_SIZE);
23428 }
23429 break;
23430
23431 case BFD_RELOC_ARM_ALU_PC_G0_NC:
23432 case BFD_RELOC_ARM_ALU_PC_G0:
23433 case BFD_RELOC_ARM_ALU_PC_G1_NC:
23434 case BFD_RELOC_ARM_ALU_PC_G1:
23435 case BFD_RELOC_ARM_ALU_PC_G2:
23436 case BFD_RELOC_ARM_ALU_SB_G0_NC:
23437 case BFD_RELOC_ARM_ALU_SB_G0:
23438 case BFD_RELOC_ARM_ALU_SB_G1_NC:
23439 case BFD_RELOC_ARM_ALU_SB_G1:
23440 case BFD_RELOC_ARM_ALU_SB_G2:
23441 gas_assert (!fixP->fx_done);
23442 if (!seg->use_rela_p)
23443 {
23444 bfd_vma insn;
23445 bfd_vma encoded_addend;
23446 bfd_vma addend_abs = abs (value);
23447
23448 /* Check that the absolute value of the addend can be
23449 expressed as an 8-bit constant plus a rotation. */
23450 encoded_addend = encode_arm_immediate (addend_abs);
23451 if (encoded_addend == (unsigned int) FAIL)
23452 as_bad_where (fixP->fx_file, fixP->fx_line,
23453 _("the offset 0x%08lX is not representable"),
23454 (unsigned long) addend_abs);
23455
23456 /* Extract the instruction. */
23457 insn = md_chars_to_number (buf, INSN_SIZE);
23458
23459 /* If the addend is positive, use an ADD instruction.
23460 Otherwise use a SUB. Take care not to destroy the S bit. */
23461 insn &= 0xff1fffff;
23462 if (value < 0)
23463 insn |= 1 << 22;
23464 else
23465 insn |= 1 << 23;
23466
23467 /* Place the encoded addend into the first 12 bits of the
23468 instruction. */
23469 insn &= 0xfffff000;
23470 insn |= encoded_addend;
23471
23472 /* Update the instruction. */
23473 md_number_to_chars (buf, insn, INSN_SIZE);
23474 }
23475 break;
23476
23477 case BFD_RELOC_ARM_LDR_PC_G0:
23478 case BFD_RELOC_ARM_LDR_PC_G1:
23479 case BFD_RELOC_ARM_LDR_PC_G2:
23480 case BFD_RELOC_ARM_LDR_SB_G0:
23481 case BFD_RELOC_ARM_LDR_SB_G1:
23482 case BFD_RELOC_ARM_LDR_SB_G2:
23483 gas_assert (!fixP->fx_done);
23484 if (!seg->use_rela_p)
23485 {
23486 bfd_vma insn;
23487 bfd_vma addend_abs = abs (value);
23488
23489 /* Check that the absolute value of the addend can be
23490 encoded in 12 bits. */
23491 if (addend_abs >= 0x1000)
23492 as_bad_where (fixP->fx_file, fixP->fx_line,
23493 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
23494 (unsigned long) addend_abs);
23495
23496 /* Extract the instruction. */
23497 insn = md_chars_to_number (buf, INSN_SIZE);
23498
23499 /* If the addend is negative, clear bit 23 of the instruction.
23500 Otherwise set it. */
23501 if (value < 0)
23502 insn &= ~(1 << 23);
23503 else
23504 insn |= 1 << 23;
23505
23506 /* Place the absolute value of the addend into the first 12 bits
23507 of the instruction. */
23508 insn &= 0xfffff000;
23509 insn |= addend_abs;
23510
23511 /* Update the instruction. */
23512 md_number_to_chars (buf, insn, INSN_SIZE);
23513 }
23514 break;
23515
23516 case BFD_RELOC_ARM_LDRS_PC_G0:
23517 case BFD_RELOC_ARM_LDRS_PC_G1:
23518 case BFD_RELOC_ARM_LDRS_PC_G2:
23519 case BFD_RELOC_ARM_LDRS_SB_G0:
23520 case BFD_RELOC_ARM_LDRS_SB_G1:
23521 case BFD_RELOC_ARM_LDRS_SB_G2:
23522 gas_assert (!fixP->fx_done);
23523 if (!seg->use_rela_p)
23524 {
23525 bfd_vma insn;
23526 bfd_vma addend_abs = abs (value);
23527
23528 /* Check that the absolute value of the addend can be
23529 encoded in 8 bits. */
23530 if (addend_abs >= 0x100)
23531 as_bad_where (fixP->fx_file, fixP->fx_line,
23532 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
23533 (unsigned long) addend_abs);
23534
23535 /* Extract the instruction. */
23536 insn = md_chars_to_number (buf, INSN_SIZE);
23537
23538 /* If the addend is negative, clear bit 23 of the instruction.
23539 Otherwise set it. */
23540 if (value < 0)
23541 insn &= ~(1 << 23);
23542 else
23543 insn |= 1 << 23;
23544
23545 /* Place the first four bits of the absolute value of the addend
23546 into the first 4 bits of the instruction, and the remaining
23547 four into bits 8 .. 11. */
23548 insn &= 0xfffff0f0;
23549 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
23550
23551 /* Update the instruction. */
23552 md_number_to_chars (buf, insn, INSN_SIZE);
23553 }
23554 break;
23555
23556 case BFD_RELOC_ARM_LDC_PC_G0:
23557 case BFD_RELOC_ARM_LDC_PC_G1:
23558 case BFD_RELOC_ARM_LDC_PC_G2:
23559 case BFD_RELOC_ARM_LDC_SB_G0:
23560 case BFD_RELOC_ARM_LDC_SB_G1:
23561 case BFD_RELOC_ARM_LDC_SB_G2:
23562 gas_assert (!fixP->fx_done);
23563 if (!seg->use_rela_p)
23564 {
23565 bfd_vma insn;
23566 bfd_vma addend_abs = abs (value);
23567
23568 /* Check that the absolute value of the addend is a multiple of
23569 four and, when divided by four, fits in 8 bits. */
23570 if (addend_abs & 0x3)
23571 as_bad_where (fixP->fx_file, fixP->fx_line,
23572 _("bad offset 0x%08lX (must be word-aligned)"),
23573 (unsigned long) addend_abs);
23574
23575 if ((addend_abs >> 2) > 0xff)
23576 as_bad_where (fixP->fx_file, fixP->fx_line,
23577 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
23578 (unsigned long) addend_abs);
23579
23580 /* Extract the instruction. */
23581 insn = md_chars_to_number (buf, INSN_SIZE);
23582
23583 /* If the addend is negative, clear bit 23 of the instruction.
23584 Otherwise set it. */
23585 if (value < 0)
23586 insn &= ~(1 << 23);
23587 else
23588 insn |= 1 << 23;
23589
23590 /* Place the addend (divided by four) into the first eight
23591 bits of the instruction. */
23592 insn &= 0xfffffff0;
23593 insn |= addend_abs >> 2;
23594
23595 /* Update the instruction. */
23596 md_number_to_chars (buf, insn, INSN_SIZE);
23597 }
23598 break;
23599
23600 case BFD_RELOC_ARM_V4BX:
23601 /* This will need to go in the object file. */
23602 fixP->fx_done = 0;
23603 break;
23604
23605 case BFD_RELOC_UNUSED:
23606 default:
23607 as_bad_where (fixP->fx_file, fixP->fx_line,
23608 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
23609 }
23610 }
23611
/* Translate internal representation of relocation info to BFD target
   format.  Returns a freshly xmalloc'd arelent describing fixup FIXP
   in SECTION, or NULL (after reporting an error via as_bad_where)
   when the fixup cannot be represented in the output format.  */

arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  reloc = (arelent *) xmalloc (sizeof (arelent));

  reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  /* For PC-relative fixups the addend handling depends on whether the
     section uses RELA (explicit addend in the reloc) or REL (addend
     stored in the section contents).  */
  if (fixp->fx_pcrel)
    {
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  /* Map the gas-internal reloc type onto the BFD reloc code to emit.
     The first few cases deliberately fall through when not PC-relative,
     eventually reaching the "pass straight through" list below.  */
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    /* These relocation types map directly to BFD codes.  */
    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_THUMB_PCREL_BLX:
#ifdef OBJ_ELF
      /* From EABI version 4 onwards Thumb BLX is expressed as a
	 BRANCH23; the linker performs any needed mode change.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	code = BFD_RELOC_THUMB_PCREL_BLX;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then the a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    /* ELF-only relocations that pass straight through to BFD.  */
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_GOT_PREL:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
    case BFD_RELOC_ARM_V4BX:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

    case BFD_RELOC_ARM_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      /* For RELA targets this can be emitted as-is; for REL targets
	 it should have been resolved by md_apply_fix, so reaching
	 here is an error.  */
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	/* Anything else cannot be represented; produce a descriptive
	   error naming the internal relocation type.  */
	char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
	  default:			   type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* A 32-bit reloc against _GLOBAL_OFFSET_TABLE_ becomes a GOTPC
     reloc whose addend is the place's own address.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
23874
23875 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
23876
23877 void
23878 cons_fix_new_arm (fragS * frag,
23879 int where,
23880 int size,
23881 expressionS * exp,
23882 bfd_reloc_code_real_type reloc)
23883 {
23884 int pcrel = 0;
23885
23886 /* Pick a reloc.
23887 FIXME: @@ Should look at CPU word size. */
23888 switch (size)
23889 {
23890 case 1:
23891 reloc = BFD_RELOC_8;
23892 break;
23893 case 2:
23894 reloc = BFD_RELOC_16;
23895 break;
23896 case 4:
23897 default:
23898 reloc = BFD_RELOC_32;
23899 break;
23900 case 8:
23901 reloc = BFD_RELOC_64;
23902 break;
23903 }
23904
23905 #ifdef TE_PE
23906 if (exp->X_op == O_secrel)
23907 {
23908 exp->X_op = O_symbol;
23909 reloc = BFD_RELOC_32_SECREL;
23910 }
23911 #endif
23912
23913 fix_new_exp (frag, where, size, exp, pcrel, reloc);
23914 }
23915
23916 #if defined (OBJ_COFF)
23917 void
23918 arm_validate_fix (fixS * fixP)
23919 {
23920 /* If the destination of the branch is a defined symbol which does not have
23921 the THUMB_FUNC attribute, then we must be calling a function which has
23922 the (interfacearm) attribute. We look for the Thumb entry point to that
23923 function and change the branch to refer to that function instead. */
23924 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
23925 && fixP->fx_addsy != NULL
23926 && S_IS_DEFINED (fixP->fx_addsy)
23927 && ! THUMB_IS_FUNC (fixP->fx_addsy))
23928 {
23929 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
23930 }
23931 }
23932 #endif
23933
23934
/* Decide whether a fixup FIXP must be kept as a relocation in the
   object file (return non-zero) rather than resolved at assembly
   time.  */

int
arm_force_relocation (struct fix * fixp)
{
#if defined (OBJ_COFF) && defined (TE_PE)
  /* RVA relocations are always emitted for the PE linker.  */
  if (fixp->fx_r_type == BFD_RELOC_RVA)
    return 1;
#endif

  /* In case we have a call or a branch to a function in ARM ISA mode from
     a thumb function or vice-versa force the relocation. These relocations
     are cleared off for some cores that might have blx and simple transformations
     are possible.  */

#ifdef OBJ_ELF
  switch (fixp->fx_r_type)
    {
    /* ARM-mode calls/branches to a Thumb function need linker help.  */
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (THUMB_IS_FUNC (fixp->fx_addsy))
	return 1;
      break;

    /* Thumb-mode calls/branches to an ARM function likewise.  */
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      if (ARM_IS_FUNC (fixp->fx_addsy))
	return 1;
      break;

    default:
      break;
    }
#endif

  /* Resolve these relocations even if the symbol is extern or weak.
     Technically this is probably wrong due to symbol preemption.
     In practice these relocations do not have enough range to be useful
     at dynamic link time, and some code (e.g. in the Linux kernel)
     expects these references to be resolved.  */
  if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
      || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
      || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
    return 0;

  /* Always leave these relocations for the linker.  */
  if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return 1;

  /* Always generate relocations against function symbols.  */
  if (fixp->fx_r_type == BFD_RELOC_32
      && fixp->fx_addsy
      && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
    return 1;

  return generic_force_reloc (fixp);
}
24006
24007 #if defined (OBJ_ELF) || defined (OBJ_COFF)
24008 /* Relocations against function names must be left unadjusted,
24009 so that the linker can use this information to generate interworking
24010 stubs. The MIPS version of this function
24011 also prevents relocations that are mips-16 specific, but I do not
24012 know why it does this.
24013
24014 FIXME:
24015 There is one other problem that ought to be addressed here, but
24016 which currently is not: Taking the address of a label (rather
24017 than a function) and then later jumping to that address. Such
24018 addresses also ought to have their bottom bit set (assuming that
24019 they reside in Thumb code), but at the moment they will not. */
24020
24021 bfd_boolean
24022 arm_fix_adjustable (fixS * fixP)
24023 {
24024 if (fixP->fx_addsy == NULL)
24025 return 1;
24026
24027 /* Preserve relocations against symbols with function type. */
24028 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
24029 return FALSE;
24030
24031 if (THUMB_IS_FUNC (fixP->fx_addsy)
24032 && fixP->fx_subsy == NULL)
24033 return FALSE;
24034
24035 /* We need the symbol name for the VTABLE entries. */
24036 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
24037 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
24038 return FALSE;
24039
24040 /* Don't allow symbols to be discarded on GOT related relocs. */
24041 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
24042 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
24043 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
24044 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
24045 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
24046 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
24047 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
24048 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
24049 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
24050 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
24051 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
24052 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
24053 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
24054 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
24055 return FALSE;
24056
24057 /* Similarly for group relocations. */
24058 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
24059 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
24060 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
24061 return FALSE;
24062
24063 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
24064 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
24065 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
24066 || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
24067 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
24068 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
24069 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
24070 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
24071 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
24072 return FALSE;
24073
24074 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
24075 offsets, so keep these symbols. */
24076 if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
24077 && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
24078 return FALSE;
24079
24080 return TRUE;
24081 }
24082 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
24083
24084 #ifdef OBJ_ELF
24085
24086 const char *
24087 elf32_arm_target_format (void)
24088 {
24089 #ifdef TE_SYMBIAN
24090 return (target_big_endian
24091 ? "elf32-bigarm-symbian"
24092 : "elf32-littlearm-symbian");
24093 #elif defined (TE_VXWORKS)
24094 return (target_big_endian
24095 ? "elf32-bigarm-vxworks"
24096 : "elf32-littlearm-vxworks");
24097 #elif defined (TE_NACL)
24098 return (target_big_endian
24099 ? "elf32-bigarm-nacl"
24100 : "elf32-littlearm-nacl");
24101 #else
24102 if (target_big_endian)
24103 return "elf32-bigarm";
24104 else
24105 return "elf32-littlearm";
24106 #endif
24107 }
24108
/* Target hook called for each symbol while writing the object file;
   simply defers to the generic ELF symbol frobbing.  */

void
armelf_frob_symbol (symbolS * symp,
		    int *     puntp)
{
  elf_frob_symbol (symp, puntp);
}
24115 #endif
24116
24117 /* MD interface: Finalization. */
24118
24119 void
24120 arm_cleanup (void)
24121 {
24122 literal_pool * pool;
24123
24124 /* Ensure that all the IT blocks are properly closed. */
24125 check_it_blocks_finished ();
24126
24127 for (pool = list_of_pools; pool; pool = pool->next)
24128 {
24129 /* Put it at the end of the relevant section. */
24130 subseg_set (pool->section, pool->sub_section);
24131 #ifdef OBJ_ELF
24132 arm_elf_change_section ();
24133 #endif
24134 s_ltorg (0);
24135 }
24136 }
24137
24138 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections gas did not fill in.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* Scan forward through empty frags to decide whether SYM is
	 redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
24203 #endif
24204
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
   ARM ones.  The COFF path rewrites storage classes; the ELF path
   tags symbols as Thumb branch targets or 16-bit symbols, then prunes
   redundant mapping symbols.  */

void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      /* Interworking symbols are flagged in the COFF native symbol
	 entry.  */
      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  /* Mapping symbols ($a, $t, $d, ...) are left untouched.  */
	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		elf_sym->internal_elf_sym.st_target_internal
		  = ST_BRANCH_TO_THUMB;
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
24286
24287 /* MD interface: Initialization. */
24288
24289 static void
24290 set_constant_flonums (void)
24291 {
24292 int i;
24293
24294 for (i = 0; i < NUM_FLOAT_VALS; i++)
24295 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
24296 abort ();
24297 }
24298
24299 /* Auto-select Thumb mode if it's the only available instruction set for the
24300 given architecture. */
24301
24302 static void
24303 autoselect_thumb_from_cpu_variant (void)
24304 {
24305 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
24306 opcode_select (16);
24307 }
24308
/* MD interface: one-time assembler initialization.  Builds the opcode
   and operand hash tables, resolves the CPU/FPU selection from the
   command line, records object-file flags and finally sets the BFD
   architecture/machine for the output.  */

void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create all of the lookup tables used by the parser.  */
  if (	 (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  /* Populate the tables from the static description arrays.  */
  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
		 (void *) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
		 (void *) (barrier_opt_names + i));
#ifdef OBJ_ELF
  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
    {
      struct reloc_entry * entry = reloc_names + i;

      if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
	/* This makes encode_branch() use the EABI versions of this relocation.  */
	entry->reloc = BFD_RELOC_UNUSED;

      hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
    }
#endif

  set_constant_flonums ();

  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options with new options are faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      mcpu_cpu_opt = legacy_cpu;
    }
  else if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      mfpu_opt = legacy_fpu;
    }
  else if (!mfpu_opt)
    {
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	mfpu_opt = mcpu_fpu_opt;
      else
	mfpu_opt = march_fpu_opt;
#else
      mfpu_opt = &fpu_default;
#endif
    }

  /* Still no FPU selected: fall back to a sensible default.  */
  if (!mfpu_opt)
    {
      if (mcpu_cpu_opt != NULL)
	mfpu_opt = &fpu_default;
      else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
	mfpu_opt = &fpu_arch_vfp_v2;
      else
	mfpu_opt = &fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (!mcpu_cpu_opt)
    {
      mcpu_cpu_opt = &cpu_default;
      selected_cpu = cpu_default;
    }
  else if (no_cpu_selected ())
    selected_cpu = cpu_default;
#else
  if (mcpu_cpu_opt)
    selected_cpu = *mcpu_cpu_opt;
  else
    mcpu_cpu_opt = &arm_arch_any;
#endif

  ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;

#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).	*/
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	    flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.	*/
	break;

      default:
	abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.	*/
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags
	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
	    bfd_set_section_size (stdoutput, sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  The checks run from most to least
     capable, so the first matching feature decides the machine.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
24535
24536 /* Command line processing. */
24537
24538 /* md_parse_option
24539 Invocation line includes a switch not recognized by the base assembler.
24540 See if it's a processor-specific option.
24541
24542 This routine is somewhat complicated by the need for backwards
24543 compatibility (since older releases of gcc can't be changed).
24544 The new options try to make the interface as compatible as
24545 possible with GCC.
24546
24547 New options (supported) are:
24548
24549 -mcpu=<cpu name> Assemble for selected processor
24550 -march=<architecture name> Assemble for selected architecture
24551 -mfpu=<fpu architecture> Assemble for selected FPU.
24552 -EB/-mbig-endian Big-endian
24553 -EL/-mlittle-endian Little-endian
24554 -k Generate PIC code
24555 -mthumb Start in Thumb mode
24556 -mthumb-interwork Code supports ARM/Thumb interworking
24557
24558 -m[no-]warn-deprecated Warn about deprecated features
24559 -m[no-]warn-syms Warn when symbols match instructions
24560
24561 For now we will also provide support for:
24562
24563 -mapcs-32 32-bit Program counter
24564 -mapcs-26 26-bit Program counter
   -mapcs-float		      Floats passed in FP registers
24566 -mapcs-reentrant Reentrant code
24567 -matpcs
24568 (sometime these will probably be replaced with -mapcs=<list of options>
24569 and -matpcs=<list of options>)
24570
   The remaining options are only supported for backwards compatibility.
24572 Cpu variants, the arm part is optional:
24573 -m[arm]1 Currently not supported.
24574 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
24575 -m[arm]3 Arm 3 processor
24576 -m[arm]6[xx], Arm 6 processors
24577 -m[arm]7[xx][t][[d]m] Arm 7 processors
24578 -m[arm]8[10] Arm 8 processors
24579 -m[arm]9[20][tdmi] Arm 9 processors
24580 -mstrongarm[110[0]] StrongARM processors
24581 -mxscale XScale processors
24582 -m[arm]v[2345[t[e]]] Arm architectures
24583 -mall All (except the ARM1)
24584 FP variants:
24585 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
24586 -mfpe-old (No float load/store multiples)
24587 -mvfpxd VFP Single precision
24588 -mvfp All VFP
24589 -mno-fpu Disable all floating point instructions
24590
24591 The following CPU names are recognized:
24592 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
24593 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
   arm700i, arm710, arm710t, arm720, arm720t, arm740t, arm710c,
24595 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
24596 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
   arm10t, arm10e, arm1020t, arm1020e, arm10200e,
24598 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
24599
24600 */
24601
/* Short options recognized by md_parse_option: -m<arg> and -k.  */
const char * md_shortopts = "m:k";

/* Only define the endianness options that make sense for this target:
   both for a bi-endian assembler, otherwise only the one matching the
   configured default byte order.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
/* Replace ARMv4 BX instructions with a branch to a veneer.  */
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
24615
/* Long options recognized by md_parse_option; -EB/-EL are only present
   when the corresponding OPTION_* macro is defined above.  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
  {NULL, no_argument, NULL, 0}
};


size_t md_longopts_size = sizeof (md_longopts);
24630
/* Table entry describing a simple command-line switch: matching OPTION
   stores VALUE into *VAR (if VAR is non-NULL).  */
struct arm_option_table
{
  char *option;		/* Option name to match.  */
  char *help;		/* Help information.  */
  int *var;		/* Variable to change.  */
  int value;		/* What to change it to.  */
  char *deprecated;	/* If non-null, print this message.  */
};
24639
24640 struct arm_option_table arm_opts[] =
24641 {
24642 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
24643 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
24644 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
24645 &support_interwork, 1, NULL},
24646 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
24647 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
24648 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
24649 1, NULL},
24650 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
24651 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
24652 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
24653 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
24654 NULL},
24655
24656 /* These are recognized by the assembler, but have no affect on code. */
24657 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
24658 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
24659
24660 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
24661 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
24662 &warn_on_deprecated, 0, NULL},
24663 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL},
24664 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL},
24665 {NULL, NULL, NULL, 0, NULL}
24666 };
24667
/* Table entry for a legacy CPU/FPU selection switch: matching OPTION
   points *VAR at VALUE and optionally prints a deprecation message.  */
struct arm_legacy_option_table
{
  char *option;			/* Option name to match.  */
  const arm_feature_set **var;	/* Variable to change.  */
  const arm_feature_set value;	/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};
24675
/* Legacy -m<cpu>/-m<arch>/-m<fpu> spellings, kept only for backwards
   compatibility.  Every entry carries a deprecation message pointing at
   the modern -mcpu=/-march=/-mfpu= equivalent.  */
const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
  {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
  {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
  {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
  {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
  {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
  {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
  {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
  {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
  {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
  {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
  {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
  {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
  {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
  {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
  {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
  {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
  {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
  {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
  {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
  {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
  {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
  {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
  {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
  {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
  {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
  {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
  {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
  {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
  {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
  {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
  {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
  {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
  {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
  {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
  {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
  {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
  {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
  {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
  {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
  {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
  {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
  {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
  {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
  {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
  {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
  {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
  {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.  */
  {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
24788
/* Table entry describing one -mcpu= argument: the feature set it
   enables, the FPU assumed when none is given, and its display name.  */
struct arm_cpu_option_table
{
  char *name;			/* CPU name accepted on the command line.  */
  size_t name_len;		/* strlen (name), precomputed by ARM_CPU_OPT.  */
  const arm_feature_set value;	/* Features enabled by this CPU.  */
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...  */
  const arm_feature_set default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
24801
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
/* ARM_CPU_OPT precomputes the name length so lookups can compare
   lengths before strings.  */
#define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
static const struct arm_cpu_option_table arm_cpus[] =
{
  ARM_CPU_OPT ("all", ARM_ANY, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm810", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"),
  ARM_CPU_OPT ("arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("fa526", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("fa626", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.  */
  ARM_CPU_OPT ("arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"),
  ARM_CPU_OPT ("arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"),
  ARM_CPU_OPT ("arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"),
  ARM_CPU_OPT ("arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"),
  ARM_CPU_OPT ("arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"),
  ARM_CPU_OPT ("arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2,
	       "ARM1026EJ-S"),
  ARM_CPU_OPT ("arm1026ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa606te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa616te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa626te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fmp626", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa726te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"),
  ARM_CPU_OPT ("arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL),
  ARM_CPU_OPT ("arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2,
	       "ARM1136JF-S"),
  ARM_CPU_OPT ("arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, "MPCore"),
  ARM_CPU_OPT ("mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, "MPCore"),
  ARM_CPU_OPT ("arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL),
  ARM_CPU_OPT ("arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1176jz-s", ARM_ARCH_V6KZ, FPU_NONE, NULL),
  ARM_CPU_OPT ("arm1176jzf-s", ARM_ARCH_V6KZ, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("cortex-a5", ARM_ARCH_V7A_MP_SEC,
	       FPU_NONE, "Cortex-A5"),
  ARM_CPU_OPT ("cortex-a7", ARM_ARCH_V7VE, FPU_ARCH_NEON_VFP_V4,
	       "Cortex-A7"),
  ARM_CPU_OPT ("cortex-a8", ARM_ARCH_V7A_SEC,
	       ARM_FEATURE_COPROC (FPU_VFP_V3
				   | FPU_NEON_EXT_V1),
	       "Cortex-A8"),
  ARM_CPU_OPT ("cortex-a9", ARM_ARCH_V7A_MP_SEC,
	       ARM_FEATURE_COPROC (FPU_VFP_V3
				   | FPU_NEON_EXT_V1),
	       "Cortex-A9"),
  ARM_CPU_OPT ("cortex-a12", ARM_ARCH_V7VE, FPU_ARCH_NEON_VFP_V4,
	       "Cortex-A12"),
  ARM_CPU_OPT ("cortex-a15", ARM_ARCH_V7VE, FPU_ARCH_NEON_VFP_V4,
	       "Cortex-A15"),
  ARM_CPU_OPT ("cortex-a17", ARM_ARCH_V7VE, FPU_ARCH_NEON_VFP_V4,
	       "Cortex-A17"),
  ARM_CPU_OPT ("cortex-a35", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	       "Cortex-A35"),
  ARM_CPU_OPT ("cortex-a53", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	       "Cortex-A53"),
  ARM_CPU_OPT ("cortex-a57", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	       "Cortex-A57"),
  ARM_CPU_OPT ("cortex-a72", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	       "Cortex-A72"),
  ARM_CPU_OPT ("cortex-r4", ARM_ARCH_V7R, FPU_NONE, "Cortex-R4"),
  ARM_CPU_OPT ("cortex-r4f", ARM_ARCH_V7R, FPU_ARCH_VFP_V3D16,
	       "Cortex-R4F"),
  ARM_CPU_OPT ("cortex-r5", ARM_ARCH_V7R_IDIV,
	       FPU_NONE, "Cortex-R5"),
  ARM_CPU_OPT ("cortex-r7", ARM_ARCH_V7R_IDIV,
	       FPU_ARCH_VFP_V3D16,
	       "Cortex-R7"),
  ARM_CPU_OPT ("cortex-m7", ARM_ARCH_V7EM, FPU_NONE, "Cortex-M7"),
  ARM_CPU_OPT ("cortex-m4", ARM_ARCH_V7EM, FPU_NONE, "Cortex-M4"),
  ARM_CPU_OPT ("cortex-m3", ARM_ARCH_V7M, FPU_NONE, "Cortex-M3"),
  ARM_CPU_OPT ("cortex-m1", ARM_ARCH_V6SM, FPU_NONE, "Cortex-M1"),
  ARM_CPU_OPT ("cortex-m0", ARM_ARCH_V6SM, FPU_NONE, "Cortex-M0"),
  ARM_CPU_OPT ("cortex-m0plus", ARM_ARCH_V6SM, FPU_NONE, "Cortex-M0+"),
  ARM_CPU_OPT ("exynos-m1", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	       "Samsung " \
	       "Exynos M1"),
  ARM_CPU_OPT ("qdf24xx", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	       "Qualcomm "
	       "QDF24XX"),

  /* ??? XSCALE is really an architecture.  */
  ARM_CPU_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
  /* ??? iwmmxt is not a processor.  */
  ARM_CPU_OPT ("iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
  /* Maverick */
  ARM_CPU_OPT ("ep9312", ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
	       FPU_ARCH_MAVERICK, "ARM920T"),
  /* Marvell processors.  */
  ARM_CPU_OPT ("marvell-pj4", ARM_FEATURE_CORE (ARM_AEXT_V7A | ARM_EXT_MP
						| ARM_EXT_SEC,
						ARM_EXT2_V6T2_V8M),
	       FPU_ARCH_VFP_V3D16, NULL),
  ARM_CPU_OPT ("marvell-whitney", ARM_FEATURE_CORE (ARM_AEXT_V7A | ARM_EXT_MP
						    | ARM_EXT_SEC,
						    ARM_EXT2_V6T2_V8M),
	       FPU_ARCH_NEON_VFP_V4, NULL),
  /* APM X-Gene family.  */
  ARM_CPU_OPT ("xgene1", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	       "APM X-Gene 1"),
  ARM_CPU_OPT ("xgene2", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	       "APM X-Gene 2"),

  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
};
#undef ARM_CPU_OPT
24967
/* Table entry describing one -march= argument: the feature set it
   enables and the FPU assumed when none is given.  */
struct arm_arch_option_table
{
  char *name;			/* Architecture name accepted on the command line.  */
  size_t name_len;		/* strlen (name), precomputed by ARM_ARCH_OPT.  */
  const arm_feature_set value;	/* Features enabled by this architecture.  */
  const arm_feature_set default_fpu;	/* FPU assumed unless -mfpu= is given.  */
};
24975
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
/* ARM_ARCH_OPT precomputes the name length so lookups can compare
   lengths before strings.  */
#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
static const struct arm_arch_option_table arm_archs[] =
{
  ARM_ARCH_OPT ("all", ARM_ANY, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv1", ARM_ARCH_V1, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2", ARM_ARCH_V2, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3", ARM_ARCH_V3, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4", ARM_ARCH_V4, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv5", ARM_ARCH_V5, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6", ARM_ARCH_V6, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6j", ARM_ARCH_V6, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT ("armv6kz", ARM_ARCH_V6KZ, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zk", ARM_ARCH_V6KZ, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT ("armv6kzt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zkt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7", ARM_ARCH_V7, FPU_ARCH_VFP),
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.  */
  ARM_ARCH_OPT ("armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7ve", ARM_ARCH_V7VE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7e-m", ARM_ARCH_V7EM, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-m.main", ARM_ARCH_V8M_MAIN, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-a", ARM_ARCH_V8A, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.1-a", ARM_ARCH_V8_1A, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.2-a", ARM_ARCH_V8_2A, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
#undef ARM_ARCH_OPT
25037
/* ISA extensions in the co-processor and main instruction set space.  */
/* Table entry for a +ext / +noext architecture extension suffix:
   MERGE_VALUE is OR-ed in for "+ext", CLEAR_VALUE removed for "+noext",
   and ALLOWED_ARCHS limits which base architectures accept it.  */
struct arm_option_extension_value_table
{
  char *name;			/* Extension name accepted after '+'.  */
  size_t name_len;		/* strlen (name), precomputed by ARM_EXT_OPT.  */
  const arm_feature_set merge_value;
  const arm_feature_set clear_value;
  const arm_feature_set allowed_archs;
};
25047
/* The following table must be in alphabetical order with a NULL last entry.
   (arm_parse_extension relies on the ordering to enforce that extensions
   are specified alphabetically, additions before removals.)  */
#define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, AA }
static const struct arm_option_extension_value_table arm_extensions[] =
{
  ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	       ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V7A | ARM_EXT_V7R)),
  ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
	       ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ANY),
  ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
	       ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ANY),
  ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
	       ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ANY),
  ARM_EXT_OPT ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V7A | ARM_EXT_V7R)),
  ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8,
	       ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
  ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
	       ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V6K | ARM_EXT_V7A)),
  ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
				     | ARM_EXT_DIV),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8,
	       ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA),
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
	       ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ANY),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
#undef ARM_EXT_OPT
25096
/* ISA floating-point and Advanced SIMD extensions.  */
/* Table entry mapping an -mfpu= name to its feature set.  */
struct arm_option_fpu_value_table
{
  char *name;			/* FPU name accepted on the command line.  */
  const arm_feature_set value;	/* Features enabled by this FPU.  */
};
25103
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* For backwards compatibility.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2},
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {"vfpv4",		FPU_ARCH_VFP_V4},
  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
  {"fpv5-d16",		FPU_ARCH_VFP_V5D16},
  {"fpv5-sp-d16",	FPU_ARCH_VFP_V5_SP_D16},
  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
  {"crypto-neon-fp-armv8",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
  {"neon-fp-armv8.1",	FPU_ARCH_NEON_VFP_ARMV8_1},
  {"crypto-neon-fp-armv8.1",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
  {NULL,		ARM_ARCH_NONE}
};
25153
struct arm_option_value_table
{
  char *name;	/* Value name, matched exactly against the option argument.  */
  long value;	/* Integer value selected by that name.  */
};
25159
/* Accepted -mfloat-abi= values.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}
};
25167
25168 #ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}
};
25177 #endif
25178
/* Long command-line options that carry their own argument text, matched
   by prefix in md_parse_option and decoded by a callback.  */
struct arm_long_option_table
{
  char * option;		/* Substring to match.  */
  char * help;			/* Help information.  */
  int (* func) (char * subopt);	/* Function to decode sub-option; returns
				   non-zero (TRUE) on success.  */
  char * deprecated;		/* If non-null, print this message.  */
};
25186
25187 static bfd_boolean
25188 arm_parse_extension (char *str, const arm_feature_set **opt_p)
25189 {
25190 arm_feature_set *ext_set = (arm_feature_set *)
25191 xmalloc (sizeof (arm_feature_set));
25192
25193 /* We insist on extensions being specified in alphabetical order, and with
25194 extensions being added before being removed. We achieve this by having
25195 the global ARM_EXTENSIONS table in alphabetical order, and using the
25196 ADDING_VALUE variable to indicate whether we are adding an extension (1)
25197 or removing it (0) and only allowing it to change in the order
25198 -1 -> 1 -> 0. */
25199 const struct arm_option_extension_value_table * opt = NULL;
25200 int adding_value = -1;
25201
25202 /* Copy the feature set, so that we can modify it. */
25203 *ext_set = **opt_p;
25204 *opt_p = ext_set;
25205
25206 while (str != NULL && *str != 0)
25207 {
25208 char *ext;
25209 size_t len;
25210
25211 if (*str != '+')
25212 {
25213 as_bad (_("invalid architectural extension"));
25214 return FALSE;
25215 }
25216
25217 str++;
25218 ext = strchr (str, '+');
25219
25220 if (ext != NULL)
25221 len = ext - str;
25222 else
25223 len = strlen (str);
25224
25225 if (len >= 2 && strncmp (str, "no", 2) == 0)
25226 {
25227 if (adding_value != 0)
25228 {
25229 adding_value = 0;
25230 opt = arm_extensions;
25231 }
25232
25233 len -= 2;
25234 str += 2;
25235 }
25236 else if (len > 0)
25237 {
25238 if (adding_value == -1)
25239 {
25240 adding_value = 1;
25241 opt = arm_extensions;
25242 }
25243 else if (adding_value != 1)
25244 {
25245 as_bad (_("must specify extensions to add before specifying "
25246 "those to remove"));
25247 return FALSE;
25248 }
25249 }
25250
25251 if (len == 0)
25252 {
25253 as_bad (_("missing architectural extension"));
25254 return FALSE;
25255 }
25256
25257 gas_assert (adding_value != -1);
25258 gas_assert (opt != NULL);
25259
25260 /* Scan over the options table trying to find an exact match. */
25261 for (; opt->name != NULL; opt++)
25262 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25263 {
25264 /* Check we can apply the extension to this architecture. */
25265 if (!ARM_CPU_HAS_FEATURE (*ext_set, opt->allowed_archs))
25266 {
25267 as_bad (_("extension does not apply to the base architecture"));
25268 return FALSE;
25269 }
25270
25271 /* Add or remove the extension. */
25272 if (adding_value)
25273 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
25274 else
25275 ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);
25276
25277 break;
25278 }
25279
25280 if (opt->name == NULL)
25281 {
25282 /* Did we fail to find an extension because it wasn't specified in
25283 alphabetical order, or because it does not exist? */
25284
25285 for (opt = arm_extensions; opt->name != NULL; opt++)
25286 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25287 break;
25288
25289 if (opt->name == NULL)
25290 as_bad (_("unknown architectural extension `%s'"), str);
25291 else
25292 as_bad (_("architectural extensions must be specified in "
25293 "alphabetical order"));
25294
25295 return FALSE;
25296 }
25297 else
25298 {
25299 /* We should skip the extension we've just matched the next time
25300 round. */
25301 opt++;
25302 }
25303
25304 str = ext;
25305 };
25306
25307 return TRUE;
25308 }
25309
25310 static bfd_boolean
25311 arm_parse_cpu (char *str)
25312 {
25313 const struct arm_cpu_option_table *opt;
25314 char *ext = strchr (str, '+');
25315 size_t len;
25316
25317 if (ext != NULL)
25318 len = ext - str;
25319 else
25320 len = strlen (str);
25321
25322 if (len == 0)
25323 {
25324 as_bad (_("missing cpu name `%s'"), str);
25325 return FALSE;
25326 }
25327
25328 for (opt = arm_cpus; opt->name != NULL; opt++)
25329 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25330 {
25331 mcpu_cpu_opt = &opt->value;
25332 mcpu_fpu_opt = &opt->default_fpu;
25333 if (opt->canonical_name)
25334 {
25335 gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name));
25336 strcpy (selected_cpu_name, opt->canonical_name);
25337 }
25338 else
25339 {
25340 size_t i;
25341
25342 if (len >= sizeof selected_cpu_name)
25343 len = (sizeof selected_cpu_name) - 1;
25344
25345 for (i = 0; i < len; i++)
25346 selected_cpu_name[i] = TOUPPER (opt->name[i]);
25347 selected_cpu_name[i] = 0;
25348 }
25349
25350 if (ext != NULL)
25351 return arm_parse_extension (ext, &mcpu_cpu_opt);
25352
25353 return TRUE;
25354 }
25355
25356 as_bad (_("unknown cpu `%s'"), str);
25357 return FALSE;
25358 }
25359
25360 static bfd_boolean
25361 arm_parse_arch (char *str)
25362 {
25363 const struct arm_arch_option_table *opt;
25364 char *ext = strchr (str, '+');
25365 size_t len;
25366
25367 if (ext != NULL)
25368 len = ext - str;
25369 else
25370 len = strlen (str);
25371
25372 if (len == 0)
25373 {
25374 as_bad (_("missing architecture name `%s'"), str);
25375 return FALSE;
25376 }
25377
25378 for (opt = arm_archs; opt->name != NULL; opt++)
25379 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25380 {
25381 march_cpu_opt = &opt->value;
25382 march_fpu_opt = &opt->default_fpu;
25383 strcpy (selected_cpu_name, opt->name);
25384
25385 if (ext != NULL)
25386 return arm_parse_extension (ext, &march_cpu_opt);
25387
25388 return TRUE;
25389 }
25390
25391 as_bad (_("unknown architecture `%s'\n"), str);
25392 return FALSE;
25393 }
25394
25395 static bfd_boolean
25396 arm_parse_fpu (char * str)
25397 {
25398 const struct arm_option_fpu_value_table * opt;
25399
25400 for (opt = arm_fpus; opt->name != NULL; opt++)
25401 if (streq (opt->name, str))
25402 {
25403 mfpu_opt = &opt->value;
25404 return TRUE;
25405 }
25406
25407 as_bad (_("unknown floating point format `%s'\n"), str);
25408 return FALSE;
25409 }
25410
25411 static bfd_boolean
25412 arm_parse_float_abi (char * str)
25413 {
25414 const struct arm_option_value_table * opt;
25415
25416 for (opt = arm_float_abis; opt->name != NULL; opt++)
25417 if (streq (opt->name, str))
25418 {
25419 mfloat_abi_opt = opt->value;
25420 return TRUE;
25421 }
25422
25423 as_bad (_("unknown floating point abi `%s'\n"), str);
25424 return FALSE;
25425 }
25426
25427 #ifdef OBJ_ELF
25428 static bfd_boolean
25429 arm_parse_eabi (char * str)
25430 {
25431 const struct arm_option_value_table *opt;
25432
25433 for (opt = arm_eabis; opt->name != NULL; opt++)
25434 if (streq (opt->name, str))
25435 {
25436 meabi_flags = opt->value;
25437 return TRUE;
25438 }
25439 as_bad (_("unknown EABI `%s'\n"), str);
25440 return FALSE;
25441 }
25442 #endif
25443
25444 static bfd_boolean
25445 arm_parse_it_mode (char * str)
25446 {
25447 bfd_boolean ret = TRUE;
25448
25449 if (streq ("arm", str))
25450 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
25451 else if (streq ("thumb", str))
25452 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
25453 else if (streq ("always", str))
25454 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
25455 else if (streq ("never", str))
25456 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
25457 else
25458 {
25459 as_bad (_("unknown implicit IT mode `%s', should be "\
25460 "arm, thumb, always, or never."), str);
25461 ret = FALSE;
25462 }
25463
25464 return ret;
25465 }
25466
25467 static bfd_boolean
25468 arm_ccs_mode (char * unused ATTRIBUTE_UNUSED)
25469 {
25470 codecomposer_syntax = TRUE;
25471 arm_comment_chars[0] = ';';
25472 arm_line_separator_chars[0] = 0;
25473 return TRUE;
25474 }
25475
/* Long-form command-line options.  Each entry is matched by prefix in
   md_parse_option; FUNC then parses the remainder of the argument.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {"mccs", N_("\t\t\t  TI CodeComposer Studio syntax compatibility mode"),
   arm_ccs_mode, NULL},
  {NULL, NULL, 0, NULL}		/* Terminator.  */
};
25496
/* Handle the command-line option whose first character is C with (possibly
   NULL) argument text ARG.  Returns 1 if the option was recognized and
   consumed, 0 otherwise.  */
int
md_parse_option (int c, char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* First try the simple flag options: exact match of C plus the
	 remainder of the option name in ARG.  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Then the legacy options; these store a pointer to their value
	 rather than the value itself.  */
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      /* Finally the long options, matched by prefix.  */
      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  ARG does not contain the
		 leading option character C, hence the "- 1" when skipping
		 over the matched prefix.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
25587
25588 void
25589 md_show_usage (FILE * fp)
25590 {
25591 struct arm_option_table *opt;
25592 struct arm_long_option_table *lopt;
25593
25594 fprintf (fp, _(" ARM-specific assembler options:\n"));
25595
25596 for (opt = arm_opts; opt->option != NULL; opt++)
25597 if (opt->help != NULL)
25598 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
25599
25600 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
25601 if (lopt->help != NULL)
25602 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
25603
25604 #ifdef OPTION_EB
25605 fprintf (fp, _("\
25606 -EB assemble code for a big-endian cpu\n"));
25607 #endif
25608
25609 #ifdef OPTION_EL
25610 fprintf (fp, _("\
25611 -EL assemble code for a little-endian cpu\n"));
25612 #endif
25613
25614 fprintf (fp, _("\
25615 --fix-v4bx Allow BX in ARMv4 code\n"));
25616 }
25617
25618
25619 #ifdef OBJ_ELF
typedef struct
{
  int val;		/* EABI Tag_CPU_arch value for this architecture.  */
  arm_feature_set flags;/* Feature bits that architecture provides.  */
} cpu_arch_ver_table;
25625
/* Mapping from CPU features to EABI CPU arch values.  As a general rule, table
   must be sorted least features first but some reordering is needed, eg. for
   Thumb-2 instructions to be detected as coming from ARMv6T2.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
  {1, ARM_ARCH_V4},
  {2, ARM_ARCH_V4T},
  {3, ARM_ARCH_V5},
  {3, ARM_ARCH_V5T},
  {4, ARM_ARCH_V5TE},
  {5, ARM_ARCH_V5TEJ},
  {6, ARM_ARCH_V6},
  /* The out-of-order VAL fields below are deliberate; see the
     reordering note above.  */
  {9, ARM_ARCH_V6K},
  {7, ARM_ARCH_V6Z},
  {11, ARM_ARCH_V6M},
  {12, ARM_ARCH_V6SM},
  {8, ARM_ARCH_V6T2},
  {10, ARM_ARCH_V7VE},
  {10, ARM_ARCH_V7R},
  {10, ARM_ARCH_V7M},
  {14, ARM_ARCH_V8A},
  {16, ARM_ARCH_V8M_BASE},
  {17, ARM_ARCH_V8M_MAIN},
  {0, ARM_ARCH_NONE}	/* Terminator.  */
};
25651
25652 /* Set an attribute if it has not already been set by the user. */
25653 static void
25654 aeabi_set_attribute_int (int tag, int value)
25655 {
25656 if (tag < 1
25657 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
25658 || !attributes_set_explicitly[tag])
25659 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
25660 }
25661
25662 static void
25663 aeabi_set_attribute_string (int tag, const char *value)
25664 {
25665 if (tag < 1
25666 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
25667 || !attributes_set_explicitly[tag])
25668 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
25669 }
25670
/* Set the public EABI object attributes.  Computes the union of features
   actually used and/or requested, derives the Tag_CPU_arch value from it,
   and emits the standard build-attribute tags.  Order matters: later tags
   depend on ARCH, PROFILE and FP16_OPTIONAL computed earlier.  */
void
aeabi_set_public_attributes (void)
{
  int arch;
  char profile;
  int virt_sec = 0;
  int fp16_optional = 0;
  arm_feature_set flags;
  arm_feature_set tmp;
  arm_feature_set arm_arch_v8m_base = ARM_ARCH_V8M_BASE;
  const cpu_arch_ver_table *p;

  /* Choose the architecture based on the capabilities of the requested cpu
     (if any) and/or the instructions actually used.  */
  ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
  ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);

  if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
    ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);

  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
    ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);

  selected_cpu = flags;

  /* Allow the user to override the reported architecture.  */
  if (object_arch)
    {
      ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
      ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
    }

  /* We need to make sure that the attributes do not identify us as v6S-M
     when the only v6S-M feature in use is the Operating System Extensions.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_os))
      if (!ARM_CPU_HAS_FEATURE (flags, arm_arch_v6m_only))
	ARM_CLEAR_FEATURE (flags, flags, arm_ext_os);

  /* Walk the version table, clearing each architecture's features from TMP;
     the last entry that still contributed something wins.  */
  tmp = flags;
  arch = 0;
  for (p = cpu_arch_ver; p->val; p++)
    {
      if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
	{
	  arch = p->val;
	  ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
	}
    }

  /* The table lookup above finds the last architecture to contribute
     a new feature.  Unfortunately, Tag13 is a subset of the union of
     v6T2 and v7-M, so it is never seen as contributing a new feature.
     We can not search for the last entry which is entirely used,
     because if no CPU is specified we build up only those flags
     actually used.  Perhaps we should separate out the specified
     and implicit cases.  Avoid taking this path for -march=all by
     checking for contradictory v7-A / v7-M features.  */
  if (arch == TAG_CPU_ARCH_V7
      && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp))
    arch = TAG_CPU_ARCH_V7E_M;

  /* Promote v8-M Baseline to Mainline if features beyond Baseline are
     in use.  */
  ARM_CLEAR_FEATURE (tmp, flags, arm_arch_v8m_base);
  if (arch == TAG_CPU_ARCH_V8M_BASE && ARM_CPU_HAS_FEATURE (tmp, arm_arch_any))
    arch = TAG_CPU_ARCH_V8M_MAIN;

  /* In cpu_arch_ver ARMv8-A is before ARMv8-M for atomics to be detected as
     coming from ARMv8-A.  However, since ARMv8-A has more instructions than
     ARMv8-M, -march=all must be detected as ARMv8-A.  */
  if (arch == TAG_CPU_ARCH_V8M_MAIN
      && ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    arch = TAG_CPU_ARCH_V8;

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      /* "armvX" names are reported upper-cased without the "armv"
	 prefix (in place, which also updates selected_cpu_name).  */
      if (strncmp (q, "armv", 4) == 0)
	{
	  int i;

	  q += 4;
	  for (i = 0; q[i]; i++)
	    q[i] = TOUPPER (q[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || (ARM_CPU_HAS_FEATURE (flags, arm_ext_atomics)
	  && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m)))
    profile = 'A';
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
    profile = 'R';
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
    profile = 'M';
  else
    profile = '\0';

  if (profile != '\0')
    aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);

  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || arch == 0)
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || arch == 0)
    {
      int thumb_isa_use;

      if (!ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
	  && ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
	thumb_isa_use = 3;
      else if (ARM_CPU_HAS_FEATURE (flags, arm_arch_t2))
	thumb_isa_use = 2;
      else
	thumb_isa_use = 1;
      aeabi_set_attribute_int (Tag_THUMB_ISA_use, thumb_isa_use);
    }

  /* Tag_VFP_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 7 : 8);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    {
      fp16_optional = 1;
      aeabi_set_attribute_int (Tag_VFP_arch, 3);
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    {
      aeabi_set_attribute_int (Tag_VFP_arch, 4);
      fp16_optional = 1;
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_ABI_HardFP_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    {
      if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
	}
      else
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
	  fp16_optional = 1;
	}
    }

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);

  /* Tag_DIV_use.

     We set Tag_DIV_use to two when integer divide instructions have been used
     in ARM state, or when Thumb integer divide instructions have been used,
     but we have no architecture profile set, nor have we any ARM instructions.

     For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
     by the base architecture.

     For new architectures we will have to check these tests.  */
  gas_assert (arch <= TAG_CPU_ARCH_V8
	      || (arch >= TAG_CPU_ARCH_V8M_BASE
		  && arch <= TAG_CPU_ARCH_V8M_MAIN));
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
    aeabi_set_attribute_int (Tag_DIV_use, 0);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
	   || (profile == '\0'
	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
    aeabi_set_attribute_int (Tag_DIV_use, 2);

  /* Tag_MP_extension_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
    aeabi_set_attribute_int (Tag_MPextension_use, 1);

  /* Tag Virtualization_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
    virt_sec |= 1;
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
    virt_sec |= 2;
  if (virt_sec != 0)
    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
}
25894
25895 /* Add the default contents for the .ARM.attributes section. */
25896 void
25897 arm_md_end (void)
25898 {
25899 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
25900 return;
25901
25902 aeabi_set_public_attributes ();
25903 }
25904 #endif /* OBJ_ELF */
25905
25906
25907 /* Parse a .cpu directive. */
25908
25909 static void
25910 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
25911 {
25912 const struct arm_cpu_option_table *opt;
25913 char *name;
25914 char saved_char;
25915
25916 name = input_line_pointer;
25917 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25918 input_line_pointer++;
25919 saved_char = *input_line_pointer;
25920 *input_line_pointer = 0;
25921
25922 /* Skip the first "all" entry. */
25923 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
25924 if (streq (opt->name, name))
25925 {
25926 mcpu_cpu_opt = &opt->value;
25927 selected_cpu = opt->value;
25928 if (opt->canonical_name)
25929 strcpy (selected_cpu_name, opt->canonical_name);
25930 else
25931 {
25932 int i;
25933 for (i = 0; opt->name[i]; i++)
25934 selected_cpu_name[i] = TOUPPER (opt->name[i]);
25935
25936 selected_cpu_name[i] = 0;
25937 }
25938 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25939 *input_line_pointer = saved_char;
25940 demand_empty_rest_of_line ();
25941 return;
25942 }
25943 as_bad (_("unknown cpu `%s'"), name);
25944 *input_line_pointer = saved_char;
25945 ignore_rest_of_line ();
25946 }
25947
25948
25949 /* Parse a .arch directive. */
25950
25951 static void
25952 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
25953 {
25954 const struct arm_arch_option_table *opt;
25955 char saved_char;
25956 char *name;
25957
25958 name = input_line_pointer;
25959 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25960 input_line_pointer++;
25961 saved_char = *input_line_pointer;
25962 *input_line_pointer = 0;
25963
25964 /* Skip the first "all" entry. */
25965 for (opt = arm_archs + 1; opt->name != NULL; opt++)
25966 if (streq (opt->name, name))
25967 {
25968 mcpu_cpu_opt = &opt->value;
25969 selected_cpu = opt->value;
25970 strcpy (selected_cpu_name, opt->name);
25971 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25972 *input_line_pointer = saved_char;
25973 demand_empty_rest_of_line ();
25974 return;
25975 }
25976
25977 as_bad (_("unknown architecture `%s'\n"), name);
25978 *input_line_pointer = saved_char;
25979 ignore_rest_of_line ();
25980 }
25981
25982
25983 /* Parse a .object_arch directive. */
25984
25985 static void
25986 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
25987 {
25988 const struct arm_arch_option_table *opt;
25989 char saved_char;
25990 char *name;
25991
25992 name = input_line_pointer;
25993 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25994 input_line_pointer++;
25995 saved_char = *input_line_pointer;
25996 *input_line_pointer = 0;
25997
25998 /* Skip the first "all" entry. */
25999 for (opt = arm_archs + 1; opt->name != NULL; opt++)
26000 if (streq (opt->name, name))
26001 {
26002 object_arch = &opt->value;
26003 *input_line_pointer = saved_char;
26004 demand_empty_rest_of_line ();
26005 return;
26006 }
26007
26008 as_bad (_("unknown architecture `%s'\n"), name);
26009 *input_line_pointer = saved_char;
26010 ignore_rest_of_line ();
26011 }
26012
26013 /* Parse a .arch_extension directive. */
26014
26015 static void
26016 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
26017 {
26018 const struct arm_option_extension_value_table *opt;
26019 char saved_char;
26020 char *name;
26021 int adding_value = 1;
26022
26023 name = input_line_pointer;
26024 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26025 input_line_pointer++;
26026 saved_char = *input_line_pointer;
26027 *input_line_pointer = 0;
26028
26029 if (strlen (name) >= 2
26030 && strncmp (name, "no", 2) == 0)
26031 {
26032 adding_value = 0;
26033 name += 2;
26034 }
26035
26036 for (opt = arm_extensions; opt->name != NULL; opt++)
26037 if (streq (opt->name, name))
26038 {
26039 if (!ARM_CPU_HAS_FEATURE (*mcpu_cpu_opt, opt->allowed_archs))
26040 {
26041 as_bad (_("architectural extension `%s' is not allowed for the "
26042 "current base architecture"), name);
26043 break;
26044 }
26045
26046 if (adding_value)
26047 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_cpu,
26048 opt->merge_value);
26049 else
26050 ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, opt->clear_value);
26051
26052 mcpu_cpu_opt = &selected_cpu;
26053 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26054 *input_line_pointer = saved_char;
26055 demand_empty_rest_of_line ();
26056 return;
26057 }
26058
26059 if (opt->name == NULL)
26060 as_bad (_("unknown architecture extension `%s'\n"), name);
26061
26062 *input_line_pointer = saved_char;
26063 ignore_rest_of_line ();
26064 }
26065
26066 /* Parse a .fpu directive. */
26067
26068 static void
26069 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
26070 {
26071 const struct arm_option_fpu_value_table *opt;
26072 char saved_char;
26073 char *name;
26074
26075 name = input_line_pointer;
26076 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26077 input_line_pointer++;
26078 saved_char = *input_line_pointer;
26079 *input_line_pointer = 0;
26080
26081 for (opt = arm_fpus; opt->name != NULL; opt++)
26082 if (streq (opt->name, name))
26083 {
26084 mfpu_opt = &opt->value;
26085 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26086 *input_line_pointer = saved_char;
26087 demand_empty_rest_of_line ();
26088 return;
26089 }
26090
26091 as_bad (_("unknown floating point format `%s'\n"), name);
26092 *input_line_pointer = saved_char;
26093 ignore_rest_of_line ();
26094 }
26095
/* Copy symbol information.  */

void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  /* Propagate the ARM-specific per-symbol flag word from SRC to DEST.  */
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
26103
26104 #ifdef OBJ_ELF
/* Given a symbolic attribute NAME, return the proper integer value.
   Returns -1 if the attribute is not known.  */

int
arm_convert_symbolic_attribute (const char *name)
{
  /* Table mapping each recognized attribute name to its tag number.  */
  static const struct
  {
    const char * name;
    const int tag;
  }
  attribute_table[] =
    {
      /* When you modify this table you should
	 also modify the list in doc/c-arm.texi.  */
#define T(tag) {#tag, tag}
      T (Tag_CPU_raw_name),
      T (Tag_CPU_name),
      T (Tag_CPU_arch),
      T (Tag_CPU_arch_profile),
      T (Tag_ARM_ISA_use),
      T (Tag_THUMB_ISA_use),
      T (Tag_FP_arch),
      T (Tag_VFP_arch),
      T (Tag_WMMX_arch),
      T (Tag_Advanced_SIMD_arch),
      T (Tag_PCS_config),
      T (Tag_ABI_PCS_R9_use),
      T (Tag_ABI_PCS_RW_data),
      T (Tag_ABI_PCS_RO_data),
      T (Tag_ABI_PCS_GOT_use),
      T (Tag_ABI_PCS_wchar_t),
      T (Tag_ABI_FP_rounding),
      T (Tag_ABI_FP_denormal),
      T (Tag_ABI_FP_exceptions),
      T (Tag_ABI_FP_user_exceptions),
      T (Tag_ABI_FP_number_model),
      T (Tag_ABI_align_needed),
      T (Tag_ABI_align8_needed),
      T (Tag_ABI_align_preserved),
      T (Tag_ABI_align8_preserved),
      T (Tag_ABI_enum_size),
      T (Tag_ABI_HardFP_use),
      T (Tag_ABI_VFP_args),
      T (Tag_ABI_WMMX_args),
      T (Tag_ABI_optimization_goals),
      T (Tag_ABI_FP_optimization_goals),
      T (Tag_compatibility),
      T (Tag_CPU_unaligned_access),
      T (Tag_FP_HP_extension),
      T (Tag_VFP_HP_extension),
      T (Tag_ABI_FP_16bit_format),
      T (Tag_MPextension_use),
      T (Tag_DIV_use),
      T (Tag_nodefaults),
      T (Tag_also_compatible_with),
      T (Tag_conformance),
      T (Tag_T2EE_use),
      T (Tag_Virtualization_use),
      /* We deliberately do not include Tag_MPextension_use_legacy.  */
#undef T
    };
  unsigned int i;

  if (name == NULL)
    return -1;

  /* Linear scan; the table is small and this is not a hot path.  */
  for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
    if (streq (name, attribute_table[i].name))
      return attribute_table[i].tag;

  return -1;
}
26178
26179
/* Apply sym value for relocations only in the case that they are for
   local symbols in the same segment as the fixup and you have the
   respective architectural feature for blx and simple switches.
   Returns non-zero if the fixup's symbol value may be applied now.  */
int
arm_apply_sym_value (struct fix * fixP, segT this_seg)
{
  if (fixP->fx_addsy
      /* Only relevant when BLX is available (v5T and later).  */
      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
      /* PR 17444: If the local symbol is in a different section then a reloc
	 will always be generated for it, so applying the symbol value now
	 will result in a double offset being stored in the relocation.  */
      && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
      && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
    {
      switch (fixP->fx_r_type)
	{
	/* ARM-mode BLX / Thumb branch resolving to an ARM-marked
	   function.  */
	case BFD_RELOC_ARM_PCREL_BLX:
	case BFD_RELOC_THUMB_PCREL_BRANCH23:
	  if (ARM_IS_FUNC (fixP->fx_addsy))
	    return 1;
	  break;

	/* ARM-mode call / Thumb BLX resolving to a Thumb-marked
	   function.  */
	case BFD_RELOC_ARM_PCREL_CALL:
	case BFD_RELOC_THUMB_PCREL_BLX:
	  if (THUMB_IS_FUNC (fixP->fx_addsy))
	    return 1;
	  break;

	default:
	  break;
	}

    }
  return 0;
}
26215 #endif /* OBJ_ELF */