Remove legacy basepri_mask MRS/MSR special reg
[binutils-gdb.git] / gas / config / tc-arm.c
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2016 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8
9 This file is part of GAS, the GNU Assembler.
10
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
14 any later version.
15
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24 02110-1301, USA. */
25
26 #include "as.h"
27 #include <limits.h>
28 #include <stdarg.h>
29 #define NO_RELOC 0
30 #include "safe-ctype.h"
31 #include "subsegs.h"
32 #include "obstack.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
35
36 #ifdef OBJ_ELF
37 #include "elf/arm.h"
38 #include "dw2gencfi.h"
39 #endif
40
41 #include "dwarf2dbg.h"
42
43 #ifdef OBJ_ELF
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
46
47 /* This structure holds the unwinding state. */
48
49 static struct
50 {
51 symbolS * proc_start;
52 symbolS * table_entry;
53 symbolS * personality_routine;
54 int personality_index;
55 /* The segment containing the function. */
56 segT saved_seg;
57 subsegT saved_subseg;
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes;
60 int opcode_count;
61 int opcode_alloc;
62 /* The number of bytes pushed to the stack. */
63 offsetT frame_size;
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
70 offsetT fp_offset;
71 int fp_reg;
72 /* Nonzero if an unwind_setfp directive has been seen. */
73 unsigned fp_used:1;
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored:1;
76 } unwind;
77
78 #endif /* OBJ_ELF */
79
80 /* Results from operand parsing worker functions. */
81
82 typedef enum
83 {
84 PARSE_OPERAND_SUCCESS,
85 PARSE_OPERAND_FAIL,
86 PARSE_OPERAND_FAIL_NO_BACKTRACK
87 } parse_operand_result;
88
89 enum arm_float_abi
90 {
91 ARM_FLOAT_ABI_HARD,
92 ARM_FLOAT_ABI_SOFTFP,
93 ARM_FLOAT_ABI_SOFT
94 };
95
96 /* Types of processor to assemble for. */
97 #ifndef CPU_DEFAULT
98 /* The code that was here used to select a default CPU depending on compiler
99 pre-defines which were only present when doing native builds, thus
100 changing gas' default behaviour depending upon the build host.
101
102 If you have a target that requires a default CPU option then the you
103 should define CPU_DEFAULT here. */
104 #endif
105
106 #ifndef FPU_DEFAULT
107 # ifdef TE_LINUX
108 # define FPU_DEFAULT FPU_ARCH_FPA
109 # elif defined (TE_NetBSD)
110 # ifdef OBJ_ELF
111 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
112 # else
113 /* Legacy a.out format. */
114 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
115 # endif
116 # elif defined (TE_VXWORKS)
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
118 # else
119 /* For backwards compatibility, default to FPA. */
120 # define FPU_DEFAULT FPU_ARCH_FPA
121 # endif
122 #endif /* ifndef FPU_DEFAULT */
123
124 #define streq(a, b) (strcmp (a, b) == 0)
125
126 static arm_feature_set cpu_variant;
127 static arm_feature_set arm_arch_used;
128 static arm_feature_set thumb_arch_used;
129
130 /* Flags stored in private area of BFD structure. */
131 static int uses_apcs_26 = FALSE;
132 static int atpcs = FALSE;
133 static int support_interwork = FALSE;
134 static int uses_apcs_float = FALSE;
135 static int pic_code = FALSE;
136 static int fix_v4bx = FALSE;
137 /* Warn on using deprecated features. */
138 static int warn_on_deprecated = TRUE;
139
140 /* Understand CodeComposer Studio assembly syntax. */
141 bfd_boolean codecomposer_syntax = FALSE;
142
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
145 assembly flags. */
146 static const arm_feature_set *legacy_cpu = NULL;
147 static const arm_feature_set *legacy_fpu = NULL;
148
149 static const arm_feature_set *mcpu_cpu_opt = NULL;
150 static const arm_feature_set *mcpu_fpu_opt = NULL;
151 static const arm_feature_set *march_cpu_opt = NULL;
152 static const arm_feature_set *march_fpu_opt = NULL;
153 static const arm_feature_set *mfpu_opt = NULL;
154 static const arm_feature_set *object_arch = NULL;
155
156 /* Constants for known architecture features. */
157 static const arm_feature_set fpu_default = FPU_DEFAULT;
158 static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V1;
159 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
160 static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V3;
161 static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED = FPU_ARCH_NEON_V1;
162 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
163 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
164 #ifdef OBJ_ELF
165 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
166 #endif
167 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
168
169 #ifdef CPU_DEFAULT
170 static const arm_feature_set cpu_default = CPU_DEFAULT;
171 #endif
172
173 static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
174 static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
175 static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
176 static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
177 static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
178 static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
179 static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
180 static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
181 static const arm_feature_set arm_ext_v4t_5 =
182 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
183 static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
184 static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
185 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
186 static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
187 static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
188 static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
189 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
190 static const arm_feature_set arm_ext_v6m = ARM_FEATURE_CORE_LOW (ARM_EXT_V6M);
191 static const arm_feature_set arm_ext_v6_notm =
192 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
193 static const arm_feature_set arm_ext_v6_dsp =
194 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
195 static const arm_feature_set arm_ext_barrier =
196 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
197 static const arm_feature_set arm_ext_msr =
198 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
199 static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
200 static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
201 static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
202 static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
203 #ifdef OBJ_ELF
204 static const arm_feature_set arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
205 #endif
206 static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
207 static const arm_feature_set arm_ext_m =
208 ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_OS | ARM_EXT_V7M,
209 ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
210 static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
211 static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
212 static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
213 static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
214 static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
215 static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
216 static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
217 static const arm_feature_set arm_ext_v8m_main =
218 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN);
219 /* Instructions in ARMv8-M only found in M profile architectures. */
220 static const arm_feature_set arm_ext_v8m_m_only =
221 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
222 static const arm_feature_set arm_ext_v6t2_v8m =
223 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
224 /* Instructions shared between ARMv8-A and ARMv8-M. */
225 static const arm_feature_set arm_ext_atomics =
226 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
227 #ifdef OBJ_ELF
228 /* DSP instructions Tag_DSP_extension refers to. */
229 static const arm_feature_set arm_ext_dsp =
230 ARM_FEATURE_CORE_LOW (ARM_EXT_V5E | ARM_EXT_V5ExP | ARM_EXT_V6_DSP);
231 #endif
232 static const arm_feature_set arm_ext_ras =
233 ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS);
234 /* FP16 instructions. */
235 static const arm_feature_set arm_ext_fp16 =
236 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
237
238 static const arm_feature_set arm_arch_any = ARM_ANY;
239 static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED = ARM_FEATURE (-1, -1, -1);
240 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
241 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
242 #ifdef OBJ_ELF
243 static const arm_feature_set arm_arch_v6m_only = ARM_ARCH_V6M_ONLY;
244 #endif
245
246 static const arm_feature_set arm_cext_iwmmxt2 =
247 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
248 static const arm_feature_set arm_cext_iwmmxt =
249 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
250 static const arm_feature_set arm_cext_xscale =
251 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
252 static const arm_feature_set arm_cext_maverick =
253 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
254 static const arm_feature_set fpu_fpa_ext_v1 =
255 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
256 static const arm_feature_set fpu_fpa_ext_v2 =
257 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
258 static const arm_feature_set fpu_vfp_ext_v1xd =
259 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
260 static const arm_feature_set fpu_vfp_ext_v1 =
261 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
262 static const arm_feature_set fpu_vfp_ext_v2 =
263 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
264 static const arm_feature_set fpu_vfp_ext_v3xd =
265 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
266 static const arm_feature_set fpu_vfp_ext_v3 =
267 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
268 static const arm_feature_set fpu_vfp_ext_d32 =
269 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
270 static const arm_feature_set fpu_neon_ext_v1 =
271 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
272 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
273 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
274 #ifdef OBJ_ELF
275 static const arm_feature_set fpu_vfp_fp16 =
276 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
277 static const arm_feature_set fpu_neon_ext_fma =
278 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
279 #endif
280 static const arm_feature_set fpu_vfp_ext_fma =
281 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
282 static const arm_feature_set fpu_vfp_ext_armv8 =
283 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
284 static const arm_feature_set fpu_vfp_ext_armv8xd =
285 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
286 static const arm_feature_set fpu_neon_ext_armv8 =
287 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
288 static const arm_feature_set fpu_crypto_ext_armv8 =
289 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
290 static const arm_feature_set crc_ext_armv8 =
291 ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
292 static const arm_feature_set fpu_neon_ext_v8_1 =
293 ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA);
294
295 static int mfloat_abi_opt = -1;
296 /* Record user cpu selection for object attributes. */
297 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
298 /* Must be long enough to hold any of the names in arm_cpus. */
299 static char selected_cpu_name[20];
300
301 extern FLONUM_TYPE generic_floating_point_number;
302
303 /* Return if no cpu was selected on command-line. */
304 static bfd_boolean
305 no_cpu_selected (void)
306 {
307 return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
308 }
309
310 #ifdef OBJ_ELF
311 # ifdef EABI_DEFAULT
312 static int meabi_flags = EABI_DEFAULT;
313 # else
314 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
315 # endif
316
317 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
318
319 bfd_boolean
320 arm_is_eabi (void)
321 {
322 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
323 }
324 #endif
325
326 #ifdef OBJ_ELF
327 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
328 symbolS * GOT_symbol;
329 #endif
330
331 /* 0: assemble for ARM,
332 1: assemble for Thumb,
333 2: assemble for Thumb even though target CPU does not support thumb
334 instructions. */
335 static int thumb_mode = 0;
336 /* A value distinct from the possible values for thumb_mode that we
337 can use to record whether thumb_mode has been copied into the
338 tc_frag_data field of a frag. */
339 #define MODE_RECORDED (1 << 4)
340
341 /* Specifies the intrinsic IT insn behavior mode. */
342 enum implicit_it_mode
343 {
344 IMPLICIT_IT_MODE_NEVER = 0x00,
345 IMPLICIT_IT_MODE_ARM = 0x01,
346 IMPLICIT_IT_MODE_THUMB = 0x02,
347 IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
348 };
349 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
350
351 /* If unified_syntax is true, we are processing the new unified
352 ARM/Thumb syntax. Important differences from the old ARM mode:
353
354 - Immediate operands do not require a # prefix.
355 - Conditional affixes always appear at the end of the
356 instruction. (For backward compatibility, those instructions
357 that formerly had them in the middle, continue to accept them
358 there.)
359 - The IT instruction may appear, and if it does is validated
360 against subsequent conditional affixes. It does not generate
361 machine code.
362
363 Important differences from the old Thumb mode:
364
365 - Immediate operands do not require a # prefix.
366 - Most of the V6T2 instructions are only available in unified mode.
367 - The .N and .W suffixes are recognized and honored (it is an error
368 if they cannot be honored).
369 - All instructions set the flags if and only if they have an 's' affix.
370 - Conditional affixes may be used. They are validated against
371 preceding IT instructions. Unlike ARM mode, you cannot use a
372 conditional affix except in the scope of an IT instruction. */
373
374 static bfd_boolean unified_syntax = FALSE;
375
376 /* An immediate operand can start with #, and ld*, st*, pld operands
377 can contain [ and ]. We need to tell APP not to elide whitespace
378 before a [, which can appear as the first operand for pld.
379 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
380 const char arm_symbol_chars[] = "#[]{}";
381
382 enum neon_el_type
383 {
384 NT_invtype,
385 NT_untyped,
386 NT_integer,
387 NT_float,
388 NT_poly,
389 NT_signed,
390 NT_unsigned
391 };
392
393 struct neon_type_el
394 {
395 enum neon_el_type type;
396 unsigned size;
397 };
398
399 #define NEON_MAX_TYPE_ELS 4
400
401 struct neon_type
402 {
403 struct neon_type_el el[NEON_MAX_TYPE_ELS];
404 unsigned elems;
405 };
406
407 enum it_instruction_type
408 {
409 OUTSIDE_IT_INSN,
410 INSIDE_IT_INSN,
411 INSIDE_IT_LAST_INSN,
412 IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
413 if inside, should be the last one. */
414 NEUTRAL_IT_INSN, /* This could be either inside or outside,
415 i.e. BKPT and NOP. */
416 IT_INSN /* The IT insn has been parsed. */
417 };
418
419 /* The maximum number of operands we need. */
420 #define ARM_IT_MAX_OPERANDS 6
421
422 struct arm_it
423 {
424 const char * error;
425 unsigned long instruction;
426 int size;
427 int size_req;
428 int cond;
429 /* "uncond_value" is set to the value in place of the conditional field in
430 unconditional versions of the instruction, or -1 if nothing is
431 appropriate. */
432 int uncond_value;
433 struct neon_type vectype;
434 /* This does not indicate an actual NEON instruction, only that
435 the mnemonic accepts neon-style type suffixes. */
436 int is_neon;
437 /* Set to the opcode if the instruction needs relaxation.
438 Zero if the instruction is not relaxed. */
439 unsigned long relax;
440 struct
441 {
442 bfd_reloc_code_real_type type;
443 expressionS exp;
444 int pc_rel;
445 } reloc;
446
447 enum it_instruction_type it_insn_type;
448
449 struct
450 {
451 unsigned reg;
452 signed int imm;
453 struct neon_type_el vectype;
454 unsigned present : 1; /* Operand present. */
455 unsigned isreg : 1; /* Operand was a register. */
456 unsigned immisreg : 1; /* .imm field is a second register. */
457 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
458 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
459 unsigned immisfloat : 1; /* Immediate was parsed as a float. */
460 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
461 instructions. This allows us to disambiguate ARM <-> vector insns. */
462 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
463 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
464 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
465 unsigned issingle : 1; /* Operand is VFP single-precision register. */
466 unsigned hasreloc : 1; /* Operand has relocation suffix. */
467 unsigned writeback : 1; /* Operand has trailing ! */
468 unsigned preind : 1; /* Preindexed address. */
469 unsigned postind : 1; /* Postindexed address. */
470 unsigned negative : 1; /* Index register was negated. */
471 unsigned shifted : 1; /* Shift applied to operation. */
472 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
473 } operands[ARM_IT_MAX_OPERANDS];
474 };
475
476 static struct arm_it inst;
477
478 #define NUM_FLOAT_VALS 8
479
480 const char * fp_const[] =
481 {
482 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
483 };
484
485 /* Number of littlenums required to hold an extended precision number. */
486 #define MAX_LITTLENUMS 6
487
488 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
489
490 #define FAIL (-1)
491 #define SUCCESS (0)
492
493 #define SUFF_S 1
494 #define SUFF_D 2
495 #define SUFF_E 3
496 #define SUFF_P 4
497
498 #define CP_T_X 0x00008000
499 #define CP_T_Y 0x00400000
500
501 #define CONDS_BIT 0x00100000
502 #define LOAD_BIT 0x00100000
503
504 #define DOUBLE_LOAD_FLAG 0x00000001
505
506 struct asm_cond
507 {
508 const char * template_name;
509 unsigned long value;
510 };
511
512 #define COND_ALWAYS 0xE
513
514 struct asm_psr
515 {
516 const char * template_name;
517 unsigned long field;
518 };
519
520 struct asm_barrier_opt
521 {
522 const char * template_name;
523 unsigned long value;
524 const arm_feature_set arch;
525 };
526
527 /* The bit that distinguishes CPSR and SPSR. */
528 #define SPSR_BIT (1 << 22)
529
530 /* The individual PSR flag bits. */
531 #define PSR_c (1 << 16)
532 #define PSR_x (1 << 17)
533 #define PSR_s (1 << 18)
534 #define PSR_f (1 << 19)
535
536 struct reloc_entry
537 {
538 const char * name;
539 bfd_reloc_code_real_type reloc;
540 };
541
542 enum vfp_reg_pos
543 {
544 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
545 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
546 };
547
548 enum vfp_ldstm_type
549 {
550 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
551 };
552
553 /* Bits for DEFINED field in neon_typed_alias. */
554 #define NTA_HASTYPE 1
555 #define NTA_HASINDEX 2
556
557 struct neon_typed_alias
558 {
559 unsigned char defined;
560 unsigned char index;
561 struct neon_type_el eltype;
562 };
563
564 /* ARM register categories. This includes coprocessor numbers and various
565 architecture extensions' registers. */
566 enum arm_reg_type
567 {
568 REG_TYPE_RN,
569 REG_TYPE_CP,
570 REG_TYPE_CN,
571 REG_TYPE_FN,
572 REG_TYPE_VFS,
573 REG_TYPE_VFD,
574 REG_TYPE_NQ,
575 REG_TYPE_VFSD,
576 REG_TYPE_NDQ,
577 REG_TYPE_NSDQ,
578 REG_TYPE_VFC,
579 REG_TYPE_MVF,
580 REG_TYPE_MVD,
581 REG_TYPE_MVFX,
582 REG_TYPE_MVDX,
583 REG_TYPE_MVAX,
584 REG_TYPE_DSPSC,
585 REG_TYPE_MMXWR,
586 REG_TYPE_MMXWC,
587 REG_TYPE_MMXWCG,
588 REG_TYPE_XSCALE,
589 REG_TYPE_RNB
590 };
591
592 /* Structure for a hash table entry for a register.
593 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
594 information which states whether a vector type or index is specified (for a
595 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
596 struct reg_entry
597 {
598 const char * name;
599 unsigned int number;
600 unsigned char type;
601 unsigned char builtin;
602 struct neon_typed_alias * neon;
603 };
604
605 /* Diagnostics used when we don't get a register of the expected type. */
606 const char * const reg_expected_msgs[] =
607 {
608 N_("ARM register expected"),
609 N_("bad or missing co-processor number"),
610 N_("co-processor register expected"),
611 N_("FPA register expected"),
612 N_("VFP single precision register expected"),
613 N_("VFP/Neon double precision register expected"),
614 N_("Neon quad precision register expected"),
615 N_("VFP single or double precision register expected"),
616 N_("Neon double or quad precision register expected"),
617 N_("VFP single, double or Neon quad precision register expected"),
618 N_("VFP system register expected"),
619 N_("Maverick MVF register expected"),
620 N_("Maverick MVD register expected"),
621 N_("Maverick MVFX register expected"),
622 N_("Maverick MVDX register expected"),
623 N_("Maverick MVAX register expected"),
624 N_("Maverick DSPSC register expected"),
625 N_("iWMMXt data register expected"),
626 N_("iWMMXt control register expected"),
627 N_("iWMMXt scalar register expected"),
628 N_("XScale accumulator register expected"),
629 };
630
631 /* Some well known registers that we refer to directly elsewhere. */
632 #define REG_R12 12
633 #define REG_SP 13
634 #define REG_LR 14
635 #define REG_PC 15
636
637 /* ARM instructions take 4bytes in the object file, Thumb instructions
638 take 2: */
639 #define INSN_SIZE 4
640
641 struct asm_opcode
642 {
643 /* Basic string to match. */
644 const char * template_name;
645
646 /* Parameters to instruction. */
647 unsigned int operands[8];
648
649 /* Conditional tag - see opcode_lookup. */
650 unsigned int tag : 4;
651
652 /* Basic instruction code. */
653 unsigned int avalue : 28;
654
655 /* Thumb-format instruction code. */
656 unsigned int tvalue;
657
658 /* Which architecture variant provides this instruction. */
659 const arm_feature_set * avariant;
660 const arm_feature_set * tvariant;
661
662 /* Function to call to encode instruction in ARM format. */
663 void (* aencode) (void);
664
665 /* Function to call to encode instruction in Thumb format. */
666 void (* tencode) (void);
667 };
668
669 /* Defines for various bits that we will want to toggle. */
670 #define INST_IMMEDIATE 0x02000000
671 #define OFFSET_REG 0x02000000
672 #define HWOFFSET_IMM 0x00400000
673 #define SHIFT_BY_REG 0x00000010
674 #define PRE_INDEX 0x01000000
675 #define INDEX_UP 0x00800000
676 #define WRITE_BACK 0x00200000
677 #define LDM_TYPE_2_OR_3 0x00400000
678 #define CPSI_MMOD 0x00020000
679
680 #define LITERAL_MASK 0xf000f000
681 #define OPCODE_MASK 0xfe1fffff
682 #define V4_STR_BIT 0x00000020
683 #define VLDR_VMOV_SAME 0x0040f000
684
685 #define T2_SUBS_PC_LR 0xf3de8f00
686
687 #define DATA_OP_SHIFT 21
688
689 #define T2_OPCODE_MASK 0xfe1fffff
690 #define T2_DATA_OP_SHIFT 21
691
692 #define A_COND_MASK 0xf0000000
693 #define A_PUSH_POP_OP_MASK 0x0fff0000
694
695 /* Opcodes for pushing/poping registers to/from the stack. */
696 #define A1_OPCODE_PUSH 0x092d0000
697 #define A2_OPCODE_PUSH 0x052d0004
698 #define A2_OPCODE_POP 0x049d0004
699
700 /* Codes to distinguish the arithmetic instructions. */
701 #define OPCODE_AND 0
702 #define OPCODE_EOR 1
703 #define OPCODE_SUB 2
704 #define OPCODE_RSB 3
705 #define OPCODE_ADD 4
706 #define OPCODE_ADC 5
707 #define OPCODE_SBC 6
708 #define OPCODE_RSC 7
709 #define OPCODE_TST 8
710 #define OPCODE_TEQ 9
711 #define OPCODE_CMP 10
712 #define OPCODE_CMN 11
713 #define OPCODE_ORR 12
714 #define OPCODE_MOV 13
715 #define OPCODE_BIC 14
716 #define OPCODE_MVN 15
717
718 #define T2_OPCODE_AND 0
719 #define T2_OPCODE_BIC 1
720 #define T2_OPCODE_ORR 2
721 #define T2_OPCODE_ORN 3
722 #define T2_OPCODE_EOR 4
723 #define T2_OPCODE_ADD 8
724 #define T2_OPCODE_ADC 10
725 #define T2_OPCODE_SBC 11
726 #define T2_OPCODE_SUB 13
727 #define T2_OPCODE_RSB 14
728
729 #define T_OPCODE_MUL 0x4340
730 #define T_OPCODE_TST 0x4200
731 #define T_OPCODE_CMN 0x42c0
732 #define T_OPCODE_NEG 0x4240
733 #define T_OPCODE_MVN 0x43c0
734
735 #define T_OPCODE_ADD_R3 0x1800
736 #define T_OPCODE_SUB_R3 0x1a00
737 #define T_OPCODE_ADD_HI 0x4400
738 #define T_OPCODE_ADD_ST 0xb000
739 #define T_OPCODE_SUB_ST 0xb080
740 #define T_OPCODE_ADD_SP 0xa800
741 #define T_OPCODE_ADD_PC 0xa000
742 #define T_OPCODE_ADD_I8 0x3000
743 #define T_OPCODE_SUB_I8 0x3800
744 #define T_OPCODE_ADD_I3 0x1c00
745 #define T_OPCODE_SUB_I3 0x1e00
746
747 #define T_OPCODE_ASR_R 0x4100
748 #define T_OPCODE_LSL_R 0x4080
749 #define T_OPCODE_LSR_R 0x40c0
750 #define T_OPCODE_ROR_R 0x41c0
751 #define T_OPCODE_ASR_I 0x1000
752 #define T_OPCODE_LSL_I 0x0000
753 #define T_OPCODE_LSR_I 0x0800
754
755 #define T_OPCODE_MOV_I8 0x2000
756 #define T_OPCODE_CMP_I8 0x2800
757 #define T_OPCODE_CMP_LR 0x4280
758 #define T_OPCODE_MOV_HR 0x4600
759 #define T_OPCODE_CMP_HR 0x4500
760
761 #define T_OPCODE_LDR_PC 0x4800
762 #define T_OPCODE_LDR_SP 0x9800
763 #define T_OPCODE_STR_SP 0x9000
764 #define T_OPCODE_LDR_IW 0x6800
765 #define T_OPCODE_STR_IW 0x6000
766 #define T_OPCODE_LDR_IH 0x8800
767 #define T_OPCODE_STR_IH 0x8000
768 #define T_OPCODE_LDR_IB 0x7800
769 #define T_OPCODE_STR_IB 0x7000
770 #define T_OPCODE_LDR_RW 0x5800
771 #define T_OPCODE_STR_RW 0x5000
772 #define T_OPCODE_LDR_RH 0x5a00
773 #define T_OPCODE_STR_RH 0x5200
774 #define T_OPCODE_LDR_RB 0x5c00
775 #define T_OPCODE_STR_RB 0x5400
776
777 #define T_OPCODE_PUSH 0xb400
778 #define T_OPCODE_POP 0xbc00
779
780 #define T_OPCODE_BRANCH 0xe000
781
782 #define THUMB_SIZE 2 /* Size of thumb instruction. */
783 #define THUMB_PP_PC_LR 0x0100
784 #define THUMB_LOAD_BIT 0x0800
785 #define THUMB2_LOAD_BIT 0x00100000
786
/* Common diagnostic strings, wrapped with _() for gettext translation.
   These are assigned to inst.error by the encoding routines.  */
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_SP		_("r13 not allowed here")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
/* Fix: this macro carried a spurious trailing semicolon, which made the
   expansion a full statement; any use such as `else inst.error =
   BAD_ADDR_MODE; else ...' or inside an expression would fail to
   compile.  The semicolon belongs at the use site.  */
#define BAD_ADDR_MODE   _("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
#define BAD_OUT_IT	_("thumb conditional instruction should be in IT block")
#define BAD_IT_COND	_("incorrect condition in IT block")
#define BAD_IT_IT	_("IT falling in the range of a previous IT block")
#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE	_("branch out of range")
#define BAD_FP16	_("selected processor does not support fp16 instruction")
#define UNPRED_REG(R)	_("using " R " results in unpredictable behaviour")
#define THUMB1_RELOC_ONLY	_("relocation valid in thumb1 code only")
810
811 static struct hash_control * arm_ops_hsh;
812 static struct hash_control * arm_cond_hsh;
813 static struct hash_control * arm_shift_hsh;
814 static struct hash_control * arm_psr_hsh;
815 static struct hash_control * arm_v7m_psr_hsh;
816 static struct hash_control * arm_reg_hsh;
817 static struct hash_control * arm_reloc_hsh;
818 static struct hash_control * arm_barrier_opt_hsh;
819
820 /* Stuff needed to resolve the label ambiguity
821 As:
822 ...
823 label: <insn>
824 may differ from:
825 ...
826 label:
827 <insn> */
828
829 symbolS * last_label_seen;
830 static int label_is_thumb_function_name = FALSE;
831
832 /* Literal pool structure. Held on a per-section
833 and per-sub-section basis. */
834
835 #define MAX_LITERAL_POOL_SIZE 1024
836 typedef struct literal_pool
837 {
838 expressionS literals [MAX_LITERAL_POOL_SIZE];
839 unsigned int next_free_entry;
840 unsigned int id;
841 symbolS * symbol;
842 segT section;
843 subsegT sub_section;
844 #ifdef OBJ_ELF
845 struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
846 #endif
847 struct literal_pool * next;
848 unsigned int alignment;
849 } literal_pool;
850
851 /* Pointer to a linked list of literal pools. */
852 literal_pool * list_of_pools = NULL;
853
854 typedef enum asmfunc_states
855 {
856 OUTSIDE_ASMFUNC,
857 WAITING_ASMFUNC_NAME,
858 WAITING_ENDASMFUNC
859 } asmfunc_states;
860
861 static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
862
863 #ifdef OBJ_ELF
864 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
865 #else
866 static struct current_it now_it;
867 #endif
868
869 static inline int
870 now_it_compatible (int cond)
871 {
872 return (cond & ~1) == (now_it.cc & ~1);
873 }
874
875 static inline int
876 conditional_insn (void)
877 {
878 return inst.cond != COND_ALWAYS;
879 }
880
881 static int in_it_block (void);
882
883 static int handle_it_state (void);
884
885 static void force_automatic_it_block_close (void);
886
887 static void it_fsm_post_encode (void);
888
889 #define set_it_insn_type(type) \
890 do \
891 { \
892 inst.it_insn_type = type; \
893 if (handle_it_state () == FAIL) \
894 return; \
895 } \
896 while (0)
897
898 #define set_it_insn_type_nonvoid(type, failret) \
899 do \
900 { \
901 inst.it_insn_type = type; \
902 if (handle_it_state () == FAIL) \
903 return failret; \
904 } \
905 while(0)
906
907 #define set_it_insn_type_last() \
908 do \
909 { \
910 if (inst.cond == COND_ALWAYS) \
911 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
912 else \
913 set_it_insn_type (INSIDE_IT_LAST_INSN); \
914 } \
915 while (0)
916
/* Pure syntax.	 */

/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.	 */
char arm_comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.	*/
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

/* Statement separator -- allows several instructions on one line.  */
char arm_line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.	*/
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
/* As in 0f12.456  */
/* or	 0d1.2345e12  */

const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix characters that indicate the start of an immediate
   value.  */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')

/* Separator character handling.  */

/* Skip at most one space: the input scrubber collapses runs of
   whitespace after a mnemonic down to a single space, so checking one
   character is sufficient.  */
#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)
951
952 static inline int
953 skip_past_char (char ** str, char c)
954 {
955 /* PR gas/14987: Allow for whitespace before the expected character. */
956 skip_whitespace (*str);
957
958 if (**str == c)
959 {
960 (*str)++;
961 return SUCCESS;
962 }
963 else
964 return FAIL;
965 }
966
967 #define skip_past_comma(str) skip_past_char (str, ',')
968
969 /* Arithmetic expressions (possibly involving symbols). */
970
971 /* Return TRUE if anything in the expression is a bignum. */
972
973 static int
974 walk_no_bignums (symbolS * sp)
975 {
976 if (symbol_get_value_expression (sp)->X_op == O_big)
977 return 1;
978
979 if (symbol_get_value_expression (sp)->X_add_symbol)
980 {
981 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
982 || (symbol_get_value_expression (sp)->X_op_symbol
983 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
984 }
985
986 return 0;
987 }
988
/* Nonzero while my_get_expression is running; md_operand uses it to
   mark bad operands found during instruction parsing.  */
static int in_my_get_expression = 0;

/* Third argument to my_get_expression.	 */
#define GE_NO_PREFIX 0		/* No '#'/'$' prefix expected.  */
#define GE_IMM_PREFIX 1		/* '#'/'$' prefix is mandatory.  */
#define GE_OPT_PREFIX 2		/* '#'/'$' prefix is optional.  */
/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3
998
/* Parse an expression from *STR into EP according to PREFIX_MODE (one of
   the GE_* values above).  Returns 0 on success and nonzero on failure
   (note: failure is 1 or FAIL here, NOT the SUCCESS/FAIL convention used
   by most parsers in this file).  On both paths *STR is advanced and, on
   failure, inst.error is set.  */
static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;
  segT seg;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
		  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default: abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* The generic expression parser reads from input_line_pointer, so
     point it at our string for the duration and restore it after.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = 1;	/* Lets md_operand flag bad operands.  */
  seg = expression (ep);
  in_my_get_expression = 0;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }
#else
  (void) seg;
#endif

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.	 Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return 0;
}
1084
1085 /* Turn a string in input_line_pointer into a floating point constant
1086 of type TYPE, and store the appropriate bytes in *LITP. The number
1087 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1088 returned, or NULL on OK.
1089
   Note that fp constants aren't represented in the normal way on the ARM.
1091 In big endian mode, things are as expected. However, in little endian
1092 mode fp constants are big-endian word-wise, and little-endian byte-wise
1093 within the words. For example, (double) 1.1 in big endian mode is
1094 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1095 the byte sequence 99 99 f1 3f 9a 99 99 99.
1096
1097 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1098
const char *
md_atof (int type, char * litP, int * sizeP)
{
  int prec;			/* Number of 16-bit littlenums to emit.  */
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *t;
  int i;

  switch (type)
    {
    /* Single precision: two littlenums (4 bytes).  */
    case 'f':
    case 'F':
    case 's':
    case 'S':
      prec = 2;
      break;

    /* Double precision: four littlenums (8 bytes).  */
    case 'd':
    case 'D':
    case 'r':
    case 'R':
      prec = 4;
      break;

    /* Extended precision: five littlenums.  */
    case 'x':
    case 'X':
      prec = 5;
      break;

    /* NOTE(review): packed 'p'/'P' is handled identically to extended
       'x'/'X' here -- presumably packed BCD is not supported in its own
       format; confirm.  */
    case 'p':
    case 'P':
      prec = 5;
      break;

    default:
      *sizeP = 0;
      return _("Unrecognized or unsupported floating point constant");
    }

  t = atof_ieee (input_line_pointer, type, words);
  if (t)
    input_line_pointer = t;
  *sizeP = prec * sizeof (LITTLENUM_TYPE);

  if (target_big_endian)
    {
      /* Big endian: emit the littlenums in natural order.  */
      for (i = 0; i < prec; i++)
	{
	  md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	  litP += sizeof (LITTLENUM_TYPE);
	}
    }
  else
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	/* Pure little-endian FP format: emit littlenums in reverse.  */
	for (i = prec - 1; i >= 0; i--)
	  {
	    md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	    litP += sizeof (LITTLENUM_TYPE);
	  }
      else
	/* For a 4 byte float the order of elements in `words' is 1 0.
	   For an 8 byte float the order is 1 0 3 2.  */
	for (i = 0; i < prec; i += 2)
	  {
	    md_number_to_chars (litP, (valueT) words[i + 1],
				sizeof (LITTLENUM_TYPE));
	    md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
				(valueT) words[i], sizeof (LITTLENUM_TYPE));
	    litP += 2 * sizeof (LITTLENUM_TYPE);
	  }
    }

  return NULL;
}
1174
1175 /* We handle all bad expressions here, so that we can report the faulty
1176 instruction in the error message. */
1177 void
1178 md_operand (expressionS * exp)
1179 {
1180 if (in_my_get_expression)
1181 exp->X_op = O_illegal;
1182 }
1183
1184 /* Immediate values. */
1185
1186 /* Generic immediate-value read function for use in directives.
1187 Accepts anything that 'expression' can fold to a constant.
1188 *val receives the number. */
1189 #ifdef OBJ_ELF
/* Parse a '#'- or '$'-prefixed constant expression from the input line.
   On success store the value in *VAL and return SUCCESS; otherwise emit
   a diagnostic, discard the rest of the line, and return FAIL.  */
static int
immediate_for_directive (int *val)
{
  expressionS exp;
  exp.X_op = O_illegal;		/* Poison so a missing prefix fails below.  */

  if (is_immediate_prefix (*input_line_pointer))
    {
      input_line_pointer++;
      expression (&exp);
    }

  if (exp.X_op != O_constant)
    {
      as_bad (_("expected #constant"));
      ignore_rest_of_line ();
      return FAIL;
    }
  *val = exp.X_add_number;
  return SUCCESS;
}
1211 #endif
1212
1213 /* Register parsing. */
1214
1215 /* Generic register parser. CCP points to what should be the
1216 beginning of a register name. If it is indeed a valid register
1217 name, advance CCP over it and return the reg_entry structure;
1218 otherwise return NULL. Does not issue diagnostics. */
1219
static struct reg_entry *
arm_reg_parse_multi (char **ccp)
{
  char *start = *ccp;
  char *p;
  struct reg_entry *reg;

  skip_whitespace (start);

#ifdef REGISTER_PREFIX
  /* A mandatory register prefix, when the target defines one.  */
  if (*start != REGISTER_PREFIX)
    return NULL;
  start++;
#endif
#ifdef OPTIONAL_REGISTER_PREFIX
  if (*start == OPTIONAL_REGISTER_PREFIX)
    start++;
#endif

  /* A register name must begin with a letter that is also a valid
     symbol-name starter.  */
  p = start;
  if (!ISALPHA (*p) || !is_name_beginner (*p))
    return NULL;

  /* Scan the rest of the candidate name: letters, digits, '_'.  */
  do
    p++;
  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');

  /* Look the name up in place, without copying it.  */
  reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);

  if (!reg)
    return NULL;

  /* Advance the caller's pointer only on success.  */
  *ccp = p;
  return reg;
}
1255
/* Accept alternative spellings for registers of certain classes; CCP
   points past the (failed or succeeded) standard parse that began at
   START, REG is the entry found by that parse (may be NULL).  Returns a
   register number or FAIL.  */
static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
		    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes.  */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)
	return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here.  */
      {
	unsigned long processor = strtoul (start, ccp, 10);
	if (*ccp != start && processor <= 15)
	  return processor;
      }
      /* Fall through.  NOTE(review): there is no break here, so an
	 unparsed REG_TYPE_CP operand also tries the MMXWCG match below.
	 This appears intentional (the original had no break) -- confirm
	 before adding one.  */

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
	 instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
	return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}
1293
1294 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1295 return value is the register number or FAIL. */
1296
1297 static int
1298 arm_reg_parse (char **ccp, enum arm_reg_type type)
1299 {
1300 char *start = *ccp;
1301 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1302 int ret;
1303
1304 /* Do not allow a scalar (reg+index) to parse as a register. */
1305 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1306 return FAIL;
1307
1308 if (reg && reg->type == type)
1309 return reg->number;
1310
1311 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1312 return ret;
1313
1314 *ccp = start;
1315 return FAIL;
1316 }
1317
1318 /* Parse a Neon type specifier. *STR should point at the leading '.'
1319 character. Does no verification at this stage that the type fits the opcode
1320 properly. E.g.,
1321
1322 .i32.i32.s16
1323 .s32.f32
1324 .u16
1325
1326 Can all be legally parsed by this function.
1327
1328 Fills in neon_type struct pointer with parsed information, and updates STR
1329 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1330 type, FAIL if not. */
1331
1332 static int
1333 parse_neon_type (struct neon_type *type, char **str)
1334 {
1335 char *ptr = *str;
1336
1337 if (type)
1338 type->elems = 0;
1339
1340 while (type->elems < NEON_MAX_TYPE_ELS)
1341 {
1342 enum neon_el_type thistype = NT_untyped;
1343 unsigned thissize = -1u;
1344
1345 if (*ptr != '.')
1346 break;
1347
1348 ptr++;
1349
1350 /* Just a size without an explicit type. */
1351 if (ISDIGIT (*ptr))
1352 goto parsesize;
1353
1354 switch (TOLOWER (*ptr))
1355 {
1356 case 'i': thistype = NT_integer; break;
1357 case 'f': thistype = NT_float; break;
1358 case 'p': thistype = NT_poly; break;
1359 case 's': thistype = NT_signed; break;
1360 case 'u': thistype = NT_unsigned; break;
1361 case 'd':
1362 thistype = NT_float;
1363 thissize = 64;
1364 ptr++;
1365 goto done;
1366 default:
1367 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1368 return FAIL;
1369 }
1370
1371 ptr++;
1372
1373 /* .f is an abbreviation for .f32. */
1374 if (thistype == NT_float && !ISDIGIT (*ptr))
1375 thissize = 32;
1376 else
1377 {
1378 parsesize:
1379 thissize = strtoul (ptr, &ptr, 10);
1380
1381 if (thissize != 8 && thissize != 16 && thissize != 32
1382 && thissize != 64)
1383 {
1384 as_bad (_("bad size %d in type specifier"), thissize);
1385 return FAIL;
1386 }
1387 }
1388
1389 done:
1390 if (type)
1391 {
1392 type->el[type->elems].type = thistype;
1393 type->el[type->elems].size = thissize;
1394 type->elems++;
1395 }
1396 }
1397
1398 /* Empty/missing type is not a successful parse. */
1399 if (type->elems == 0)
1400 return FAIL;
1401
1402 *str = ptr;
1403
1404 return SUCCESS;
1405 }
1406
1407 /* Errors may be set multiple times during parsing or bit encoding
1408 (particularly in the Neon bits), but usually the earliest error which is set
1409 will be the most meaningful. Avoid overwriting it with later (cascading)
1410 errors by calling this function. */
1411
1412 static void
1413 first_error (const char *err)
1414 {
1415 if (!inst.error)
1416 inst.error = err;
1417 }
1418
1419 /* Parse a single type, e.g. ".s32", leading period included. */
1420 static int
1421 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1422 {
1423 char *str = *ccp;
1424 struct neon_type optype;
1425
1426 if (*str == '.')
1427 {
1428 if (parse_neon_type (&optype, &str) == SUCCESS)
1429 {
1430 if (optype.elems == 1)
1431 *vectype = optype.el[0];
1432 else
1433 {
1434 first_error (_("only one type should be specified for operand"));
1435 return FAIL;
1436 }
1437 }
1438 else
1439 {
1440 first_error (_("vector type expected"));
1441 return FAIL;
1442 }
1443 }
1444 else
1445 return FAIL;
1446
1447 *ccp = str;
1448
1449 return SUCCESS;
1450 }
1451
/* Special meanings for indices (which have a range of 0-7), which will fit into
   a 4-bit integer.  */

#define NEON_ALL_LANES		15	/* "[]" -- operate on every lane.  */
#define NEON_INTERLEAVE_LANES	14	/* No index -- interleaved structure.  */
1457
1458 /* Parse either a register or a scalar, with an optional type. Return the
1459 register number, and optionally fill in the actual type of the register
1460 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1461 type/index information in *TYPEINFO. */
1462
static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with "no type, no index" so failure paths hand back a
     well-defined TYPEINFO.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register.  Note these are mutually
     exclusive with the Neon syntax extensions.	 */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  if (type != reg->type)
    return FAIL;

  /* Pick up any type/index information attached to the register name
     (e.g. by .dn/.qn aliases).  */
  if (reg->neon)
    atype = *reg->neon;

  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Optional scalar index: "[n]" or "[]" (all lanes).  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD)
	{
	  first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1566
/* Like arm_reg_parse, but allow the following extra features:
1568 - If RTYPE is non-zero, return the (possibly restricted) type of the
1569 register (e.g. Neon double or quad reg when either has been requested).
1570 - If this is a Neon vector type with additional type information, fill
1571 in the struct pointed to by VECTYPE (if non-NULL).
1572 This function will fault on encountering a scalar. */
1573
1574 static int
1575 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1576 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1577 {
1578 struct neon_typed_alias atype;
1579 char *str = *ccp;
1580 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1581
1582 if (reg == FAIL)
1583 return FAIL;
1584
1585 /* Do not allow regname(... to parse as a register. */
1586 if (*str == '(')
1587 return FAIL;
1588
1589 /* Do not allow a scalar (reg+index) to parse as a register. */
1590 if ((atype.defined & NTA_HASINDEX) != 0)
1591 {
1592 first_error (_("register operand expected, but got scalar"));
1593 return FAIL;
1594 }
1595
1596 if (vectype)
1597 *vectype = atype.eltype;
1598
1599 *ccp = str;
1600
1601 return reg;
1602 }
1603
/* A scalar is encoded as (register << 4) | index -- see parse_scalar,
   which returns reg * 16 + index.  */
#define NEON_SCALAR_REG(X)	((X) >> 4)
#define NEON_SCALAR_INDEX(X)	((X) & 15)
1606
1607 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1608 have enough information to be able to do a good job bounds-checking. So, we
1609 just do easy checks here, and do further checks later. */
1610
1611 static int
1612 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1613 {
1614 int reg;
1615 char *str = *ccp;
1616 struct neon_typed_alias atype;
1617
1618 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1619
1620 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1621 return FAIL;
1622
1623 if (atype.index == NEON_ALL_LANES)
1624 {
1625 first_error (_("scalar must have an index"));
1626 return FAIL;
1627 }
1628 else if (atype.index >= 64 / elsize)
1629 {
1630 first_error (_("scalar index out of range"));
1631 return FAIL;
1632 }
1633
1634 if (type)
1635 *type = atype.eltype;
1636
1637 *ccp = str;
1638
1639 return reg * 16 + atype.index;
1640 }
1641
1642 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1643
1644 static long
1645 parse_reg_list (char ** strp)
1646 {
1647 char * str = * strp;
1648 long range = 0;
1649 int another_range;
1650
1651 /* We come back here if we get ranges concatenated by '+' or '|'. */
1652 do
1653 {
1654 skip_whitespace (str);
1655
1656 another_range = 0;
1657
1658 if (*str == '{')
1659 {
1660 int in_range = 0;
1661 int cur_reg = -1;
1662
1663 str++;
1664 do
1665 {
1666 int reg;
1667
1668 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1669 {
1670 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1671 return FAIL;
1672 }
1673
1674 if (in_range)
1675 {
1676 int i;
1677
1678 if (reg <= cur_reg)
1679 {
1680 first_error (_("bad range in register list"));
1681 return FAIL;
1682 }
1683
1684 for (i = cur_reg + 1; i < reg; i++)
1685 {
1686 if (range & (1 << i))
1687 as_tsktsk
1688 (_("Warning: duplicated register (r%d) in register list"),
1689 i);
1690 else
1691 range |= 1 << i;
1692 }
1693 in_range = 0;
1694 }
1695
1696 if (range & (1 << reg))
1697 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1698 reg);
1699 else if (reg <= cur_reg)
1700 as_tsktsk (_("Warning: register range not in ascending order"));
1701
1702 range |= 1 << reg;
1703 cur_reg = reg;
1704 }
1705 while (skip_past_comma (&str) != FAIL
1706 || (in_range = 1, *str++ == '-'));
1707 str--;
1708
1709 if (skip_past_char (&str, '}') == FAIL)
1710 {
1711 first_error (_("missing `}'"));
1712 return FAIL;
1713 }
1714 }
1715 else
1716 {
1717 expressionS exp;
1718
1719 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1720 return FAIL;
1721
1722 if (exp.X_op == O_constant)
1723 {
1724 if (exp.X_add_number
1725 != (exp.X_add_number & 0x0000ffff))
1726 {
1727 inst.error = _("invalid register mask");
1728 return FAIL;
1729 }
1730
1731 if ((range & exp.X_add_number) != 0)
1732 {
1733 int regno = range & exp.X_add_number;
1734
1735 regno &= -regno;
1736 regno = (1 << regno) - 1;
1737 as_tsktsk
1738 (_("Warning: duplicated register (r%d) in register list"),
1739 regno);
1740 }
1741
1742 range |= exp.X_add_number;
1743 }
1744 else
1745 {
1746 if (inst.reloc.type != 0)
1747 {
1748 inst.error = _("expression too complex");
1749 return FAIL;
1750 }
1751
1752 memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
1753 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1754 inst.reloc.pc_rel = 0;
1755 }
1756 }
1757
1758 if (*str == '|' || *str == '+')
1759 {
1760 str++;
1761 another_range = 1;
1762 }
1763 }
1764 while (another_range);
1765
1766 *strp = str;
1767 return range;
1768 }
1769
1770 /* Types of registers in a list. */
1771
enum reg_list_els
{
  REGLIST_VFP_S,	/* Single-precision S registers.  */
  REGLIST_VFP_D,	/* Double-precision D registers.  */
  REGLIST_NEON_D	/* Neon D registers (Q-register syntax allowed).  */
};
1778
1779 /* Parse a VFP register list. If the string is invalid return FAIL.
1780 Otherwise return the number of registers, and set PBASE to the first
1781 register. Parses registers of type ETYPE.
1782 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1783 - Q registers can be used to specify pairs of D registers
1784 - { } can be omitted from around a singleton register list
1785 FIXME: This is not implemented, as it would require backtracking in
1786 some cases, e.g.:
1787 vtbl.8 d3,d4,d5
1788 This could be done (the meaning isn't really ambiguous), but doesn't
1789 fit in well with the current parsing framework.
1790 - 32 D registers may be used (also true for VFPv3).
1791 FIXME: Types are ignored in these register lists, which is probably a
1792 bug. */
1793
static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
{
  char *str = *ccp;
  int base_reg;
  int new_base;
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
  int max_regs = 0;
  int count = 0;
  int warned = 0;
  unsigned long mask = 0;
  int i;

  if (skip_past_char (&str, '{') == FAIL)
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  /* Select the register class; the register-count limit for D registers
     depends on the FPU and is set further below.  */
  switch (etype)
    {
    case REGLIST_VFP_S:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;
    }

  if (etype != REGLIST_VFP_S)
    {
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  max_regs = 32;
	  /* Record that the D16-D31 extension was actually used.  */
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	max_regs = 16;
    }

  /* One past the highest register, so the first parse always lowers it.  */
  base_reg = max_regs;

  do
    {
      int setmask = 1, addregs = 1;

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)
	{
	  setmask = 3;
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      if ((mask >> new_base) != 0 && ! warned)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = gettext (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  /* Q<n> occupies two D slots, so widen the range end by one.  */
	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  /* NOTE(review): this advances past the terminator without verifying it
     is '}' -- malformed lists appear to be rejected by the register
     parse above before reaching here; confirm.  */
  str++;

  /* Sanity check -- should have raised a parse error above.  */
  if (count == 0 || count > max_regs)
    abort ();

  *pbase = base_reg;

  /* Final test -- the registers must be consecutive.  */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }

  *ccp = str;

  return count;
}
1957
1958 /* True if two alias types are the same. */
1959
1960 static bfd_boolean
1961 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1962 {
1963 if (!a && !b)
1964 return TRUE;
1965
1966 if (!a || !b)
1967 return FALSE;
1968
1969 if (a->defined != b->defined)
1970 return FALSE;
1971
1972 if ((a->defined & NTA_HASTYPE) != 0
1973 && (a->eltype.type != b->eltype.type
1974 || a->eltype.size != b->eltype.size))
1975 return FALSE;
1976
1977 if ((a->defined & NTA_HASINDEX) != 0
1978 && (a->index != b->index))
1979 return FALSE;
1980
1981 return TRUE;
1982 }
1983
1984 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1985 The base register is put in *PBASE.
1986 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1987 the return value.
1988 The register stride (minus one) is put in bit 4 of the return value.
1989 Bits [6:5] encode the list length (minus one).
1990 The type of the list elements is put in *ELTYPE, if non-NULL. */
1991
/* Accessors for the value returned by parse_neon_el_struct_list below:
   lane in bits [3:0], stride-minus-one in bit 4, length-minus-one in
   bits [6:5].  */
#define NEON_LANE(X)		((X) & 0xf)
#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)
#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)

static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;
  int reg_incr = -1;		/* Register stride; -1 = not yet known.  */
  int count = 0;		/* Number of D registers seen so far.  */
  int lane = -1;		/* Common lane; -1 = not yet known.  */
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;
  firsttype.defined = 0;
  firsttype.eltype.type = NT_invtype;
  firsttype.eltype.size = -1;
  firsttype.index = -1;

  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First register: fixes the base and the reference type.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second register: fixes the stride.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  first_error (_(incr_error));
	  return FAIL;
	}

      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax.	Can only be used with non-indexed list
	 modes.	 */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Count all D registers covered by the range (a Q register
	     contributes two).  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* All entries must name the same lane.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Pack lane, stride and length into the encoding described above.  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2152
2153 /* Parse an explicit relocation suffix on an expression. This is
2154 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2155 arm_reloc_hsh contains no entries, so this function can only
2156 succeed if there is no () after the word. Returns -1 on error,
2157 BFD_RELOC_UNUSED if there wasn't any suffix. */
2158
2159 static int
2160 parse_reloc (char **str)
2161 {
2162 struct reloc_entry *r;
2163 char *p, *q;
2164
2165 if (**str != '(')
2166 return BFD_RELOC_UNUSED;
2167
2168 p = *str + 1;
2169 q = p;
2170
2171 while (*q && *q != ')' && *q != ',')
2172 q++;
2173 if (*q != ')')
2174 return -1;
2175
2176 if ((r = (struct reloc_entry *)
2177 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2178 return -1;
2179
2180 *str = q + 1;
2181 return r->reloc;
2182 }
2183
2184 /* Directives: register aliases. */
2185
2186 static struct reg_entry *
2187 insert_reg_alias (char *str, unsigned number, int type)
2188 {
2189 struct reg_entry *new_reg;
2190 const char *name;
2191
2192 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2193 {
2194 if (new_reg->builtin)
2195 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2196
2197 /* Only warn about a redefinition if it's not defined as the
2198 same register. */
2199 else if (new_reg->number != number || new_reg->type != type)
2200 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2201
2202 return NULL;
2203 }
2204
2205 name = xstrdup (str);
2206 new_reg = XNEW (struct reg_entry);
2207
2208 new_reg->name = name;
2209 new_reg->number = number;
2210 new_reg->type = type;
2211 new_reg->builtin = FALSE;
2212 new_reg->neon = NULL;
2213
2214 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2215 abort ();
2216
2217 return new_reg;
2218 }
2219
2220 static void
2221 insert_neon_reg_alias (char *str, int number, int type,
2222 struct neon_typed_alias *atype)
2223 {
2224 struct reg_entry *reg = insert_reg_alias (str, number, type);
2225
2226 if (!reg)
2227 {
2228 first_error (_("attempt to redefine typed alias"));
2229 return;
2230 }
2231
2232 if (atype)
2233 {
2234 reg->neon = XNEW (struct neon_typed_alias);
2235 *reg->neon = *atype;
2236 }
2237 }
2238
2239 /* Look for the .req directive. This is of the form:
2240
2241 new_register_name .req existing_register_name
2242
2243 If we find one, or if it looks sufficiently like one that we want to
2244 handle any error here, return TRUE. Otherwise return FALSE. */
2245
static bfd_boolean
create_register_alias (char * newname, char *p)
{
  struct reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
  if (!old)
    {
      /* Looked like a .req but names an unknown register: report it,
	 and return TRUE so the caller does not try other parses.  */
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  /* Work on a NUL-terminated private copy so it can be case-folded
     in place below.  */
  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only insert the folded variant if it actually differs from the
	 name as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
2318
2319 /* Create a Neon typed/indexed register alias using directives, e.g.:
2320 X .dn d5.s32[1]
2321 Y .qn 6.s16
2322 Z .dn d7
2323 T .dn Z[0]
2324 These typed registers can be used instead of the types specified after the
2325 Neon mnemonic, so long as all operands given have types. Types can also be
2326 specified directly, e.g.:
2327 vadd d0.s32, d1.s32, d2.s32 */
2328
static bfd_boolean
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend ATTRIBUTE_UNUSED;
  int namelen;

  /* Start with no type and no index recorded.  */
  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  nameend = p;

  /* .dn defines a D-register alias, .qn a Q-register alias.  */
  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;
  else
    return FALSE;

  p += 5;

  if (*p == '\0')
    return FALSE;

  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return FALSE;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return FALSE;
	}
      /* Fake up a register entry on the stack for the bare number.
	 Q registers are numbered in D-register units, hence * 2.  */
      basereg = &mybasereg;
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* Inherit any type/index already attached to the base register.  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return FALSE;
	}
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index.  */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return FALSE;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return FALSE;
	}
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  namelen = nameend - newname;
#else
  newname = original_case_string;
  namelen = strlen (newname);
#endif

  /* Private NUL-terminated copy, case-folded in place below.  */
  namebuf = xmemdup0 (newname, namelen);

  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  free (namebuf);
  return TRUE;
}
2467
2468 /* Should never be called, as .req goes between the alias and the
2469 register name, not at the beginning of the line. */
2470
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* .req is handled by create_register_alias when it appears between
     the alias and the register name; reaching this handler means it
     appeared at the start of a line, which is invalid.  */
  as_bad (_("invalid syntax for .req directive"));
}
2476
static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  /* Like s_req: .dn is only valid after an alias name (see
     create_neon_reg_alias), never at the start of a line.  */
  as_bad (_("invalid syntax for .dn directive"));
}
2482
static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  /* Like s_req: .qn is only valid after an alias name (see
     create_neon_reg_alias), never at the start of a line.  */
  as_bad (_("invalid syntax for .qn directive"));
}
2488
2489 /* The .unreq directive deletes an alias which was previously defined
2490 by .req. For example:
2491
2492 my_alias .req r11
2493 .unreq my_alias */
2494
2495 static void
2496 s_unreq (int a ATTRIBUTE_UNUSED)
2497 {
2498 char * name;
2499 char saved_char;
2500
2501 name = input_line_pointer;
2502
2503 while (*input_line_pointer != 0
2504 && *input_line_pointer != ' '
2505 && *input_line_pointer != '\n')
2506 ++input_line_pointer;
2507
2508 saved_char = *input_line_pointer;
2509 *input_line_pointer = 0;
2510
2511 if (!*name)
2512 as_bad (_("invalid syntax for .unreq directive"));
2513 else
2514 {
2515 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2516 name);
2517
2518 if (!reg)
2519 as_bad (_("unknown register alias '%s'"), name);
2520 else if (reg->builtin)
2521 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2522 name);
2523 else
2524 {
2525 char * p;
2526 char * nbuf;
2527
2528 hash_delete (arm_reg_hsh, name, FALSE);
2529 free ((char *) reg->name);
2530 if (reg->neon)
2531 free (reg->neon);
2532 free (reg);
2533
2534 /* Also locate the all upper case and all lower case versions.
2535 Do not complain if we cannot find one or the other as it
2536 was probably deleted above. */
2537
2538 nbuf = strdup (name);
2539 for (p = nbuf; *p; p++)
2540 *p = TOUPPER (*p);
2541 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2542 if (reg)
2543 {
2544 hash_delete (arm_reg_hsh, nbuf, FALSE);
2545 free ((char *) reg->name);
2546 if (reg->neon)
2547 free (reg->neon);
2548 free (reg);
2549 }
2550
2551 for (p = nbuf; *p; p++)
2552 *p = TOLOWER (*p);
2553 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2554 if (reg)
2555 {
2556 hash_delete (arm_reg_hsh, nbuf, FALSE);
2557 free ((char *) reg->name);
2558 if (reg->neon)
2559 free (reg->neon);
2560 free (reg);
2561 }
2562
2563 free (nbuf);
2564 }
2565 }
2566
2567 *input_line_pointer = saved_char;
2568 demand_empty_rest_of_line ();
2569 }
2570
2571 /* Directives: Instruction set selection. */
2572
2573 #ifdef OBJ_ELF
2574 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2575 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2576 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2577 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2578
2579 /* Create a new mapping symbol for the transition to STATE. */
2580
static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  /* Pick the AAELF mapping-symbol name for this state: $d for data,
     $a for ARM code, $t for Thumb code.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Record the ARM/Thumb attribute on the code mapping symbols; data
     symbols carry neither.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      /* Two symbols at the same offset: the newer one wins.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
2654
2655 /* We must sometimes convert a region marked as code to data during
2656 code alignment, if an odd number of bytes have to be padded. The
2657 code mapping symbol is pushed to an aligned address. */
2658
static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS *frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the padding bytes as data, then resume the code mapping at
     the first address after them.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
2681
2682 static void mapping_state_2 (enum mstate state, int max_chars);
2683
2684 /* Set the mapping state to STATE. Only call this when about to
2685 emit some STATE bytes to the file. */
2686
2687 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (state == MAP_ARM || state == MAP_THUMB)
    /* PR gas/12931
       All ARM instructions require 4-byte alignment.
       (Almost) all Thumb instructions require 2-byte alignment.

       When emitting instructions into any section, mark the section
       appropriately.

       Some Thumb instructions are alignment-sensitive modulo 4 bytes,
       but themselves require 2-byte alignment; this applies to some
       PC- relative forms.  However, these cases will involve implicit
       literal pool generation or an explicit .align >=2, both of
       which will cause the section to be marked with sufficient
       alignment.  Thus, we don't handle those cases here.  */
    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);

  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later.  */
    return;

  mapping_state_2 (state, 0);
}
2720
2721 /* Same as mapping_state, but MAX_CHARS bytes have already been
2722 allocated. Put the mapping symbol that far back. */
2723
static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Mapping symbols are only emitted into normal sections.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
      || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
    {
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);

      /* Something was emitted into this section before its first
	 instruction; retroactively mark the start of the section as
	 data (deferred from mapping_state's MAP_UNDEFINED->MAP_DATA
	 case).  */
      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
2750 #undef TRANSITION
2751 #else
2752 #define mapping_state(x) ((void)0)
2753 #define mapping_state_2(x, y) ((void)0)
2754 #endif
2755
2756 /* Find the real, Thumb encoded start of a Thumb function. */
2757
2758 #ifdef OBJ_COFF
2759 static symbolS *
2760 find_real_start (symbolS * symbolP)
2761 {
2762 char * real_start;
2763 const char * name = S_GET_NAME (symbolP);
2764 symbolS * new_target;
2765
2766 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2767 #define STUB_NAME ".real_start_of"
2768
2769 if (name == NULL)
2770 abort ();
2771
2772 /* The compiler may generate BL instructions to local labels because
2773 it needs to perform a branch to a far away location. These labels
2774 do not have a corresponding ".real_start_of" label. We check
2775 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2776 the ".real_start_of" convention for nonlocal branches. */
2777 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2778 return symbolP;
2779
2780 real_start = concat (STUB_NAME, name, NULL);
2781 new_target = symbol_find (real_start);
2782 free (real_start);
2783
2784 if (new_target == NULL)
2785 {
2786 as_warn (_("Failed to find real start of function: %s\n"), name);
2787 new_target = symbolP;
2788 }
2789
2790 return new_target;
2791 }
2792 #endif
2793
2794 static void
2795 opcode_select (int width)
2796 {
2797 switch (width)
2798 {
2799 case 16:
2800 if (! thumb_mode)
2801 {
2802 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2803 as_bad (_("selected processor does not support THUMB opcodes"));
2804
2805 thumb_mode = 1;
2806 /* No need to force the alignment, since we will have been
2807 coming from ARM mode, which is word-aligned. */
2808 record_alignment (now_seg, 1);
2809 }
2810 break;
2811
2812 case 32:
2813 if (thumb_mode)
2814 {
2815 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2816 as_bad (_("selected processor does not support ARM opcodes"));
2817
2818 thumb_mode = 0;
2819
2820 if (!need_pass_2)
2821 frag_align (2, 0, 0);
2822
2823 record_alignment (now_seg, 1);
2824 }
2825 break;
2826
2827 default:
2828 as_bad (_("invalid instruction size selected (%d)"), width);
2829 }
2830 }
2831
static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  /* Handle the ".arm" directive: select 32-bit ARM encoding.  */
  opcode_select (32);
  demand_empty_rest_of_line ();
}
2838
static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* Handle the ".thumb" directive: select 16-bit Thumb encoding.  */
  opcode_select (16);
  demand_empty_rest_of_line ();
}
2845
2846 static void
2847 s_code (int unused ATTRIBUTE_UNUSED)
2848 {
2849 int temp;
2850
2851 temp = get_absolute_expression ();
2852 switch (temp)
2853 {
2854 case 16:
2855 case 32:
2856 opcode_select (temp);
2857 break;
2858
2859 default:
2860 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2861 }
2862 }
2863
2864 static void
2865 s_force_thumb (int ignore ATTRIBUTE_UNUSED)
2866 {
2867 /* If we are not already in thumb mode go into it, EVEN if
2868 the target processor does not support thumb instructions.
2869 This is used by gcc/config/arm/lib1funcs.asm for example
2870 to compile interworking support functions even if the
2871 target processor should not support interworking. */
2872 if (! thumb_mode)
2873 {
2874 thumb_mode = 2;
2875 record_alignment (now_seg, 1);
2876 }
2877
2878 demand_empty_rest_of_line ();
2879 }
2880
static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  /* ".thumb_func": switch to Thumb mode, and flag the next label.  */
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
}
2890
2891 /* Perform a .set directive, but also mark the alias as
2892 being a thumb function. */
2893
2894 static void
2895 s_thumb_set (int equiv)
2896 {
2897 /* XXX the following is a duplicate of the code for s_set() in read.c
2898 We cannot just call that code as we need to get at the symbol that
2899 is created. */
2900 char * name;
2901 char delim;
2902 char * end_name;
2903 symbolS * symbolP;
2904
2905 /* Especial apologies for the random logic:
2906 This just grew, and could be parsed much more simply!
2907 Dean - in haste. */
2908 delim = get_symbol_name (& name);
2909 end_name = input_line_pointer;
2910 (void) restore_line_pointer (delim);
2911
2912 if (*input_line_pointer != ',')
2913 {
2914 *end_name = 0;
2915 as_bad (_("expected comma after name \"%s\""), name);
2916 *end_name = delim;
2917 ignore_rest_of_line ();
2918 return;
2919 }
2920
2921 input_line_pointer++;
2922 *end_name = 0;
2923
2924 if (name[0] == '.' && name[1] == '\0')
2925 {
2926 /* XXX - this should not happen to .thumb_set. */
2927 abort ();
2928 }
2929
2930 if ((symbolP = symbol_find (name)) == NULL
2931 && (symbolP = md_undefined_symbol (name)) == NULL)
2932 {
2933 #ifndef NO_LISTING
2934 /* When doing symbol listings, play games with dummy fragments living
2935 outside the normal fragment chain to record the file and line info
2936 for this symbol. */
2937 if (listing & LISTING_SYMBOLS)
2938 {
2939 extern struct list_info_struct * listing_tail;
2940 fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));
2941
2942 memset (dummy_frag, 0, sizeof (fragS));
2943 dummy_frag->fr_type = rs_fill;
2944 dummy_frag->line = listing_tail;
2945 symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
2946 dummy_frag->fr_symbol = symbolP;
2947 }
2948 else
2949 #endif
2950 symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
2951
2952 #ifdef OBJ_COFF
2953 /* "set" symbols are local unless otherwise specified. */
2954 SF_SET_LOCAL (symbolP);
2955 #endif /* OBJ_COFF */
2956 } /* Make a new symbol. */
2957
2958 symbol_table_insert (symbolP);
2959
2960 * end_name = delim;
2961
2962 if (equiv
2963 && S_IS_DEFINED (symbolP)
2964 && S_GET_SEGMENT (symbolP) != reg_section)
2965 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
2966
2967 pseudo_set (symbolP);
2968
2969 demand_empty_rest_of_line ();
2970
2971 /* XXX Now we come to the Thumb specific bit of code. */
2972
2973 THUMB_SET_FUNC (symbolP, 1);
2974 ARM_SET_THUMB (symbolP, 1);
2975 #if defined OBJ_ELF || defined OBJ_COFF
2976 ARM_SET_INTERWORK (symbolP, support_interwork);
2977 #endif
2978 }
2979
2980 /* Directives: Mode selection. */
2981
2982 /* .syntax [unified|divided] - choose the new unified syntax
2983 (same for Arm and Thumb encoding, modulo slight differences in what
2984 can be represented) or the old divergent syntax for each mode. */
2985 static void
2986 s_syntax (int unused ATTRIBUTE_UNUSED)
2987 {
2988 char *name, delim;
2989
2990 delim = get_symbol_name (& name);
2991
2992 if (!strcasecmp (name, "unified"))
2993 unified_syntax = TRUE;
2994 else if (!strcasecmp (name, "divided"))
2995 unified_syntax = FALSE;
2996 else
2997 {
2998 as_bad (_("unrecognized syntax mode \"%s\""), name);
2999 return;
3000 }
3001 (void) restore_line_pointer (delim);
3002 demand_empty_rest_of_line ();
3003 }
3004
3005 /* Directives: sectioning and alignment. */
3006
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  /* Give the object format a chance to react to the section change.  */
  md_elf_section_change_hook ();
#endif
}
3019
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Handle ".even": align to a 2-byte boundary.  */
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
3031
3032 /* Directives: CodeComposer Studio. */
3033
3034 /* .ref (for CodeComposer Studio syntax only). */
3035 static void
3036 s_ccs_ref (int unused ATTRIBUTE_UNUSED)
3037 {
3038 if (codecomposer_syntax)
3039 ignore_rest_of_line ();
3040 else
3041 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3042 }
3043
/* If name is not NULL, then it is used for marking the beginning of a
   function, whereas if it is NULL then it means the function end.  */
3046 static void
3047 asmfunc_debug (const char * name)
3048 {
3049 static const char * last_name = NULL;
3050
3051 if (name != NULL)
3052 {
3053 gas_assert (last_name == NULL);
3054 last_name = name;
3055
3056 if (debug_type == DEBUG_STABS)
3057 stabs_generate_asm_func (name, name);
3058 }
3059 else
3060 {
3061 gas_assert (last_name != NULL);
3062
3063 if (debug_type == DEBUG_STABS)
3064 stabs_generate_asm_endfunc (last_name, last_name);
3065
3066 last_name = NULL;
3067 }
3068 }
3069
3070 static void
3071 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
3072 {
3073 if (codecomposer_syntax)
3074 {
3075 switch (asmfunc_state)
3076 {
3077 case OUTSIDE_ASMFUNC:
3078 asmfunc_state = WAITING_ASMFUNC_NAME;
3079 break;
3080
3081 case WAITING_ASMFUNC_NAME:
3082 as_bad (_(".asmfunc repeated."));
3083 break;
3084
3085 case WAITING_ENDASMFUNC:
3086 as_bad (_(".asmfunc without function."));
3087 break;
3088 }
3089 demand_empty_rest_of_line ();
3090 }
3091 else
3092 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3093 }
3094
3095 static void
3096 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
3097 {
3098 if (codecomposer_syntax)
3099 {
3100 switch (asmfunc_state)
3101 {
3102 case OUTSIDE_ASMFUNC:
3103 as_bad (_(".endasmfunc without a .asmfunc."));
3104 break;
3105
3106 case WAITING_ASMFUNC_NAME:
3107 as_bad (_(".endasmfunc without function."));
3108 break;
3109
3110 case WAITING_ENDASMFUNC:
3111 asmfunc_state = OUTSIDE_ASMFUNC;
3112 asmfunc_debug (NULL);
3113 break;
3114 }
3115 demand_empty_rest_of_line ();
3116 }
3117 else
3118 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3119 }
3120
3121 static void
3122 s_ccs_def (int name)
3123 {
3124 if (codecomposer_syntax)
3125 s_globl (name);
3126 else
3127 as_bad (_(".def pseudo-op only available with -mccs flag."));
3128 }
3129
3130 /* Directives: Literal pools. */
3131
3132 static literal_pool *
3133 find_literal_pool (void)
3134 {
3135 literal_pool * pool;
3136
3137 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3138 {
3139 if (pool->section == now_seg
3140 && pool->sub_section == now_subseg)
3141 break;
3142 }
3143
3144 return pool;
3145 }
3146
3147 static literal_pool *
3148 find_or_make_literal_pool (void)
3149 {
3150 /* Next literal pool ID number. */
3151 static unsigned int latest_pool_num = 1;
3152 literal_pool * pool;
3153
3154 pool = find_literal_pool ();
3155
3156 if (pool == NULL)
3157 {
3158 /* Create a new pool. */
3159 pool = XNEW (literal_pool);
3160 if (! pool)
3161 return NULL;
3162
3163 pool->next_free_entry = 0;
3164 pool->section = now_seg;
3165 pool->sub_section = now_subseg;
3166 pool->next = list_of_pools;
3167 pool->symbol = NULL;
3168 pool->alignment = 2;
3169
3170 /* Add it to the list. */
3171 list_of_pools = pool;
3172 }
3173
3174 /* New pools, and emptied pools, will have a NULL symbol. */
3175 if (pool->symbol == NULL)
3176 {
3177 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3178 (valueT) 0, &zero_address_frag);
3179 pool->id = latest_pool_num ++;
3180 }
3181
3182 /* Done. */
3183 return pool;
3184 }
3185
3186 /* Add the literal in the global 'inst'
3187 structure to the relevant literal pool. */
3188
static int
add_to_lit_pool (unsigned int nbytes)
{
/* An entry's X_md field holds its size in the low byte and flags in
   the high byte; PADDING_SLOT marks a 4-byte alignment filler that a
   later 4-byte literal may reuse.  */
#define PADDING_SLOT 0x1
#define LIT_ENTRY_SIZE_MASK 0xFF
  literal_pool * pool;
  unsigned int entry, pool_size = 0;
  bfd_boolean padding_slot_p = FALSE;
  unsigned imm1 = 0;
  unsigned imm2 = 0;

  if (nbytes == 8)
    {
      /* Split a 64-bit literal into two 32-bit words, swapped for
	 big-endian targets.  */
      imm1 = inst.operands[1].imm;
      imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
	     : inst.reloc.exp.X_unsigned ? 0
	     : ((bfd_int64_t) inst.operands[1].imm) >> 32);
      if (target_big_endian)
	{
	  imm1 = imm2;
	  imm2 = inst.operands[1].imm;
	}
    }

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if (nbytes == 4)
	{
	  /* Match an existing 4-byte constant...  */
	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	      && (inst.reloc.exp.X_op == O_constant)
	      && (pool->literals[entry].X_add_number
		  == inst.reloc.exp.X_add_number)
	      && (pool->literals[entry].X_md == nbytes)
	      && (pool->literals[entry].X_unsigned
		  == inst.reloc.exp.X_unsigned))
	    break;

	  /* ... or an existing symbolic entry.  */
	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	      && (inst.reloc.exp.X_op == O_symbol)
	      && (pool->literals[entry].X_add_number
		  == inst.reloc.exp.X_add_number)
	      && (pool->literals[entry].X_add_symbol
		  == inst.reloc.exp.X_add_symbol)
	      && (pool->literals[entry].X_op_symbol
		  == inst.reloc.exp.X_op_symbol)
	      && (pool->literals[entry].X_md == nbytes))
	    break;
	}
      /* An 8-byte value only matches an 8-byte-aligned pair of
	 consecutive 4-byte entries holding the same two words.  */
      else if ((nbytes == 8)
	       && !(pool_size & 0x7)
	       && ((entry + 1) != pool->next_free_entry)
	       && (pool->literals[entry].X_op == O_constant)
	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
	       && (pool->literals[entry].X_unsigned
		   == inst.reloc.exp.X_unsigned)
	       && (pool->literals[entry + 1].X_op == O_constant)
	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
	       && (pool->literals[entry + 1].X_unsigned
		   == inst.reloc.exp.X_unsigned))
	break;

      /* A 4-byte literal may recycle a padding slot left by an earlier
	 8-byte alignment.  */
      padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
      if (padding_slot_p && (nbytes == 4))
	break;

      pool_size += 4;
    }

  /* Do we need to create a new entry?	*/
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      if (nbytes == 8)
	{
	  /* For 8-byte entries, we align to an 8-byte boundary,
	     and split it into two 4-byte entries, because on 32-bit
	     host, 8-byte constants are treated as big num, thus
	     saved in "generic_bignum" which will be overwritten
	     by later assignments.

	     We also need to make sure there is enough space for
	     the split.

	     We also check to make sure the literal operand is a
	     constant number.  */
	  if (!(inst.reloc.exp.X_op == O_constant
		|| inst.reloc.exp.X_op == O_big))
	    {
	      inst.error = _("invalid type for literal pool");
	      return FAIL;
	    }
	  else if (pool_size & 0x7)
	    {
	      /* Pool is misaligned for an 8-byte value: emit a 4-byte
		 padding slot first.  */
	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
		{
		  inst.error = _("literal pool overflow");
		  return FAIL;
		}

	      pool->literals[entry] = inst.reloc.exp;
	      pool->literals[entry].X_op = O_constant;
	      pool->literals[entry].X_add_number = 0;
	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
	      pool->next_free_entry += 1;
	      pool_size += 4;
	    }
	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
	    {
	      inst.error = _("literal pool overflow");
	      return FAIL;
	    }

	  /* Store the two 32-bit halves as consecutive entries.  */
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm1;
	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
	  pool->literals[entry++].X_md = 4;
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm2;
	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
	  pool->literals[entry].X_md = 4;
	  pool->alignment = 3;
	  pool->next_free_entry += 1;
	}
      else
	{
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_md = 4;
	}

#ifdef OBJ_ELF
      /* PR ld/12974: Record the location of the first source line to reference
	 this entry in the literal pool.  If it turns out during linking that the
	 symbol does not exist we will be able to give an accurate line number for
	 the (first use of the) missing reference.  */
      if (debug_type == DEBUG_DWARF2)
	dwarf2_where (pool->locs + entry);
#endif
      pool->next_free_entry += 1;
    }
  else if (padding_slot_p)
    {
      /* Reuse the padding slot for this 4-byte literal.  */
      pool->literals[entry] = inst.reloc.exp;
      pool->literals[entry].X_md = nbytes;
    }

  /* Rewrite the instruction's relocation to reference the pool entry:
     pool symbol plus the entry's byte offset.  */
  inst.reloc.exp.X_op = O_symbol;
  inst.reloc.exp.X_add_number = pool_size;
  inst.reloc.exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
3350
3351 bfd_boolean
3352 tc_start_label_without_colon (void)
3353 {
3354 bfd_boolean ret = TRUE;
3355
3356 if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
3357 {
3358 const char *label = input_line_pointer;
3359
3360 while (!is_end_of_line[(int) label[-1]])
3361 --label;
3362
3363 if (*label == '.')
3364 {
3365 as_bad (_("Invalid label '%s'"), label);
3366 ret = FALSE;
3367 }
3368
3369 asmfunc_debug (label);
3370
3371 asmfunc_state = WAITING_ENDASMFUNC;
3372 }
3373
3374 return ret;
3375 }
3376
3377 /* Can't use symbol_new here, so have to create a symbol and then at
3378 a later date assign it a value. Thats what these functions do. */
3379
3380 static void
3381 symbol_locate (symbolS * symbolP,
3382 const char * name, /* It is copied, the caller can modify. */
3383 segT segment, /* Segment identifier (SEG_<something>). */
3384 valueT valu, /* Symbol value. */
3385 fragS * frag) /* Associated fragment. */
3386 {
3387 size_t name_length;
3388 char * preserved_copy_of_name;
3389
3390 name_length = strlen (name) + 1; /* +1 for \0. */
3391 obstack_grow (&notes, name, name_length);
3392 preserved_copy_of_name = (char *) obstack_finish (&notes);
3393
3394 #ifdef tc_canonicalize_symbol_name
3395 preserved_copy_of_name =
3396 tc_canonicalize_symbol_name (preserved_copy_of_name);
3397 #endif
3398
3399 S_SET_NAME (symbolP, preserved_copy_of_name);
3400
3401 S_SET_SEGMENT (symbolP, segment);
3402 S_SET_VALUE (symbolP, valu);
3403 symbol_clear_list_pointers (symbolP);
3404
3405 symbol_set_frag (symbolP, frag);
3406
3407 /* Link to end of symbol chain. */
3408 {
3409 extern int symbol_table_frozen;
3410
3411 if (symbol_table_frozen)
3412 abort ();
3413 }
3414
3415 symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
3416
3417 obj_symbol_new_hook (symbolP);
3418
3419 #ifdef tc_symbol_new_hook
3420 tc_symbol_new_hook (symbolP);
3421 #endif
3422
3423 #ifdef DEBUG_SYMS
3424 verify_symbol_chain (symbol_rootP, symbol_lastP);
3425 #endif /* DEBUG_SYMS */
3426 }
3427
/* Implement the .ltorg / .pool directives: emit the literal pool
   accumulated for the current section at the current location, then
   mark the pool as flushed.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  /* Nothing to do if there is no pool, or it is empty.  */
  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (pool->alignment, 0, 0);

  record_alignment (now_seg, 2);

#ifdef OBJ_ELF
  /* The pool is data: switch the mapping state and emit a $d mapping
     symbol at its start.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
#endif
  /* The \002 byte makes the generated name impossible to write in
     user source, avoiding clashes.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  /* Give the pool's placeholder symbol its final location and insert
     it into the symbol table.  */
  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
#ifdef OBJ_ELF
      if (debug_type == DEBUG_DWARF2)
	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
#endif
      /* First output the expression in the instruction to the pool.  */
      /* The low bits of X_md hold the entry's size in bytes.  */
      emit_expr (&(pool->literals[entry]),
		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
    }

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3479
3480 #ifdef OBJ_ELF
3481 /* Forward declarations for functions below, in the MD interface
3482 section. */
3483 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3484 static valueT create_unwind_entry (int);
3485 static void start_unwind_section (const segT, int);
3486 static void add_unwind_opcode (valueT, int);
3487 static void flush_pending_unwind (void);
3488
3489 /* Directives: Data. */
3490
/* Implement .word / .long: like the generic cons, but each expression
   may carry a relocation suffix such as (GOT) or (PLT).  NBYTES is the
   size of each emitted value.  */

static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  char *before_reloc = input_line_pointer;
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = (reloc_howto_type *)
		  bfd_reloc_type_lookup (stdoutput,
					 (bfd_reloc_code_real_type) reloc);
	      int size = bfd_get_reloc_size (howto);

	      /* (plt) is reserved for branch instructions.  */
	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (_("%s relocations do not fit in %d bytes"),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;
		  int offset;
		  char *save_buf = XNEWVEC (char, input_line_pointer - base);

		  /* Save the input, slide the text after the reloc
		     suffix up against the text before it so the whole
		     expression is contiguous, re-parse, then restore
		     the original input bytes.  */
		  memcpy (save_buf, base, input_line_pointer - base);
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  memcpy (base, save_buf, p - base);

		  /* Pad so the relocated field sits in the low SIZE
		     bytes of the NBYTES-wide slot.  */
		  offset = nbytes - size;
		  p = frag_more (nbytes);
		  memset (p, 0, nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
		  free (save_buf);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3583
3584 /* Emit an expression containing a 32-bit thumb instruction.
3585 Implementation based on put_thumb32_insn. */
3586
3587 static void
3588 emit_thumb32_expr (expressionS * exp)
3589 {
3590 expressionS exp_high = *exp;
3591
3592 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3593 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3594 exp->X_add_number &= 0xffff;
3595 emit_expr (exp, (unsigned int) THUMB_SIZE);
3596 }
3597
3598 /* Guess the instruction size based on the opcode. */
3599
static int
thumb_insn_size (int opcode)
{
  unsigned int op = (unsigned int) opcode;

  /* 16-bit Thumb encodings have a leading halfword below 0xe800; a
     value with its top halfword at or above 0xe800 must be a 32-bit
     encoding.  Anything in between is ambiguous.  */
  if (op < 0xe800u)
    return 2;
  return op >= 0xe8000000u ? 4 : 0;
}
3610
/* Emit one .inst operand EXP as an instruction of NBYTES bytes
   (0 means deduce the size from the opcode value).  Returns TRUE if
   something was emitted, FALSE on error.  */

static bfd_boolean
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      /* Bare .inst: guess the width from the opcode value.  */
      if (size == 0)
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  /* A .inst.n operand must fit in a halfword.  */
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "\
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      /* Keep the IT-block state machine consistent, as if a
		 real instruction had been assembled.  */
	      if (now_it.state == AUTOMATIC_IT_BLOCK)
		set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
	      else
		set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      /* Little-endian 32-bit Thumb values are emitted as two
		 halfwords, high half first.  */
	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. " \
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
3655
3656 /* Like s_arm_elf_cons but do not use md_cons_align and
3657 set the mapping state to MAP_ARM/MAP_THUMB. */
3658
3659 static void
3660 s_arm_elf_inst (int nbytes)
3661 {
3662 if (is_it_end_of_statement ())
3663 {
3664 demand_empty_rest_of_line ();
3665 return;
3666 }
3667
3668 /* Calling mapping_state () here will not change ARM/THUMB,
3669 but will ensure not to be in DATA state. */
3670
3671 if (thumb_mode)
3672 mapping_state (MAP_THUMB);
3673 else
3674 {
3675 if (nbytes != 0)
3676 {
3677 as_bad (_("width suffixes are invalid in ARM mode"));
3678 ignore_rest_of_line ();
3679 return;
3680 }
3681
3682 nbytes = 4;
3683
3684 mapping_state (MAP_ARM);
3685 }
3686
3687 do
3688 {
3689 expressionS exp;
3690
3691 expression (& exp);
3692
3693 if (! emit_insn (& exp, nbytes))
3694 {
3695 ignore_rest_of_line ();
3696 return;
3697 }
3698 }
3699 while (*input_line_pointer++ == ',');
3700
3701 /* Put terminator back into stream. */
3702 input_line_pointer --;
3703 demand_empty_rest_of_line ();
3704 }
3705
3706 /* Parse a .rel31 directive. */
3707
3708 static void
3709 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
3710 {
3711 expressionS exp;
3712 char *p;
3713 valueT highbit;
3714
3715 highbit = 0;
3716 if (*input_line_pointer == '1')
3717 highbit = 0x80000000;
3718 else if (*input_line_pointer != '0')
3719 as_bad (_("expected 0 or 1"));
3720
3721 input_line_pointer++;
3722 if (*input_line_pointer != ',')
3723 as_bad (_("missing comma"));
3724 input_line_pointer++;
3725
3726 #ifdef md_flush_pending_output
3727 md_flush_pending_output ();
3728 #endif
3729
3730 #ifdef md_cons_align
3731 md_cons_align (4);
3732 #endif
3733
3734 mapping_state (MAP_DATA);
3735
3736 expression (&exp);
3737
3738 p = frag_more (4);
3739 md_number_to_chars (p, highbit, 4);
3740 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3741 BFD_RELOC_ARM_PREL31);
3742
3743 demand_empty_rest_of_line ();
3744 }
3745
3746 /* Directives: AEABI stack-unwind tables. */
3747
3748 /* Parse an unwind_fnstart directive. Simply records the current location. */
3749
3750 static void
3751 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3752 {
3753 demand_empty_rest_of_line ();
3754 if (unwind.proc_start)
3755 {
3756 as_bad (_("duplicate .fnstart directive"));
3757 return;
3758 }
3759
3760 /* Mark the start of the function. */
3761 unwind.proc_start = expr_build_dot ();
3762
3763 /* Reset the rest of the unwind info. */
3764 unwind.opcode_count = 0;
3765 unwind.table_entry = NULL;
3766 unwind.personality_routine = NULL;
3767 unwind.personality_index = -1;
3768 unwind.frame_size = 0;
3769 unwind.fp_offset = 0;
3770 unwind.fp_reg = REG_SP;
3771 unwind.fp_used = 0;
3772 unwind.sp_restored = 0;
3773 }
3774
3775
3776 /* Parse a handlerdata directive. Creates the exception handling table entry
3777 for the function. */
3778
3779 static void
3780 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3781 {
3782 demand_empty_rest_of_line ();
3783 if (!unwind.proc_start)
3784 as_bad (MISSING_FNSTART);
3785
3786 if (unwind.table_entry)
3787 as_bad (_("duplicate .handlerdata directive"));
3788
3789 create_unwind_entry (1);
3790 }
3791
3792 /* Parse an unwind_fnend directive. Generates the index table entry. */
3793
/* Implement .fnend: finish the current unwinding region, emitting the
   two-word exception index table entry (and, when needed, an inline or
   out-of-line exception table entry).  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  memset (ptr, 0, 8);
  /* WHERE is the offset of the entry's first word in this frag.  */
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      /* A zero-size fix only records the reference, emitting nothing.  */
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  unwind.proc_start = NULL;
}
3861
3862
3863 /* Parse an unwind_cantunwind directive. */
3864
3865 static void
3866 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3867 {
3868 demand_empty_rest_of_line ();
3869 if (!unwind.proc_start)
3870 as_bad (MISSING_FNSTART);
3871
3872 if (unwind.personality_routine || unwind.personality_index != -1)
3873 as_bad (_("personality routine specified for cantunwind frame"));
3874
3875 unwind.personality_index = -2;
3876 }
3877
3878
3879 /* Parse a personalityindex directive. */
3880
3881 static void
3882 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3883 {
3884 expressionS exp;
3885
3886 if (!unwind.proc_start)
3887 as_bad (MISSING_FNSTART);
3888
3889 if (unwind.personality_routine || unwind.personality_index != -1)
3890 as_bad (_("duplicate .personalityindex directive"));
3891
3892 expression (&exp);
3893
3894 if (exp.X_op != O_constant
3895 || exp.X_add_number < 0 || exp.X_add_number > 15)
3896 {
3897 as_bad (_("bad personality routine number"));
3898 ignore_rest_of_line ();
3899 return;
3900 }
3901
3902 unwind.personality_index = exp.X_add_number;
3903
3904 demand_empty_rest_of_line ();
3905 }
3906
3907
3908 /* Parse a personality directive. */
3909
/* Implement .personality: record the named symbol as this frame's
   personality routine.  */

static void
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
{
  char *name, *p, c;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personality directive"));

  /* C is the delimiter get_symbol_name replaced with NUL; P points at
     that NUL so it can be restored below.  */
  c = get_symbol_name (& name);
  p = input_line_pointer;
  if (c == '"')
    ++ input_line_pointer;
  unwind.personality_routine = symbol_find_or_make (name);
  /* Put the overwritten delimiter back so parsing can continue.  */
  *p = c;
  demand_empty_rest_of_line ();
}
3929
3930
3931 /* Parse a directive saving core registers. */
3932
/* Parse a directive saving core registers: emit EHABI pop opcodes for
   the parsed register list and account for the stacked bytes.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;
  int n;

  /* RANGE is a bitmask with bit N set for register rN.  */
  range = parse_reg_list (&input_line_pointer);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      /* Drop the movsp opcode and pretend sp was saved instead of ip.  */
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.	 */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.	*/
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.	 */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.	 */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
4006
4007
4008 /* Parse a directive saving FPA registers. */
4009
4010 static void
4011 s_arm_unwind_save_fpa (int reg)
4012 {
4013 expressionS exp;
4014 int num_regs;
4015 valueT op;
4016
4017 /* Get Number of registers to transfer. */
4018 if (skip_past_comma (&input_line_pointer) != FAIL)
4019 expression (&exp);
4020 else
4021 exp.X_op = O_illegal;
4022
4023 if (exp.X_op != O_constant)
4024 {
4025 as_bad (_("expected , <constant>"));
4026 ignore_rest_of_line ();
4027 return;
4028 }
4029
4030 num_regs = exp.X_add_number;
4031
4032 if (num_regs < 1 || num_regs > 4)
4033 {
4034 as_bad (_("number of registers must be in the range [1:4]"));
4035 ignore_rest_of_line ();
4036 return;
4037 }
4038
4039 demand_empty_rest_of_line ();
4040
4041 if (reg == 4)
4042 {
4043 /* Short form. */
4044 op = 0xb4 | (num_regs - 1);
4045 add_unwind_opcode (op, 1);
4046 }
4047 else
4048 {
4049 /* Long form. */
4050 op = 0xc800 | (reg << 4) | (num_regs - 1);
4051 add_unwind_opcode (op, 2);
4052 }
4053 unwind.frame_size += num_regs * 12;
4054 }
4055
4056
4057 /* Parse a directive saving VFP registers for ARMv6 and above. */
4058
/* Parse a directive saving VFP registers for ARMv6 and above: emit
   FSTMD/FLDMD-style unwind opcodes for the parsed D-register list.  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;

  /* COUNT registers starting at dSTART.  */
  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      /* The opcode's base is d16-relative, so a list straddling d16
	 starts its VFPv3 part at offset 0.  */
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* Each D register occupies eight bytes on the stack.  */
  unwind.frame_size += count * 8;
}
4105
4106
4107 /* Parse a directive saving VFP registers for pre-ARMv6. */
4108
4109 static void
4110 s_arm_unwind_save_vfp (void)
4111 {
4112 int count;
4113 unsigned int reg;
4114 valueT op;
4115
4116 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
4117 if (count == FAIL)
4118 {
4119 as_bad (_("expected register list"));
4120 ignore_rest_of_line ();
4121 return;
4122 }
4123
4124 demand_empty_rest_of_line ();
4125
4126 if (reg == 8)
4127 {
4128 /* Short form. */
4129 op = 0xb8 | (count - 1);
4130 add_unwind_opcode (op, 1);
4131 }
4132 else
4133 {
4134 /* Long form. */
4135 op = 0xb300 | (reg << 4) | (count - 1);
4136 add_unwind_opcode (op, 2);
4137 }
4138 unwind.frame_size += count * 8 + 4;
4139 }
4140
4141
4142 /* Parse a directive saving iWMMXt data registers. */
4143
4144 static void
4145 s_arm_unwind_save_mmxwr (void)
4146 {
4147 int reg;
4148 int hi_reg;
4149 int i;
4150 unsigned mask = 0;
4151 valueT op;
4152
4153 if (*input_line_pointer == '{')
4154 input_line_pointer++;
4155
4156 do
4157 {
4158 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4159
4160 if (reg == FAIL)
4161 {
4162 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4163 goto error;
4164 }
4165
4166 if (mask >> reg)
4167 as_tsktsk (_("register list not in ascending order"));
4168 mask |= 1 << reg;
4169
4170 if (*input_line_pointer == '-')
4171 {
4172 input_line_pointer++;
4173 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4174 if (hi_reg == FAIL)
4175 {
4176 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4177 goto error;
4178 }
4179 else if (reg >= hi_reg)
4180 {
4181 as_bad (_("bad register range"));
4182 goto error;
4183 }
4184 for (; reg < hi_reg; reg++)
4185 mask |= 1 << reg;
4186 }
4187 }
4188 while (skip_past_comma (&input_line_pointer) != FAIL);
4189
4190 skip_past_char (&input_line_pointer, '}');
4191
4192 demand_empty_rest_of_line ();
4193
4194 /* Generate any deferred opcodes because we're going to be looking at
4195 the list. */
4196 flush_pending_unwind ();
4197
4198 for (i = 0; i < 16; i++)
4199 {
4200 if (mask & (1 << i))
4201 unwind.frame_size += 8;
4202 }
4203
4204 /* Attempt to combine with a previous opcode. We do this because gcc
4205 likes to output separate unwind directives for a single block of
4206 registers. */
4207 if (unwind.opcode_count > 0)
4208 {
4209 i = unwind.opcodes[unwind.opcode_count - 1];
4210 if ((i & 0xf8) == 0xc0)
4211 {
4212 i &= 7;
4213 /* Only merge if the blocks are contiguous. */
4214 if (i < 6)
4215 {
4216 if ((mask & 0xfe00) == (1 << 9))
4217 {
4218 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
4219 unwind.opcode_count--;
4220 }
4221 }
4222 else if (i == 6 && unwind.opcode_count >= 2)
4223 {
4224 i = unwind.opcodes[unwind.opcode_count - 2];
4225 reg = i >> 4;
4226 i &= 0xf;
4227
4228 op = 0xffff << (reg - 1);
4229 if (reg > 0
4230 && ((mask & op) == (1u << (reg - 1))))
4231 {
4232 op = (1 << (reg + i + 1)) - 1;
4233 op &= ~((1 << reg) - 1);
4234 mask |= op;
4235 unwind.opcode_count -= 2;
4236 }
4237 }
4238 }
4239 }
4240
4241 hi_reg = 15;
4242 /* We want to generate opcodes in the order the registers have been
4243 saved, ie. descending order. */
4244 for (reg = 15; reg >= -1; reg--)
4245 {
4246 /* Save registers in blocks. */
4247 if (reg < 0
4248 || !(mask & (1 << reg)))
4249 {
4250 /* We found an unsaved reg. Generate opcodes to save the
4251 preceding block. */
4252 if (reg != hi_reg)
4253 {
4254 if (reg == 9)
4255 {
4256 /* Short form. */
4257 op = 0xc0 | (hi_reg - 10);
4258 add_unwind_opcode (op, 1);
4259 }
4260 else
4261 {
4262 /* Long form. */
4263 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
4264 add_unwind_opcode (op, 2);
4265 }
4266 }
4267 hi_reg = reg - 1;
4268 }
4269 }
4270
4271 return;
4272 error:
4273 ignore_rest_of_line ();
4274 }
4275
4276 static void
4277 s_arm_unwind_save_mmxwcg (void)
4278 {
4279 int reg;
4280 int hi_reg;
4281 unsigned mask = 0;
4282 valueT op;
4283
4284 if (*input_line_pointer == '{')
4285 input_line_pointer++;
4286
4287 skip_whitespace (input_line_pointer);
4288
4289 do
4290 {
4291 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4292
4293 if (reg == FAIL)
4294 {
4295 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4296 goto error;
4297 }
4298
4299 reg -= 8;
4300 if (mask >> reg)
4301 as_tsktsk (_("register list not in ascending order"));
4302 mask |= 1 << reg;
4303
4304 if (*input_line_pointer == '-')
4305 {
4306 input_line_pointer++;
4307 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4308 if (hi_reg == FAIL)
4309 {
4310 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4311 goto error;
4312 }
4313 else if (reg >= hi_reg)
4314 {
4315 as_bad (_("bad register range"));
4316 goto error;
4317 }
4318 for (; reg < hi_reg; reg++)
4319 mask |= 1 << reg;
4320 }
4321 }
4322 while (skip_past_comma (&input_line_pointer) != FAIL);
4323
4324 skip_past_char (&input_line_pointer, '}');
4325
4326 demand_empty_rest_of_line ();
4327
4328 /* Generate any deferred opcodes because we're going to be looking at
4329 the list. */
4330 flush_pending_unwind ();
4331
4332 for (reg = 0; reg < 16; reg++)
4333 {
4334 if (mask & (1 << reg))
4335 unwind.frame_size += 4;
4336 }
4337 op = 0xc700 | mask;
4338 add_unwind_opcode (op, 2);
4339 return;
4340 error:
4341 ignore_rest_of_line ();
4342 }
4343
4344
4345 /* Parse an unwind_save directive.
4346 If the argument is non-zero, this is a .vsave directive. */
4347
/* Parse an unwind_save directive.
   If the argument is non-zero, this is a .vsave directive.  */

static void
s_arm_unwind_save (int arch_v6)
{
  char *peek;
  struct reg_entry *reg;
  bfd_boolean had_brace = FALSE;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* Figure out what sort of save we have.  */
  peek = input_line_pointer;

  if (*peek == '{')
    {
      had_brace = TRUE;
      peek++;
    }

  /* Peek at the first register without consuming input; the helpers
     below re-parse the full list themselves.  */
  reg = arm_reg_parse_multi (&peek);

  if (!reg)
    {
      as_bad (_("register expected"));
      ignore_rest_of_line ();
      return;
    }

  /* Dispatch on the register class of the first operand.  */
  switch (reg->type)
    {
    case REG_TYPE_FN:
      if (had_brace)
	{
	  as_bad (_("FPA .unwind_save does not take a register list"));
	  ignore_rest_of_line ();
	  return;
	}
      /* The FPA helper expects the register already consumed.  */
      input_line_pointer = peek;
      s_arm_unwind_save_fpa (reg->number);
      return;

    case REG_TYPE_RN:
      s_arm_unwind_save_core ();
      return;

    case REG_TYPE_VFD:
      if (arch_v6)
	s_arm_unwind_save_vfp_armv6 ();
      else
	s_arm_unwind_save_vfp ();
      return;

    case REG_TYPE_MMXWR:
      s_arm_unwind_save_mmxwr ();
      return;

    case REG_TYPE_MMXWCG:
      s_arm_unwind_save_mmxwcg ();
      return;

    default:
      as_bad (_(".unwind_save does not support this kind of register"));
      ignore_rest_of_line ();
    }
}
4413
4414
4415 /* Parse an unwind_movsp directive. */
4416
4417 static void
4418 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4419 {
4420 int reg;
4421 valueT op;
4422 int offset;
4423
4424 if (!unwind.proc_start)
4425 as_bad (MISSING_FNSTART);
4426
4427 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4428 if (reg == FAIL)
4429 {
4430 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4431 ignore_rest_of_line ();
4432 return;
4433 }
4434
4435 /* Optional constant. */
4436 if (skip_past_comma (&input_line_pointer) != FAIL)
4437 {
4438 if (immediate_for_directive (&offset) == FAIL)
4439 return;
4440 }
4441 else
4442 offset = 0;
4443
4444 demand_empty_rest_of_line ();
4445
4446 if (reg == REG_SP || reg == REG_PC)
4447 {
4448 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4449 return;
4450 }
4451
4452 if (unwind.fp_reg != REG_SP)
4453 as_bad (_("unexpected .unwind_movsp directive"));
4454
4455 /* Generate opcode to restore the value. */
4456 op = 0x90 | reg;
4457 add_unwind_opcode (op, 1);
4458
4459 /* Record the information for later. */
4460 unwind.fp_reg = reg;
4461 unwind.fp_offset = unwind.frame_size - offset;
4462 unwind.sp_restored = 1;
4463 }
4464
4465 /* Parse an unwind_pad directive. */
4466
4467 static void
4468 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4469 {
4470 int offset;
4471
4472 if (!unwind.proc_start)
4473 as_bad (MISSING_FNSTART);
4474
4475 if (immediate_for_directive (&offset) == FAIL)
4476 return;
4477
4478 if (offset & 3)
4479 {
4480 as_bad (_("stack increment must be multiple of 4"));
4481 ignore_rest_of_line ();
4482 return;
4483 }
4484
4485 /* Don't generate any opcodes, just record the details for later. */
4486 unwind.frame_size += offset;
4487 unwind.pending_offset += offset;
4488
4489 demand_empty_rest_of_line ();
4490 }
4491
4492 /* Parse an unwind_setfp directive. */
4493
4494 static void
4495 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4496 {
4497 int sp_reg;
4498 int fp_reg;
4499 int offset;
4500
4501 if (!unwind.proc_start)
4502 as_bad (MISSING_FNSTART);
4503
4504 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4505 if (skip_past_comma (&input_line_pointer) == FAIL)
4506 sp_reg = FAIL;
4507 else
4508 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4509
4510 if (fp_reg == FAIL || sp_reg == FAIL)
4511 {
4512 as_bad (_("expected <reg>, <reg>"));
4513 ignore_rest_of_line ();
4514 return;
4515 }
4516
4517 /* Optional constant. */
4518 if (skip_past_comma (&input_line_pointer) != FAIL)
4519 {
4520 if (immediate_for_directive (&offset) == FAIL)
4521 return;
4522 }
4523 else
4524 offset = 0;
4525
4526 demand_empty_rest_of_line ();
4527
4528 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4529 {
4530 as_bad (_("register must be either sp or set by a previous"
4531 "unwind_movsp directive"));
4532 return;
4533 }
4534
4535 /* Don't generate any opcodes, just record the information for later. */
4536 unwind.fp_reg = fp_reg;
4537 unwind.fp_used = 1;
4538 if (sp_reg == REG_SP)
4539 unwind.fp_offset = unwind.frame_size - offset;
4540 else
4541 unwind.fp_offset -= offset;
4542 }
4543
4544 /* Parse an unwind_raw directive. */
4545
4546 static void
4547 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4548 {
4549 expressionS exp;
4550 /* This is an arbitrary limit. */
4551 unsigned char op[16];
4552 int count;
4553
4554 if (!unwind.proc_start)
4555 as_bad (MISSING_FNSTART);
4556
4557 expression (&exp);
4558 if (exp.X_op == O_constant
4559 && skip_past_comma (&input_line_pointer) != FAIL)
4560 {
4561 unwind.frame_size += exp.X_add_number;
4562 expression (&exp);
4563 }
4564 else
4565 exp.X_op = O_illegal;
4566
4567 if (exp.X_op != O_constant)
4568 {
4569 as_bad (_("expected <offset>, <opcode>"));
4570 ignore_rest_of_line ();
4571 return;
4572 }
4573
4574 count = 0;
4575
4576 /* Parse the opcode. */
4577 for (;;)
4578 {
4579 if (count >= 16)
4580 {
4581 as_bad (_("unwind opcode too long"));
4582 ignore_rest_of_line ();
4583 }
4584 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4585 {
4586 as_bad (_("invalid unwind opcode"));
4587 ignore_rest_of_line ();
4588 return;
4589 }
4590 op[count++] = exp.X_add_number;
4591
4592 /* Parse the next byte. */
4593 if (skip_past_comma (&input_line_pointer) == FAIL)
4594 break;
4595
4596 expression (&exp);
4597 }
4598
4599 /* Add the opcode bytes in reverse order. */
4600 while (count--)
4601 add_unwind_opcode (op[count], 1);
4602
4603 demand_empty_rest_of_line ();
4604 }
4605
4606
4607 /* Parse a .eabi_attribute directive. */
4608
4609 static void
4610 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4611 {
4612 int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4613
4614 if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4615 attributes_set_explicitly[tag] = 1;
4616 }
4617
4618 /* Emit a tls fix for the symbol. */
4619
/* Implement .tlsdescseq: attach a TLS descriptor-sequence relocation
   at the current output position without emitting any bytes.  */

static void
s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
{
  char *p;
  expressionS exp;
#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Point at the current end of the frag; the fix covers the following
     (already- or about-to-be-emitted) instruction bytes.  */
  p = obstack_next_free (&frchain_now->frch_obstack);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
	       : BFD_RELOC_ARM_TLS_DESCSEQ);
}
4641 #endif /* OBJ_ELF */
4642
4643 static void s_arm_arch (int);
4644 static void s_arm_object_arch (int);
4645 static void s_arm_cpu (int);
4646 static void s_arm_fpu (int);
4647 static void s_arm_arch_extension (int);
4648
4649 #ifdef TE_PE
4650
4651 static void
4652 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4653 {
4654 expressionS exp;
4655
4656 do
4657 {
4658 expression (&exp);
4659 if (exp.X_op == O_symbol)
4660 exp.X_op = O_secrel;
4661
4662 emit_expr (&exp, 4);
4663 }
4664 while (*input_line_pointer++ == ',');
4665
4666 input_line_pointer--;
4667 demand_empty_rest_of_line ();
4668 }
4669 #endif /* TE_PE */
4670
4671 /* This table describes all the machine specific pseudo-ops the assembler
4672 has to support. The fields are:
4673 pseudo-op name without dot
4674 function to call to execute this pseudo-op
4675 Integer arg to pass to the function. */
4676
const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.  */
  { "req", s_req, 0 },
  /* Following two are likewise never called.  */
  { "dn", s_dn, 0 },
  { "qn", s_qn, 0 },
  { "unreq", s_unreq, 0 },
  { "bss", s_bss, 0 },
  { "align", s_align_ptwo, 2 },
  { "arm", s_arm, 0 },
  { "thumb", s_thumb, 0 },
  { "code", s_code, 0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func", s_thumb_func, 0 },
  { "thumb_set", s_thumb_set, 0 },
  { "even", s_even, 0 },
  { "ltorg", s_ltorg, 0 },
  { "pool", s_ltorg, 0 },
  { "syntax", s_syntax, 0 },
  { "cpu", s_arm_cpu, 0 },
  { "arch", s_arm_arch, 0 },
  { "object_arch", s_arm_object_arch, 0 },
  { "fpu", s_arm_fpu, 0 },
  { "arch_extension", s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  /* ELF-only pseudo-ops: data with ARM relocations, raw instruction
     emission, and the EHABI unwind-table directives.  */
  { "word", s_arm_elf_cons, 4 },
  { "long", s_arm_elf_cons, 4 },
  { "inst.n", s_arm_elf_inst, 2 },
  { "inst.w", s_arm_elf_inst, 4 },
  { "inst", s_arm_elf_inst, 0 },
  { "rel31", s_arm_rel31, 0 },
  { "fnstart", s_arm_unwind_fnstart, 0 },
  { "fnend", s_arm_unwind_fnend, 0 },
  { "cantunwind", s_arm_unwind_cantunwind, 0 },
  { "personality", s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata", s_arm_unwind_handlerdata, 0 },
  { "save", s_arm_unwind_save, 0 },
  { "vsave", s_arm_unwind_save, 1 },
  { "movsp", s_arm_unwind_movsp, 0 },
  { "pad", s_arm_unwind_pad, 0 },
  { "setfp", s_arm_unwind_setfp, 0 },
  { "unwind_raw", s_arm_unwind_raw, 0 },
  { "eabi_attribute", s_arm_eabi_attribute, 0 },
  { "tlsdescseq", s_arm_tls_descseq, 0 },
#else
  { "word", cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
  { "loc", dwarf2_directive_loc, 0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend", float_cons, 'x' },
  { "ldouble", float_cons, 'x' },
  { "packed", float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif

  /* These are for compatibility with CodeComposer Studio.  */
  {"ref", s_ccs_ref, 0},
  {"def", s_ccs_def, 0},
  {"asmfunc", s_ccs_asmfunc, 0},
  {"endasmfunc", s_ccs_endasmfunc, 0},

  { 0, 0, 0 }
};
4750 \f
4751 /* Parser functions used exclusively in instruction operands. */
4752
4753 /* Generic immediate-value read function for use in insn parsing.
4754 STR points to the beginning of the immediate (the leading #);
4755 VAL receives the value; if the value is outside [MIN, MAX]
4756 issue an error. PREFIX_OPT is true if the immediate prefix is
4757 optional. */
4758
4759 static int
4760 parse_immediate (char **str, int *val, int min, int max,
4761 bfd_boolean prefix_opt)
4762 {
4763 expressionS exp;
4764 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4765 if (exp.X_op != O_constant)
4766 {
4767 inst.error = _("constant expression required");
4768 return FAIL;
4769 }
4770
4771 if (exp.X_add_number < min || exp.X_add_number > max)
4772 {
4773 inst.error = _("immediate value out of range");
4774 return FAIL;
4775 }
4776
4777 *val = exp.X_add_number;
4778 return SUCCESS;
4779 }
4780
4781 /* Less-generic immediate-value read function with the possibility of loading a
4782 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4783 instructions. Puts the result directly in inst.operands[i]. */
4784
static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bfd_boolean allow_symbol_p)
{
  expressionS exp;
  /* Parse into the caller's expression if one was supplied.  */
  expressionS *exp_p = in_exp ? in_exp : &exp;
  char *ptr = *str;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
    {
      /* Low 32 bits go in .imm.  */
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
				  & 0xffffffff);
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
    {
      /* A bignum wider than 32 bits: low 32 bits go in .imm, the next
	 32 bits in .reg.  */
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg,  in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
    return FAIL;

  /* Only consume the input on success.  */
  *str = ptr;

  return SUCCESS;
}
4853
4854 /* Returns the pseudo-register number of an FPA immediate constant,
4855 or FAIL if there isn't a valid constant here. */
4856
static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char * save_in;
  expressionS exp;
  int i;
  int j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    return i + 8;
	  /* Not followed by end of line: undo the advance and fall
	     through to the generic parsing below.  */
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Compare the parsed littlenums against each supported FPA
	 constant; a complete match yields the pseudo-register number.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
#define X_PRECISION 5
#define E_PRECISION 15L
      if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  /* Restore the saved input state before reporting failure.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
4946
4947 /* Returns 1 if a number has "quarter-precision" float format
4948 0baBbbbbbc defgh000 00000000 00000000. */
4949
static int
is_quarter_float (unsigned imm)
{
  unsigned expected;

  /* The low 19 bits of the fraction must all be zero.  */
  if ((imm & 0x7ffff) != 0)
    return 0;

  /* Bits 25-29 (bbbbb) must all equal each other and be the complement
     of bit 30 (B); only two combinations of bits 25-30 are valid.  */
  expected = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
  return (imm & 0x7e000000) == expected;
}
4956
4957
4958 /* Detect the presence of a floating point or integer zero constant,
4959 i.e. #0.0 or #0. */
4960
static bfd_boolean
parse_ifimm_zero (char **in)
{
  int error_code;

  if (!is_immediate_prefix (**in))
    return FALSE;

  ++*in;

  /* Accept #0x0 as a synonym for #0.  */
  if (strncmp (*in, "0x", 2) == 0)
    {
      int val;
      /* Range [0, 0] means only the exact value zero is accepted.  */
      if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
        return FALSE;
      return TRUE;
    }

  error_code = atof_generic (in, ".", EXP_CHARS,
                             &generic_floating_point_number);

  /* NOTE(review): low > leader appears to indicate that no non-zero
     littlenums were produced, i.e. the parsed value was exactly +0.0;
     confirm against atof-generic.c.  Negative zero is rejected by the
     sign check.  */
  if (!error_code
      && generic_floating_point_number.sign == '+'
      && (generic_floating_point_number.low
          > generic_floating_point_number.leader))
    return TRUE;

  return FALSE;
}
4991
4992 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4993 0baBbbbbbc defgh000 00000000 00000000.
4994 The zero and minus-zero cases need special handling, since they can't be
4995 encoded in the "quarter-precision" float format, but can nonetheless be
4996 loaded as integer constants. */
4997
static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    return FAIL;
  else
    {
      /* Scan up to the end of the token for a character that marks a
	 floating-point literal.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Accept encodable quarter-precision values, plus +0.0/-0.0
	 which are loaded as integer constants instead.  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}
5055
/* Shift operands.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

/* Maps a shift mnemonic (as stored in arm_shift_hsh) to its kind.  */
struct asm_shift_name
{
  const char *name;
  enum shift_kind kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.  */
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.  */
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.  */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.  */
};
5077
5078 /* Parse a <shift> specifier on an ARM data processing instruction.
5079 This has three forms:
5080
5081 (LSL|LSR|ASL|ASR|ROR) Rs
5082 (LSL|LSR|ASL|ASR|ROR) #imm
5083 RRX
5084
5085 Note that ASL is assimilated to LSL in the instruction encoding, and
5086 RRX to ROR #0 (which cannot be written as such). */
5087
5088 static int
5089 parse_shift (char **str, int i, enum parse_shift_mode mode)
5090 {
5091 const struct asm_shift_name *shift_name;
5092 enum shift_kind shift;
5093 char *s = *str;
5094 char *p = s;
5095 int reg;
5096
5097 for (p = *str; ISALPHA (*p); p++)
5098 ;
5099
5100 if (p == *str)
5101 {
5102 inst.error = _("shift expression expected");
5103 return FAIL;
5104 }
5105
5106 shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
5107 p - *str);
5108
5109 if (shift_name == NULL)
5110 {
5111 inst.error = _("shift expression expected");
5112 return FAIL;
5113 }
5114
5115 shift = shift_name->kind;
5116
5117 switch (mode)
5118 {
5119 case NO_SHIFT_RESTRICT:
5120 case SHIFT_IMMEDIATE: break;
5121
5122 case SHIFT_LSL_OR_ASR_IMMEDIATE:
5123 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
5124 {
5125 inst.error = _("'LSL' or 'ASR' required");
5126 return FAIL;
5127 }
5128 break;
5129
5130 case SHIFT_LSL_IMMEDIATE:
5131 if (shift != SHIFT_LSL)
5132 {
5133 inst.error = _("'LSL' required");
5134 return FAIL;
5135 }
5136 break;
5137
5138 case SHIFT_ASR_IMMEDIATE:
5139 if (shift != SHIFT_ASR)
5140 {
5141 inst.error = _("'ASR' required");
5142 return FAIL;
5143 }
5144 break;
5145
5146 default: abort ();
5147 }
5148
5149 if (shift != SHIFT_RRX)
5150 {
5151 /* Whitespace can appear here if the next thing is a bare digit. */
5152 skip_whitespace (p);
5153
5154 if (mode == NO_SHIFT_RESTRICT
5155 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5156 {
5157 inst.operands[i].imm = reg;
5158 inst.operands[i].immisreg = 1;
5159 }
5160 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5161 return FAIL;
5162 }
5163 inst.operands[i].shift_kind = shift;
5164 inst.operands[i].shifted = 1;
5165 *str = p;
5166 return SUCCESS;
5167 }
5168
5169 /* Parse a <shifter_operand> for an ARM data processing instruction:
5170
5171 #<immediate>
5172 #<immediate>, <rotate>
5173 <Rm>
5174 <Rm>, <shift>
5175
5176 where <shift> is defined by parse_shift above, and <rotate> is a
5177 multiple of 2 between 0 and 30. Validation of immediate operands
5178 is deferred to md_apply_fix. */
5179
static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS exp;

  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&exp, str, GE_NO_PREFIX))
	return FAIL;

      if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      /* The rotation must be an even number in [0, 30].  */
      value = exp.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      /* With an explicit rotation the base constant must fit in 8 bits.  */
      if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Encode as specified.  */
      inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
      return SUCCESS;
    }

  /* Plain immediate, no rotation: defer validation to md_apply_fix.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 0;
  return SUCCESS;
}
5238
5239 /* Group relocation information. Each entry in the table contains the
5240 textual name of the relocation as may appear in assembler source
5241 and must end with a colon.
5242 Along with this textual name are the relocation codes to be used if
5243 the corresponding instruction is an ALU instruction (ADD or SUB only),
5244 an LDR, an LDRS, or an LDC. */
5245
/* One row of group_reloc_table: NAME is the textual relocation name as
   written in assembler source (without the trailing colon); the *_code
   fields give the relocation to use for each instruction class, or 0
   where the combination is not allowed.  */
struct group_reloc_table_entry
{
  const char *name;
  int alu_code;
  int ldr_code;
  int ldrs_code;
  int ldc_code;
};

typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC
} group_reloc_type;
5263
/* Searched linearly by find_group_reloc_table_entry.  */
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 },	/* LDC */
    /* Absolute thumb alu relocations.  */
    { "lower0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "lower8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 } };				/* LDC.  */
5338
5339 /* Given the address of a pointer pointing to the textual name of a group
5340 relocation as may appear in assembler source, attempt to find its details
5341 in group_reloc_table. The pointer will be updated to the character after
5342 the trailing colon. On failure, FAIL will be returned; SUCCESS
5343 otherwise. On success, *entry will be updated to point at the relevant
5344 group_reloc_table entry. */
5345
5346 static int
5347 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5348 {
5349 unsigned int i;
5350 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5351 {
5352 int length = strlen (group_reloc_table[i].name);
5353
5354 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5355 && (*str)[length] == ':')
5356 {
5357 *out = &group_reloc_table[i];
5358 *str += (length + 1);
5359 return SUCCESS;
5360 }
5361 }
5362
5363 return FAIL;
5364 }
5365
5366 /* Parse a <shifter_operand> for an ARM data processing instruction
5367 (as for parse_shifter_operand) where group relocations are allowed:
5368
5369 #<immediate>
5370 #<immediate>, <rotate>
5371 #:<group_reloc>:<expression>
5372 <Rm>
5373 <Rm>, <shift>
5374
5375 where <group_reloc> is one of the strings defined in group_reloc_table.
5376 The hashes are optional.
5377
5378 Everything else is as for parse_shifter_operand. */
5379
static parse_operand_result
parse_shifter_operand_group_reloc (char **str, int i)
{
  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a group relocation.
     If we don't, punt the whole lot to parse_shifter_operand.  */

  if (((*str)[0] == '#' && (*str)[1] == ':')
      || (*str)[0] == ':')
    {
      struct group_reloc_table_entry *entry;

      /* Skip over the '#:' or ':' prefix.  */
      if ((*str)[0] == '#')
	(*str) += 2;
      else
	(*str)++;

      /* Try to parse a group relocation.  Anything else is an error.  */
      if (find_group_reloc_table_entry (str, &entry) == FAIL)
	{
	  inst.error = _("unknown group relocation");
	  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
	}

      /* We now have the group relocation table entry corresponding to
	 the name in the assembler source.  Next, we parse the expression.  */
      if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
	return PARSE_OPERAND_FAIL_NO_BACKTRACK;

      /* Record the relocation type (always the ALU variant here).  */
      inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
      gas_assert (inst.reloc.type != 0);

      return PARSE_OPERAND_SUCCESS;
    }
  else
    return parse_shifter_operand (str, i) == SUCCESS
	   ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;

  /* Never reached.  */
}
5421
5422 /* Parse a Neon alignment expression. Information is written to
5423 inst.operands[i]. We assume the initial ':' has been skipped.
5424
5425 align .imm = align << 8, .immisalign=1, .preind=0 */
static parse_operand_result
parse_neon_alignment (char **str, int i)
{
  char *p = *str;
  expressionS exp;

  my_get_expression (&exp, &p, GE_NO_PREFIX);

  if (exp.X_op != O_constant)
    {
      inst.error = _("alignment must be constant");
      return PARSE_OPERAND_FAIL;
    }

  /* Store the alignment shifted into bits 8 and up of .imm, as
     described in the comment above.  */
  inst.operands[i].imm = exp.X_add_number << 8;
  inst.operands[i].immisalign = 1;
  /* Alignments are not pre-indexes.  */
  inst.operands[i].preind = 0;

  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
5448
5449 /* Parse all forms of an ARM address expression. Information is written
5450 to inst.operands[i] and/or inst.reloc.
5451
5452 Preindexed addressing (.preind=1):
5453
5454 [Rn, #offset] .reg=Rn .reloc.exp=offset
5455 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5456 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5457 .shift_kind=shift .reloc.exp=shift_imm
5458
5459 These three may have a trailing ! which causes .writeback to be set also.
5460
5461 Postindexed addressing (.postind=1, .writeback=1):
5462
5463 [Rn], #offset .reg=Rn .reloc.exp=offset
5464 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5465 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5466 .shift_kind=shift .reloc.exp=shift_imm
5467
5468 Unindexed addressing (.preind=0, .postind=0):
5469
5470 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5471
5472 Other:
5473
5474 [Rn]{!} shorthand for [Rn,#0]{!}
5475 =immediate .isreg=0 .reloc.exp=immediate
5476 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5477
5478 It is the caller's responsibility to check for addressing modes not
5479 supported by the instruction, and to set inst.reloc.type. */
5480
static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  if (skip_past_char (&p, '[') == FAIL)
    {
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.reloc.pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;

	  if (my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX_BIG))
	    return PARSE_OPERAND_FAIL;
	}
      /* '=immediate' literal-pool form.  */
      else if (parse_big_immediate (&p, i, &inst.reloc.exp,
				    /*allow_symbol_p=*/TRUE))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* PR gas/14887: Allow for whitespace after the opening bracket.  */
  skip_whitespace (p);

  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  if (skip_past_comma (&p) == SUCCESS)
    {
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  /* [Rn, +/-Rm {, shift}] register offset form.  */
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here. This may be subject to
	     change.  */
	  parse_operand_result result = parse_neon_alignment (&p, i);

	  if (result != PARSE_OPERAND_SUCCESS)
	    return result;
	}
      else
	{
	  /* Undo the sign skip: the '-' belongs to the expression that
	     follows and will be re-parsed as part of it.  */
	  if (inst.operands[i].negative)
	    {
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		  case GROUP_LDR:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
		    break;

		  case GROUP_LDRS:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
		    break;

		  case GROUP_LDC:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
		    break;

		  default:
		    gas_assert (0);
		}

	      /* A zero code in the table marks a disallowed combination.  */
	      if (inst.reloc.type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    {
	      char *q = p;
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }
  else if (skip_past_char (&p, ':') == SUCCESS)
    {
      /* FIXME: '@' should be used here, but it's filtered out by generic code
	 before we get to see it here. This may be subject to change.  */
      parse_operand_result result = parse_neon_alignment (&p, i);

      if (result != PARSE_OPERAND_SUCCESS)
	return result;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  /* Post-indexed addressing: [Rn], offset.  */
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already. If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      char *q = p;
	      /* Undo the sign skip: the '-' belongs to the expression
		 that follows and will be re-parsed as part of it.  */
	      if (inst.operands[i].negative)
		{
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
5732
5733 static int
5734 parse_address (char **str, int i)
5735 {
5736 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5737 ? SUCCESS : FAIL;
5738 }
5739
/* As parse_address, but group relocations of the given TYPE are allowed;
   the full parse_operand_result is returned so callers can distinguish
   no-backtrack failures.  */
static parse_operand_result
parse_address_group_reloc (char **str, int i, group_reloc_type type)
{
  return parse_address_main (str, i, 1, type);
}
5745
5746 /* Parse an operand for a MOVW or MOVT instruction. */
static int
parse_half (char **str)
{
  char * p;

  p = *str;
  skip_past_char (&p, '#');
  if (strncasecmp (p, ":lower16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVW;
  else if (strncasecmp (p, ":upper16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVT;

  /* NOTE(review): assumes inst.reloc.type was BFD_RELOC_UNUSED on entry,
     i.e. inst is reset per instruction — confirm at the call sites.
     ":lower16:" and ":upper16:" are both 9 characters long.  */
  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      p += 9;
      skip_whitespace (p);
    }

  if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
    return FAIL;

  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      /* Without a :lower16:/:upper16: operator the operand must be a
	 plain constant in the range 0..0xffff.  */
      if (inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0
	  || inst.reloc.exp.X_add_number > 0xffff)
	{
	  inst.error = _("immediate value out of range");
	  return FAIL;
	}
    }
  *str = p;
  return SUCCESS;
}
5785
5786 /* Miscellaneous. */
5787
/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
   or a bitmask suitable to be or-ed into the ARM msr instruction.
   STR is advanced past the operand on success.  LHS is TRUE when the
   operand is the destination of an MSR (a write); this influences the
   default mask bits chosen for APSR and for M-profile registers.  */
static int
parse_psr (char **str, bfd_boolean lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bfd_boolean is_apsr = FALSE;
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698: If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    m_profile = FALSE;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      /* SPSR/CPSR only exist on A/R-profile cores.  */
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */
      is_apsr = TRUE;
      psr_field = 0;
    }
  else if (m_profile)
    {
      /* M-profile: look the whole name up in the v7m special-register
	 table.  First scan to the end of the identifier.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* For the *PSR aliases, trim the lookup key just after the final
	 'r'/'R' so any trailing junk is excluded from the hash probe.  */
      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified.  Note that
	 APSR itself is handled above.  */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = TRUE;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  /* Step over the 4-character register name matched above.  */
  p += 4;
check_suffix:
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  /* Each letter sets its own bit; a repeated letter sets the 0x20
	     marker (0x2 for 'g'), which is rejected as an error below.  */
	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  /* All five of n, z, c, v and q given: the full flags field.  */
	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  if (g_bit == 0x1)
	    {
	      /* The 'g' (GE bits) form requires the DSP extension.  */
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject duplicated letters and partial nzcvq subsets.  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  /* Regular CPSR/SPSR field suffix: look it up directly.  */
	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						      p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;    /* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here).  */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
5984
5985 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5986 value suitable for splatting into the AIF field of the instruction. */
5987
5988 static int
5989 parse_cps_flags (char **str)
5990 {
5991 int val = 0;
5992 int saw_a_flag = 0;
5993 char *s = *str;
5994
5995 for (;;)
5996 switch (*s++)
5997 {
5998 case '\0': case ',':
5999 goto done;
6000
6001 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
6002 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
6003 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
6004
6005 default:
6006 inst.error = _("unrecognized CPS flag");
6007 return FAIL;
6008 }
6009
6010 done:
6011 if (saw_a_flag == 0)
6012 {
6013 inst.error = _("missing CPS flags");
6014 return FAIL;
6015 }
6016
6017 *str = s - 1;
6018 return val;
6019 }
6020
6021 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6022 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6023
6024 static int
6025 parse_endian_specifier (char **str)
6026 {
6027 int little_endian;
6028 char *s = *str;
6029
6030 if (strncasecmp (s, "BE", 2))
6031 little_endian = 0;
6032 else if (strncasecmp (s, "LE", 2))
6033 little_endian = 1;
6034 else
6035 {
6036 inst.error = _("valid endian specifiers are be or le");
6037 return FAIL;
6038 }
6039
6040 if (ISALNUM (s[2]) || s[2] == '_')
6041 {
6042 inst.error = _("valid endian specifiers are be or le");
6043 return FAIL;
6044 }
6045
6046 *str = s + 2;
6047 return little_endian;
6048 }
6049
6050 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6051 value suitable for poking into the rotate field of an sxt or sxta
6052 instruction, or FAIL on error. */
6053
6054 static int
6055 parse_ror (char **str)
6056 {
6057 int rot;
6058 char *s = *str;
6059
6060 if (strncasecmp (s, "ROR", 3) == 0)
6061 s += 3;
6062 else
6063 {
6064 inst.error = _("missing rotation field after comma");
6065 return FAIL;
6066 }
6067
6068 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6069 return FAIL;
6070
6071 switch (rot)
6072 {
6073 case 0: *str = s; return 0x0;
6074 case 8: *str = s; return 0x1;
6075 case 16: *str = s; return 0x2;
6076 case 24: *str = s; return 0x3;
6077
6078 default:
6079 inst.error = _("rotation can only be 0, 8, 16, or 24");
6080 return FAIL;
6081 }
6082 }
6083
6084 /* Parse a conditional code (from conds[] below). The value returned is in the
6085 range 0 .. 14, or FAIL. */
6086 static int
6087 parse_cond (char **str)
6088 {
6089 char *q;
6090 const struct asm_cond *c;
6091 int n;
6092 /* Condition codes are always 2 characters, so matching up to
6093 3 characters is sufficient. */
6094 char cond[3];
6095
6096 q = *str;
6097 n = 0;
6098 while (ISALPHA (*q) && n < 3)
6099 {
6100 cond[n] = TOLOWER (*q);
6101 q++;
6102 n++;
6103 }
6104
6105 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6106 if (!c)
6107 {
6108 inst.error = _("condition required");
6109 return FAIL;
6110 }
6111
6112 *str = q;
6113 return c->value;
6114 }
6115
6116 /* Record a use of the given feature. */
6117 static void
6118 record_feature_use (const arm_feature_set *feature)
6119 {
6120 if (thumb_mode)
6121 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
6122 else
6123 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
6124 }
6125
6126 /* If the given feature available in the selected CPU, mark it as used.
6127 Returns TRUE iff feature is available. */
6128 static bfd_boolean
6129 mark_feature_used (const arm_feature_set *feature)
6130 {
6131 /* Ensure the option is valid on the current architecture. */
6132 if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
6133 return FALSE;
6134
6135 /* Add the appropriate architecture feature for the barrier option used.
6136 */
6137 record_feature_use (feature);
6138
6139 return TRUE;
6140 }
6141
6142 /* Parse an option for a barrier instruction. Returns the encoding for the
6143 option, or FAIL. */
6144 static int
6145 parse_barrier (char **str)
6146 {
6147 char *p, *q;
6148 const struct asm_barrier_opt *o;
6149
6150 p = q = *str;
6151 while (ISALPHA (*q))
6152 q++;
6153
6154 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6155 q - p);
6156 if (!o)
6157 return FAIL;
6158
6159 if (!mark_feature_used (&o->arch))
6160 return FAIL;
6161
6162 *str = q;
6163 return o->value;
6164 }
6165
6166 /* Parse the operands of a table branch instruction. Similar to a memory
6167 operand. */
6168 static int
6169 parse_tb (char **str)
6170 {
6171 char * p = *str;
6172 int reg;
6173
6174 if (skip_past_char (&p, '[') == FAIL)
6175 {
6176 inst.error = _("'[' expected");
6177 return FAIL;
6178 }
6179
6180 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6181 {
6182 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6183 return FAIL;
6184 }
6185 inst.operands[0].reg = reg;
6186
6187 if (skip_past_comma (&p) == FAIL)
6188 {
6189 inst.error = _("',' expected");
6190 return FAIL;
6191 }
6192
6193 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6194 {
6195 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6196 return FAIL;
6197 }
6198 inst.operands[0].imm = reg;
6199
6200 if (skip_past_comma (&p) == SUCCESS)
6201 {
6202 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6203 return FAIL;
6204 if (inst.reloc.exp.X_add_number != 1)
6205 {
6206 inst.error = _("invalid shift");
6207 return FAIL;
6208 }
6209 inst.operands[0].shifted = 1;
6210 }
6211
6212 if (skip_past_char (&p, ']') == FAIL)
6213 {
6214 inst.error = _("']' expected");
6215 return FAIL;
6216 }
6217 *str = p;
6218 return SUCCESS;
6219 }
6220
/* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
   information on the types the operands can take and how they are encoded.
   Up to four operands may be read; this function handles setting the
   ".present" field for each read operand itself.
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   else returns FAIL.  */

static int
parse_neon_mov (char **str, int *which_operand)
{
  /* I advances locally as operands are consumed; the caller's counter
     is only updated (via WHICH_OPERAND) once the whole parse succeeds.  */
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* Case 5: a second core register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15: two core registers follow.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
	       == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14: a second single-precision register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands. Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
6443
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  The ARM code occupies the low 16
   bits of the matcher value and the Thumb code the high 16 bits;
   parse_operands selects the appropriate half at match time.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))

/* Matcher codes for parse_operands.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,	/* VFP single or double-precision register list (& quad) */
  OP_NRDLST,	/* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,	/* Neon element/structure list */

  OP_RNDQ_I0,	/* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0,	/* VFP S or D reg, or floating point immediate zero.  */
  OP_RR_RNSC,	/* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC,	/* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,	/* Neon D reg, or Neon scalar.  */
  OP_VMOV,	/* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  OP_RNDQ_I63b,	/* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z,	/* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,	/* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,	/*		   0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,	/* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,	/* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,	 /*				1 .. 32 */
  OP_oI32z,	 /*				0 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,	 /* Optional Neon double precision register */
  OP_oRNQ,	 /* Optional Neon quad precision register */
  OP_oRNDQ,	 /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  /* Codes >= this value are optional; parse_operands may backtrack
     past them without reporting an error.  */
  OP_FIRST_OPTIONAL = OP_oI7b
};
6579
6580 /* Generic instruction operand parser. This does no encoding and no
6581 semantic validation; it merely squirrels values away in the inst
6582 structure. Returns SUCCESS or FAIL depending on whether the
6583 specified grammar matched. */
6584 static int
6585 parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
6586 {
6587 unsigned const int *upat = pattern;
6588 char *backtrack_pos = 0;
6589 const char *backtrack_error = 0;
6590 int i, val = 0, backtrack_index = 0;
6591 enum arm_reg_type rtype;
6592 parse_operand_result result;
6593 unsigned int op_parse_code;
6594
6595 #define po_char_or_fail(chr) \
6596 do \
6597 { \
6598 if (skip_past_char (&str, chr) == FAIL) \
6599 goto bad_args; \
6600 } \
6601 while (0)
6602
6603 #define po_reg_or_fail(regtype) \
6604 do \
6605 { \
6606 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6607 & inst.operands[i].vectype); \
6608 if (val == FAIL) \
6609 { \
6610 first_error (_(reg_expected_msgs[regtype])); \
6611 goto failure; \
6612 } \
6613 inst.operands[i].reg = val; \
6614 inst.operands[i].isreg = 1; \
6615 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6616 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6617 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6618 || rtype == REG_TYPE_VFD \
6619 || rtype == REG_TYPE_NQ); \
6620 } \
6621 while (0)
6622
6623 #define po_reg_or_goto(regtype, label) \
6624 do \
6625 { \
6626 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6627 & inst.operands[i].vectype); \
6628 if (val == FAIL) \
6629 goto label; \
6630 \
6631 inst.operands[i].reg = val; \
6632 inst.operands[i].isreg = 1; \
6633 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6634 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6635 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6636 || rtype == REG_TYPE_VFD \
6637 || rtype == REG_TYPE_NQ); \
6638 } \
6639 while (0)
6640
6641 #define po_imm_or_fail(min, max, popt) \
6642 do \
6643 { \
6644 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6645 goto failure; \
6646 inst.operands[i].imm = val; \
6647 } \
6648 while (0)
6649
6650 #define po_scalar_or_goto(elsz, label) \
6651 do \
6652 { \
6653 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6654 if (val == FAIL) \
6655 goto label; \
6656 inst.operands[i].reg = val; \
6657 inst.operands[i].isscalar = 1; \
6658 } \
6659 while (0)
6660
6661 #define po_misc_or_fail(expr) \
6662 do \
6663 { \
6664 if (expr) \
6665 goto failure; \
6666 } \
6667 while (0)
6668
6669 #define po_misc_or_fail_no_backtrack(expr) \
6670 do \
6671 { \
6672 result = expr; \
6673 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6674 backtrack_pos = 0; \
6675 if (result != PARSE_OPERAND_SUCCESS) \
6676 goto failure; \
6677 } \
6678 while (0)
6679
6680 #define po_barrier_or_imm(str) \
6681 do \
6682 { \
6683 val = parse_barrier (&str); \
6684 if (val == FAIL && ! ISALPHA (*str)) \
6685 goto immediate; \
6686 if (val == FAIL \
6687 /* ISB can only take SY as an option. */ \
6688 || ((inst.instruction & 0xf0) == 0x60 \
6689 && val != 0xf)) \
6690 { \
6691 inst.error = _("invalid barrier type"); \
6692 backtrack_pos = 0; \
6693 goto failure; \
6694 } \
6695 } \
6696 while (0)
6697
6698 skip_whitespace (str);
6699
6700 for (i = 0; upat[i] != OP_stop; i++)
6701 {
6702 op_parse_code = upat[i];
6703 if (op_parse_code >= 1<<16)
6704 op_parse_code = thumb ? (op_parse_code >> 16)
6705 : (op_parse_code & ((1<<16)-1));
6706
6707 if (op_parse_code >= OP_FIRST_OPTIONAL)
6708 {
6709 /* Remember where we are in case we need to backtrack. */
6710 gas_assert (!backtrack_pos);
6711 backtrack_pos = str;
6712 backtrack_error = inst.error;
6713 backtrack_index = i;
6714 }
6715
6716 if (i > 0 && (i > 1 || inst.operands[0].present))
6717 po_char_or_fail (',');
6718
6719 switch (op_parse_code)
6720 {
6721 /* Registers */
6722 case OP_oRRnpc:
6723 case OP_oRRnpcsp:
6724 case OP_RRnpc:
6725 case OP_RRnpcsp:
6726 case OP_oRR:
6727 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
6728 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
6729 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
6730 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
6731 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
6732 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
6733 case OP_oRND:
6734 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
6735 case OP_RVC:
6736 po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
6737 break;
6738 /* Also accept generic coprocessor regs for unknown registers. */
6739 coproc_reg:
6740 po_reg_or_fail (REG_TYPE_CN);
6741 break;
6742 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
6743 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
6744 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
6745 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
6746 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
6747 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
6748 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
6749 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
6750 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
6751 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
6752 case OP_oRNQ:
6753 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
6754 case OP_oRNDQ:
6755 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
6756 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
6757 case OP_oRNSDQ:
6758 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
6759
6760 /* Neon scalar. Using an element size of 8 means that some invalid
6761 scalars are accepted here, so deal with those in later code. */
6762 case OP_RNSC: po_scalar_or_goto (8, failure); break;
6763
6764 case OP_RNDQ_I0:
6765 {
6766 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
6767 break;
6768 try_imm0:
6769 po_imm_or_fail (0, 0, TRUE);
6770 }
6771 break;
6772
6773 case OP_RVSD_I0:
6774 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
6775 break;
6776
6777 case OP_RSVD_FI0:
6778 {
6779 po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
6780 break;
6781 try_ifimm0:
6782 if (parse_ifimm_zero (&str))
6783 inst.operands[i].imm = 0;
6784 else
6785 {
6786 inst.error
6787 = _("only floating point zero is allowed as immediate value");
6788 goto failure;
6789 }
6790 }
6791 break;
6792
6793 case OP_RR_RNSC:
6794 {
6795 po_scalar_or_goto (8, try_rr);
6796 break;
6797 try_rr:
6798 po_reg_or_fail (REG_TYPE_RN);
6799 }
6800 break;
6801
6802 case OP_RNSDQ_RNSC:
6803 {
6804 po_scalar_or_goto (8, try_nsdq);
6805 break;
6806 try_nsdq:
6807 po_reg_or_fail (REG_TYPE_NSDQ);
6808 }
6809 break;
6810
6811 case OP_RNDQ_RNSC:
6812 {
6813 po_scalar_or_goto (8, try_ndq);
6814 break;
6815 try_ndq:
6816 po_reg_or_fail (REG_TYPE_NDQ);
6817 }
6818 break;
6819
6820 case OP_RND_RNSC:
6821 {
6822 po_scalar_or_goto (8, try_vfd);
6823 break;
6824 try_vfd:
6825 po_reg_or_fail (REG_TYPE_VFD);
6826 }
6827 break;
6828
6829 case OP_VMOV:
6830 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6831 not careful then bad things might happen. */
6832 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
6833 break;
6834
6835 case OP_RNDQ_Ibig:
6836 {
6837 po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
6838 break;
6839 try_immbig:
6840 /* There's a possibility of getting a 64-bit immediate here, so
6841 we need special handling. */
6842 if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
6843 == FAIL)
6844 {
6845 inst.error = _("immediate value is out of range");
6846 goto failure;
6847 }
6848 }
6849 break;
6850
6851 case OP_RNDQ_I63b:
6852 {
6853 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
6854 break;
6855 try_shimm:
6856 po_imm_or_fail (0, 63, TRUE);
6857 }
6858 break;
6859
6860 case OP_RRnpcb:
6861 po_char_or_fail ('[');
6862 po_reg_or_fail (REG_TYPE_RN);
6863 po_char_or_fail (']');
6864 break;
6865
6866 case OP_RRnpctw:
6867 case OP_RRw:
6868 case OP_oRRw:
6869 po_reg_or_fail (REG_TYPE_RN);
6870 if (skip_past_char (&str, '!') == SUCCESS)
6871 inst.operands[i].writeback = 1;
6872 break;
6873
6874 /* Immediates */
6875 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
6876 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
6877 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
6878 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
6879 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
6880 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
6881 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
6882 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
6883 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
6884 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
6885 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
6886 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
6887
6888 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
6889 case OP_oI7b:
6890 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
6891 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
6892 case OP_oI31b:
6893 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
6894 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
6895 case OP_oI32z: po_imm_or_fail ( 0, 32, TRUE); break;
6896 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
6897
6898 /* Immediate variants */
6899 case OP_oI255c:
6900 po_char_or_fail ('{');
6901 po_imm_or_fail (0, 255, TRUE);
6902 po_char_or_fail ('}');
6903 break;
6904
6905 case OP_I31w:
6906 /* The expression parser chokes on a trailing !, so we have
6907 to find it first and zap it. */
6908 {
6909 char *s = str;
6910 while (*s && *s != ',')
6911 s++;
6912 if (s[-1] == '!')
6913 {
6914 s[-1] = '\0';
6915 inst.operands[i].writeback = 1;
6916 }
6917 po_imm_or_fail (0, 31, TRUE);
6918 if (str == s - 1)
6919 str = s;
6920 }
6921 break;
6922
6923 /* Expressions */
6924 case OP_EXPi: EXPi:
6925 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6926 GE_OPT_PREFIX));
6927 break;
6928
6929 case OP_EXP:
6930 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6931 GE_NO_PREFIX));
6932 break;
6933
6934 case OP_EXPr: EXPr:
6935 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6936 GE_NO_PREFIX));
6937 if (inst.reloc.exp.X_op == O_symbol)
6938 {
6939 val = parse_reloc (&str);
6940 if (val == -1)
6941 {
6942 inst.error = _("unrecognized relocation suffix");
6943 goto failure;
6944 }
6945 else if (val != BFD_RELOC_UNUSED)
6946 {
6947 inst.operands[i].imm = val;
6948 inst.operands[i].hasreloc = 1;
6949 }
6950 }
6951 break;
6952
6953 /* Operand for MOVW or MOVT. */
6954 case OP_HALF:
6955 po_misc_or_fail (parse_half (&str));
6956 break;
6957
6958 /* Register or expression. */
6959 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
6960 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
6961
6962 /* Register or immediate. */
6963 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
6964 I0: po_imm_or_fail (0, 0, FALSE); break;
6965
6966 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
6967 IF:
6968 if (!is_immediate_prefix (*str))
6969 goto bad_args;
6970 str++;
6971 val = parse_fpa_immediate (&str);
6972 if (val == FAIL)
6973 goto failure;
6974 /* FPA immediates are encoded as registers 8-15.
6975 parse_fpa_immediate has already applied the offset. */
6976 inst.operands[i].reg = val;
6977 inst.operands[i].isreg = 1;
6978 break;
6979
6980 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
6981 I32z: po_imm_or_fail (0, 32, FALSE); break;
6982
6983 /* Two kinds of register. */
6984 case OP_RIWR_RIWC:
6985 {
6986 struct reg_entry *rege = arm_reg_parse_multi (&str);
6987 if (!rege
6988 || (rege->type != REG_TYPE_MMXWR
6989 && rege->type != REG_TYPE_MMXWC
6990 && rege->type != REG_TYPE_MMXWCG))
6991 {
6992 inst.error = _("iWMMXt data or control register expected");
6993 goto failure;
6994 }
6995 inst.operands[i].reg = rege->number;
6996 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
6997 }
6998 break;
6999
7000 case OP_RIWC_RIWG:
7001 {
7002 struct reg_entry *rege = arm_reg_parse_multi (&str);
7003 if (!rege
7004 || (rege->type != REG_TYPE_MMXWC
7005 && rege->type != REG_TYPE_MMXWCG))
7006 {
7007 inst.error = _("iWMMXt control register expected");
7008 goto failure;
7009 }
7010 inst.operands[i].reg = rege->number;
7011 inst.operands[i].isreg = 1;
7012 }
7013 break;
7014
7015 /* Misc */
7016 case OP_CPSF: val = parse_cps_flags (&str); break;
7017 case OP_ENDI: val = parse_endian_specifier (&str); break;
7018 case OP_oROR: val = parse_ror (&str); break;
7019 case OP_COND: val = parse_cond (&str); break;
7020 case OP_oBARRIER_I15:
7021 po_barrier_or_imm (str); break;
7022 immediate:
7023 if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
7024 goto failure;
7025 break;
7026
7027 case OP_wPSR:
7028 case OP_rPSR:
7029 po_reg_or_goto (REG_TYPE_RNB, try_psr);
7030 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
7031 {
7032 inst.error = _("Banked registers are not available with this "
7033 "architecture.");
7034 goto failure;
7035 }
7036 break;
7037 try_psr:
7038 val = parse_psr (&str, op_parse_code == OP_wPSR);
7039 break;
7040
7041 case OP_APSR_RR:
7042 po_reg_or_goto (REG_TYPE_RN, try_apsr);
7043 break;
7044 try_apsr:
7045 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7046 instruction). */
7047 if (strncasecmp (str, "APSR_", 5) == 0)
7048 {
7049 unsigned found = 0;
7050 str += 5;
7051 while (found < 15)
7052 switch (*str++)
7053 {
7054 case 'c': found = (found & 1) ? 16 : found | 1; break;
7055 case 'n': found = (found & 2) ? 16 : found | 2; break;
7056 case 'z': found = (found & 4) ? 16 : found | 4; break;
7057 case 'v': found = (found & 8) ? 16 : found | 8; break;
7058 default: found = 16;
7059 }
7060 if (found != 15)
7061 goto failure;
7062 inst.operands[i].isvec = 1;
7063 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7064 inst.operands[i].reg = REG_PC;
7065 }
7066 else
7067 goto failure;
7068 break;
7069
7070 case OP_TB:
7071 po_misc_or_fail (parse_tb (&str));
7072 break;
7073
7074 /* Register lists. */
7075 case OP_REGLST:
7076 val = parse_reg_list (&str);
7077 if (*str == '^')
7078 {
7079 inst.operands[i].writeback = 1;
7080 str++;
7081 }
7082 break;
7083
7084 case OP_VRSLST:
7085 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
7086 break;
7087
7088 case OP_VRDLST:
7089 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
7090 break;
7091
7092 case OP_VRSDLST:
7093 /* Allow Q registers too. */
7094 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7095 REGLIST_NEON_D);
7096 if (val == FAIL)
7097 {
7098 inst.error = NULL;
7099 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7100 REGLIST_VFP_S);
7101 inst.operands[i].issingle = 1;
7102 }
7103 break;
7104
7105 case OP_NRDLST:
7106 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7107 REGLIST_NEON_D);
7108 break;
7109
7110 case OP_NSTRLST:
7111 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7112 &inst.operands[i].vectype);
7113 break;
7114
7115 /* Addressing modes */
7116 case OP_ADDR:
7117 po_misc_or_fail (parse_address (&str, i));
7118 break;
7119
7120 case OP_ADDRGLDR:
7121 po_misc_or_fail_no_backtrack (
7122 parse_address_group_reloc (&str, i, GROUP_LDR));
7123 break;
7124
7125 case OP_ADDRGLDRS:
7126 po_misc_or_fail_no_backtrack (
7127 parse_address_group_reloc (&str, i, GROUP_LDRS));
7128 break;
7129
7130 case OP_ADDRGLDC:
7131 po_misc_or_fail_no_backtrack (
7132 parse_address_group_reloc (&str, i, GROUP_LDC));
7133 break;
7134
7135 case OP_SH:
7136 po_misc_or_fail (parse_shifter_operand (&str, i));
7137 break;
7138
7139 case OP_SHG:
7140 po_misc_or_fail_no_backtrack (
7141 parse_shifter_operand_group_reloc (&str, i));
7142 break;
7143
7144 case OP_oSHll:
7145 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
7146 break;
7147
7148 case OP_oSHar:
7149 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
7150 break;
7151
7152 case OP_oSHllar:
7153 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
7154 break;
7155
7156 default:
7157 as_fatal (_("unhandled operand code %d"), op_parse_code);
7158 }
7159
7160 /* Various value-based sanity checks and shared operations. We
7161 do not signal immediate failures for the register constraints;
7162 this allows a syntax error to take precedence. */
7163 switch (op_parse_code)
7164 {
7165 case OP_oRRnpc:
7166 case OP_RRnpc:
7167 case OP_RRnpcb:
7168 case OP_RRw:
7169 case OP_oRRw:
7170 case OP_RRnpc_I0:
7171 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
7172 inst.error = BAD_PC;
7173 break;
7174
7175 case OP_oRRnpcsp:
7176 case OP_RRnpcsp:
7177 if (inst.operands[i].isreg)
7178 {
7179 if (inst.operands[i].reg == REG_PC)
7180 inst.error = BAD_PC;
7181 else if (inst.operands[i].reg == REG_SP)
7182 inst.error = BAD_SP;
7183 }
7184 break;
7185
7186 case OP_RRnpctw:
7187 if (inst.operands[i].isreg
7188 && inst.operands[i].reg == REG_PC
7189 && (inst.operands[i].writeback || thumb))
7190 inst.error = BAD_PC;
7191 break;
7192
7193 case OP_CPSF:
7194 case OP_ENDI:
7195 case OP_oROR:
7196 case OP_wPSR:
7197 case OP_rPSR:
7198 case OP_COND:
7199 case OP_oBARRIER_I15:
7200 case OP_REGLST:
7201 case OP_VRSLST:
7202 case OP_VRDLST:
7203 case OP_VRSDLST:
7204 case OP_NRDLST:
7205 case OP_NSTRLST:
7206 if (val == FAIL)
7207 goto failure;
7208 inst.operands[i].imm = val;
7209 break;
7210
7211 default:
7212 break;
7213 }
7214
7215 /* If we get here, this operand was successfully parsed. */
7216 inst.operands[i].present = 1;
7217 continue;
7218
7219 bad_args:
7220 inst.error = BAD_ARGS;
7221
7222 failure:
7223 if (!backtrack_pos)
7224 {
7225 /* The parse routine should already have set inst.error, but set a
7226 default here just in case. */
7227 if (!inst.error)
7228 inst.error = _("syntax error");
7229 return FAIL;
7230 }
7231
7232 /* Do not backtrack over a trailing optional argument that
7233 absorbed some text. We will only fail again, with the
7234 'garbage following instruction' error message, which is
7235 probably less helpful than the current one. */
7236 if (backtrack_index == i && backtrack_pos != str
7237 && upat[i+1] == OP_stop)
7238 {
7239 if (!inst.error)
7240 inst.error = _("syntax error");
7241 return FAIL;
7242 }
7243
7244 /* Try again, skipping the optional argument at backtrack_pos. */
7245 str = backtrack_pos;
7246 inst.error = backtrack_error;
7247 inst.operands[backtrack_index].present = 0;
7248 i = backtrack_index;
7249 backtrack_pos = 0;
7250 }
7251
7252 /* Check that we have parsed all the arguments. */
7253 if (*str != '\0' && !inst.error)
7254 inst.error = _("garbage following instruction");
7255
7256 return inst.error ? FAIL : SUCCESS;
7257 }
7258
7259 #undef po_char_or_fail
7260 #undef po_reg_or_fail
7261 #undef po_reg_or_goto
7262 #undef po_imm_or_fail
7263 #undef po_scalar_or_fail
7264 #undef po_barrier_or_imm
7265
/* Shorthand macro for instruction encoding functions issuing errors.
   If EXPR is true, record ERR in inst.error and return from the
   *calling* function; consequently it may only be used in functions
   returning void.  ERR is not evaluated unless EXPR is true.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
7277
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.
   Like `constraint', this returns from the calling function, so it is
   only usable in void functions.  REG is evaluated up to three times;
   pass a simple lvalue, not an expression with side effects.  */
#define reject_bad_reg(reg)				\
  do							\
   if (reg == REG_SP || reg == REG_PC)			\
     {							\
       inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC;	\
       return;						\
     }							\
  while (0)
7289
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  Only emits the diagnostic when deprecation warnings
   are enabled (warn_on_deprecated).  */
#define warn_deprecated_sp(reg)			\
  do						\
    if (warn_on_deprecated && reg == REG_SP)	\
      as_tsktsk (_("use of r13 is deprecated"));	\
  while (0)
7297
7298 /* Functions for operand encoding. ARM, then Thumb. */
7299
/* Rotate the 32-bit value V left by N bits; N is reduced modulo 32,
   so N == 0 and N == 32 are both safe.  Both arguments are evaluated
   more than once — avoid side effects.  */
#define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7301
7302 /* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.
7303
7304 The only binary encoding difference is the Coprocessor number. Coprocessor
7305 9 is used for half-precision calculations or conversions. The format of the
   instruction is the same as the equivalent Coprocessor 10 instruction that
7307 exists for Single-Precision operation. */
7308
static void
do_scalar_fp16_v82_encode (void)
{
  /* Conditional execution of these instructions is UNPREDICTABLE;
     warn but still assemble as written.  */
  if (inst.cond != COND_ALWAYS)
    as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
	       " the behaviour is UNPREDICTABLE"));
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
	      _(BAD_FP16));

  /* Replace the coprocessor field (bits [11:8]) with 9, the
     half-precision coprocessor number.  */
  inst.instruction = (inst.instruction & 0xfffff0ff) | 0x900;
  mark_feature_used (&arm_ext_fp16);
}
7321
7322 /* If VAL can be encoded in the immediate field of an ARM instruction,
7323 return the encoded form. Otherwise, return FAIL. */
7324
7325 static unsigned int
7326 encode_arm_immediate (unsigned int val)
7327 {
7328 unsigned int a, i;
7329
7330 if (val <= 0xff)
7331 return val;
7332
7333 for (i = 2; i < 32; i += 2)
7334 if ((a = rotate_left (val, i)) <= 0xff)
7335 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
7336
7337 return FAIL;
7338 }
7339
7340 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7341 return the encoded form. Otherwise, return FAIL. */
7342 static unsigned int
7343 encode_thumb32_immediate (unsigned int val)
7344 {
7345 unsigned int a, i;
7346
7347 if (val <= 0xff)
7348 return val;
7349
7350 for (i = 1; i <= 24; i++)
7351 {
7352 a = val >> i;
7353 if ((val & ~(0xff << i)) == 0)
7354 return ((val >> i) & 0x7f) | ((32 - i) << 7);
7355 }
7356
7357 a = val & 0xff;
7358 if (val == ((a << 16) | a))
7359 return 0x100 | a;
7360 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
7361 return 0x300 | a;
7362
7363 a = val & 0xff00;
7364 if (val == ((a << 16) | a))
7365 return 0x200 | (a >> 8);
7366
7367 return FAIL;
7368 }
7369 /* Encode a VFP SP or DP register number into inst.instruction. */
7370
static void
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
{
  /* D registers 16-31 exist only with the VFP D32 extension; either
     record that the extension was used or reject the register.  */
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      && reg > 15)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	{
	  first_error (_("D register out of range for selected VFP version"));
	  return;
	}
    }

  /* A single-precision register number splits into a 4-bit field plus
     a low "extension" bit; a double-precision number splits into a
     4-bit field plus a high (D32) bit.  The field positions depend on
     which operand slot (Sd/Sn/Sm, Dd/Dn/Dm) is being encoded.  */
  switch (pos)
    {
    case VFP_REG_Sd:
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      break;

    case VFP_REG_Sn:
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      break;

    case VFP_REG_Sm:
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      break;

    case VFP_REG_Dd:
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      break;

    case VFP_REG_Dn:
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      break;

    case VFP_REG_Dm:
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
      break;

    default:
      abort ();
    }
}
7423
/* Encode a <shift> in an ARM-format instruction.  The immediate,
   if any, is handled by md_apply_fix.  RRX is encoded as ROR with a
   zero shift amount; register-specified shifts set the shift-by-
   register bit with Rs in bits [11:8].  */
static void
encode_arm_shift (int i)
{
  if (inst.operands[i].shift_kind == SHIFT_RRX)
    inst.instruction |= SHIFT_ROR << 5;
  else
    {
      inst.instruction |= inst.operands[i].shift_kind << 5;
      if (inst.operands[i].immisreg)
	{
	  inst.instruction |= SHIFT_BY_REG;
	  inst.instruction |= inst.operands[i].imm << 8;
	}
      else
	/* Immediate shift amount: defer to md_apply_fix via a
	   shift-immediate relocation.  */
	inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
    }
}
7443
/* Encode operand I as an ARM data-processing shifter operand: either
   a (possibly shifted) register, or a rotated 8-bit immediate.  */
static void
encode_arm_shifter_operand (int i)
{
  if (inst.operands[i].isreg)
    {
      inst.instruction |= inst.operands[i].reg;
      encode_arm_shift (i);
    }
  else
    {
      inst.instruction |= INST_IMMEDIATE;
      /* When the reloc type is BFD_RELOC_ARM_IMMEDIATE the immediate
	 is inserted later by md_apply_fix; otherwise it has already
	 been encoded and can be OR-ed in directly.  */
      if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
	inst.instruction |= inst.operands[i].imm;
    }
}
7459
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encodes the base register and the P (pre-index) / W (writeback)
   bits for operand I.  IS_T is true when encoding a user-mode (T)
   load/store, which only permits post-indexed addressing.  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* PR 14260:
     Generate an error if the operand is not a register.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      /* Post-indexing always writes the base back; the parser
	 guarantees this.  T-form instructions set W to select the
	 user-mode variant.  */
      gas_assert (inst.operands[i].writeback);
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Warn if the base register (bits [19:16]) is also the transfer
     register (bits [15:12]) and write-back will occur.  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
7502
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset, optionally scaled.  PC may be neither the
	 offset register nor a written-back base.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.reloc */
    {
      if (is_pc && !inst.reloc.pc_rel)
	{
	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_tsktsk (_("use of PC in this instruction is deprecated"));
	}

      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;
	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
	}
    }
}
7562
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no shifter: a scaled register offset is never valid.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_t && inst.operands[i].reg == REG_PC)),
		  BAD_PC_ADDRESSING);
      constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
		  BAD_PC_WRITEBACK);
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.reloc */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      /* Mode 3 uses a split 8-bit immediate (I bit set).  */
      inst.instruction |= HWOFFSET_IMM;
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;

	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
	}
    }
}
7606
7607 /* Write immediate bits [7:0] to the following locations:
7608
7609 |28/24|23 19|18 16|15 4|3 0|
7610 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7611
7612 This function is used by VMOV/VMVN/VORR/VBIC. */
7613
static void
neon_write_immbits (unsigned immbits)
{
  inst.instruction |= immbits & 0xf;			/* e f g h -> [3:0].  */
  inst.instruction |= ((immbits >> 4) & 0x7) << 16;	/* b c d  -> [18:16].  */
  /* Bit 'a' goes to bit 28 in the Thumb encoding, bit 24 in ARM.  */
  inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
}
7621
/* Invert the low-order SIZE bits of XHI:XLO, clearing any bits above
   them.  XHI is only modified when SIZE is 64.  Either pointer may be
   NULL, in which case that half is left alone.  Aborts on an
   unsupported SIZE.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned lo_mask;
  unsigned hi_mask = 0;

  /* Select the masks that bound the inversion for this element size.  */
  switch (size)
    {
    case 8:
      lo_mask = 0xff;
      break;

    case 16:
      lo_mask = 0xffff;
      break;

    case 32:
      lo_mask = 0xffffffff;
      break;

    case 64:
      lo_mask = 0xffffffff;
      hi_mask = 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = ~*xlo & lo_mask;

  if (xhi && hi_mask)
    *xhi = ~*xhi & hi_mask;
}
7658
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D — i.e. each byte of IMM is either all-zeros or
   all-ones.  Returns 1 or 0.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  int byte;

  for (byte = 0; byte < 4; byte++)
    {
      unsigned field = (imm >> (byte * 8)) & 0xff;

      if (field != 0 && field != 0xff)
	return 0;
    }

  return 1;
}
7670
/* For an immediate of the above form, return 0bABCD: one bit per
   byte, taken from that byte's least significant bit.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned result = 0;
  int byte;

  for (byte = 0; byte < 4; byte++)
    result |= ((imm >> (byte * 8)) & 1) << byte;

  return result;
}
7679
/* Compress a quarter-float representation to 0b...000 abcdefgh: the
   sign bit plus the seven float bits below the top exponent bit.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned low_bits = (imm >> 19) & 0x7f;
  unsigned sign_bit = (imm >> 24) & 0x80;

  return low_bits | sign_bit;
}
7687
7688 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
7689 the instruction. *OP is passed as the initial value of the op field, and
7690 may be set to a different value depending on the constant (i.e.
7691 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
7692 MVN). If the immediate looks like a repeated pattern then also
7693 try smaller element sizes. */
7694
static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* CMODE 0xF: an 8-bit quarter-precision float (only valid at element
     size 32, and only for the non-inverted form).  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* CMODE 0xE with OP forced to 1: all 8 bytes individually 0x00 or
	 0xff, encoded as one bit per byte.  */
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* Other 64-bit values are only representable when both 32-bit
	 halves are identical; then fall through to the 32-bit cases.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* CMODEs 0x0/0x2/0x4/0x6: a single non-zero byte within the
	 32-bit element.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      /* CMODEs 0xC/0xD: a byte followed by 8 or 16 one bits.  */
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* Retry at element size 16 if the value is a repeated 16-bit
	 pattern.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* CMODEs 0x8/0xA: a single non-zero byte within the 16-bit
	 element.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* Retry at element size 8 if the value is a repeated byte.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  /* CMODE 0xE with OP=0: a plain 8-bit element immediate.  */
  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
7797
7798 #if defined BFD_HOST_64_BIT
7799 /* Returns TRUE if double precision value V may be cast
7800 to single precision without loss of accuracy. */
7801
static bfd_boolean
is_double_a_single (bfd_int64_t v)
{
  /* IEEE double layout: 1 sign bit, 11 exponent bits, 52 mantissa bits.  */
  int exp = (int)((v >> 52) & 0x7FF);
  bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);

  /* Convertible when the exponent is zero, all-ones (inf/NaN), or
     within single precision's biased range, AND the low 29 mantissa
     bits (those that would be discarded by the narrowing) are zero.  */
  return (exp == 0 || exp == 0x7FF
	  || (exp >= 1023 - 126 && exp <= 1023 + 127))
	 && (mantissa & 0x1FFFFFFFl) == 0;
}
7812
7813 /* Returns a double precision value casted to single precision
7814 (ignoring the least significant bits in exponent and mantissa). */
7815
static int
double_to_single (bfd_int64_t v)
{
  /* Split the double into sign, biased exponent and mantissa fields.  */
  int sign = (int) ((v >> 63) & 1l);
  int exp = (int) ((v >> 52) & 0x7FF);
  bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);

  if (exp == 0x7FF)
    /* Inf/NaN: map to the single-precision all-ones exponent.  */
    exp = 0xFF;
  else
    {
      /* Rebias from double (1023) to single (127).  */
      exp = exp - 1023 + 127;
      if (exp >= 0xFF)
	{
	  /* Infinity.  */
	  /* NOTE(review): 0x7F here looks odd (single-precision
	     infinity would be exponent 0xFF), but this branch is
	     unreachable for callers that first check
	     is_double_a_single, which rejects out-of-range
	     exponents — confirm before relying on it.  */
	  exp = 0x7F;
	  mantissa = 0;
	}
      else if (exp < 0)
	{
	  /* No denormalized numbers.  */
	  exp = 0;
	  mantissa = 0;
	}
    }
  /* Drop the 29 mantissa bits that single precision cannot hold.  */
  mantissa >>= 29;
  return (sign << 31) | (exp << 23) | mantissa;
}
7844 #endif /* BFD_HOST_64_BIT */
7845
/* Kind of "=constant" literal being handled by move_or_literal_pool;
   selects the instruction set / register file of the load.  */
enum lit_type
{
  CONST_THUMB,	/* Thumb-state load.  */
  CONST_ARM,	/* ARM-state load.  */
  CONST_VEC	/* Vector (VFP/Neon) load.  */
};
7852
7853 static void do_vfp_nsyn_opcode (const char *);
7854
7855 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
7856 Determine whether it can be performed with a move instruction; if
7857 it can, convert inst.instruction to that move instruction and
7858 return TRUE; if it can't, convert inst.instruction to a literal-pool
7859 load and return FALSE. If this is not a valid thing to do in the
7860 current context, set inst.error and return TRUE.
7861
7862 inst.operands[i] describes the destination register. */
7863
static bfd_boolean
move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
{
  unsigned long tbit;
  bfd_boolean thumb_p = (t == CONST_THUMB);
  bfd_boolean arm_p   = (t == CONST_ARM);

  /* Pick the load bit appropriate to the encoding; only loads may use
     the "=expr" pseudo operation.  */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }

  if (inst.reloc.exp.X_op != O_constant
      && inst.reloc.exp.X_op != O_symbol
      && inst.reloc.exp.X_op != O_big)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }

  if (inst.reloc.exp.X_op == O_constant
      || inst.reloc.exp.X_op == O_big)
    {
#if defined BFD_HOST_64_BIT
      bfd_int64_t v;
#else
      offsetT v;
#endif
      if (inst.reloc.exp.X_op == O_big)
	{
	  /* Bignum: reassemble the value from littlenum words.  An
	     X_add_number of -1 marks a converted floating-point
	     constant.  */
	  LITTLENUM_TYPE w[X_PRECISION];
	  LITTLENUM_TYPE * l;

	  if (inst.reloc.exp.X_add_number == -1)
	    {
	      gen_to_words (w, X_PRECISION, E_PRECISION);
	      l = w;
	      /* FIXME: Should we check words w[2..5] ?  */
	    }
	  else
	    l = generic_bignum;

#if defined BFD_HOST_64_BIT
	  v =
	    ((((((((bfd_int64_t) l[3] & LITTLENUM_MASK)
		  << LITTLENUM_NUMBER_OF_BITS)
		 | ((bfd_int64_t) l[2] & LITTLENUM_MASK))
		<< LITTLENUM_NUMBER_OF_BITS)
	       | ((bfd_int64_t) l[1] & LITTLENUM_MASK))
	      << LITTLENUM_NUMBER_OF_BITS)
	     | ((bfd_int64_t) l[0] & LITTLENUM_MASK));
#else
	  v = ((l[1] & LITTLENUM_MASK) << LITTLENUM_NUMBER_OF_BITS)
	      | (l[0] & LITTLENUM_MASK);
#endif
	}
      else
	v = inst.reloc.exp.X_add_number;

      if (!inst.operands[i].issingle)
	{
	  if (thumb_p)
	    {
	      /* This can be encoded only for a low register.  */
	      if ((v & ~0xFF) == 0 && (inst.operands[i].reg < 8))
		{
		  /* This can be done with a mov(1) instruction.  */
		  inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
		  inst.instruction |= v;
		  return TRUE;
		}

	      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
		  || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		{
		  /* Check if on thumb2 it can be done with a mov.w, mvn or
		     movw instruction.  */
		  unsigned int newimm;
		  bfd_boolean isNegated;

		  newimm = encode_thumb32_immediate (v);
		  if (newimm != (unsigned int) FAIL)
		    isNegated = FALSE;
		  else
		    {
		      newimm = encode_thumb32_immediate (~v);
		      if (newimm != (unsigned int) FAIL)
			isNegated = TRUE;
		    }

		  /* The number can be loaded with a mov.w or mvn
		     instruction.  */
		  if (newimm != (unsigned int) FAIL
		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		    {
		      inst.instruction = (0xf04f0000  /*  MOV.W.  */
					  | (inst.operands[i].reg << 8));
		      /* Change to MOVN.  */
		      inst.instruction |= (isNegated ? 0x200000 : 0);
		      inst.instruction |= (newimm & 0x800) << 15;
		      inst.instruction |= (newimm & 0x700) << 4;
		      inst.instruction |= (newimm & 0x0ff);
		      return TRUE;
		    }
		  /* The number can be loaded with a movw instruction.  */
		  else if ((v & ~0xFFFF) == 0
			   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		    {
		      int imm = v & 0xFFFF;

		      inst.instruction = 0xf2400000;  /* MOVW.  */
		      inst.instruction |= (inst.operands[i].reg << 8);
		      inst.instruction |= (imm & 0xf000) << 4;
		      inst.instruction |= (imm & 0x0800) << 15;
		      inst.instruction |= (imm & 0x0700) << 4;
		      inst.instruction |= (imm & 0x00ff);
		      return TRUE;
		    }
		}
	    }
	  else if (arm_p)
	    {
	      int value = encode_arm_immediate (v);

	      if (value != FAIL)
		{
		  /* This can be done with a mov instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}

	      value = encode_arm_immediate (~ v);
	      if (value != FAIL)
		{
		  /* This can be done with a mvn instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}
	    }
	  else if (t == CONST_VEC && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	    {
	      /* Try to express the constant as a Neon VMOV/VMVN
		 modified immediate, trying the inverted form if the
		 direct one fails.  */
	      int op = 0;
	      unsigned immbits = 0;
	      unsigned immlo = inst.operands[1].imm;
	      /* Sign-extend immlo to form the high word when the
		 expression was signed and no explicit high word was
		 given.  */
	      unsigned immhi = inst.operands[1].regisimm
			       ? inst.operands[1].reg
			       : inst.reloc.exp.X_unsigned
				 ? 0
				 : ((bfd_int64_t)((int) immlo)) >> 32;
	      int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);

	      if (cmode == FAIL)
		{
		  neon_invert_size (&immlo, &immhi, 64);
		  op = !op;
		  cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);
		}

	      if (cmode != FAIL)
		{
		  inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
				     | (1 << 23)
				     | (cmode << 8)
				     | (op << 5)
				     | (1 << 4);

		  /* Fill other bits in vmov encoding for both thumb and arm.  */
		  if (thumb_mode)
		    inst.instruction |= (0x7U << 29) | (0xF << 24);
		  else
		    inst.instruction |= (0xFU << 28) | (0x1 << 25);
		  neon_write_immbits (immbits);
		  return TRUE;
		}
	    }
	}

      if (t == CONST_VEC)
	{
	  /* Check if vldr Rx, =constant could be optimized to vmov  Rx, #constant.  */
	  if (inst.operands[i].issingle
	      && is_quarter_float (inst.operands[1].imm)
	      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
	    {
	      inst.operands[1].imm =
		neon_qfloat_bits (v);
	      do_vfp_nsyn_opcode ("fconsts");
	      return TRUE;
	    }

	  /* If our host does not support a 64-bit type then we cannot perform
	     the following optimization.  This means that there will be a
	     discrepancy between the output produced by an assembler built for
	     a 32-bit-only host and the output produced from a 64-bit host, but
	     this cannot be helped.  */
#if defined BFD_HOST_64_BIT
	  else if (!inst.operands[1].issingle
		   && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	    {
	      if (is_double_a_single (v)
		  && is_quarter_float (double_to_single (v)))
		{
		  inst.operands[1].imm =
		    neon_qfloat_bits (double_to_single (v));
		  do_vfp_nsyn_opcode ("fconstd");
		  return TRUE;
		}
	    }
#endif
	}
    }

  /* No move encoding found: fall back to a literal-pool load.  */
  if (add_to_lit_pool ((!inst.operands[i].isvec
			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
    return TRUE;

  /* Rewrite the operand as a PC-relative, pre-indexed load from the
     pool, resolved later through the appropriate literal reloc.  */
  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
8103
8104 /* inst.operands[i] was set up by parse_address. Encode it into an
8105 ARM-format instruction. Reject all forms which cannot be encoded
8106 into a coprocessor load/store instruction. If wb_ok is false,
8107 reject use of writeback; if unind_ok is false, reject use of
8108 unindexed addressing. If reloc_override is not 0, use it instead
8109 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
8110 (in which case it is preserved). */
8111
static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  if (!inst.operands[i].isreg)
    {
      /* PR 18256: a bare constant is only acceptable when the destination
	 is a vector register, in which case move_or_literal_pool may turn
	 it into a vmov or a literal-pool load.  */
      if (! inst.operands[0].isvec)
	{
	  inst.error = _("invalid co-processor operand");
	  return FAIL;
	}
      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
	return SUCCESS;
    }

  /* Base register Rn -> bits 19:16.  */
  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* Unindexed form: the 8-bit option value goes in the immediate
	 field and the U bit must be set.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  /* Choose the relocation: an explicit override wins; otherwise keep any
     group relocation already selected by the parser, else fall back to
     the generic coprocessor-offset reloc for the current mode.  */
  if (reloc_override)
    inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	    && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
8180
8181 /* Functions for instruction encoding, sorted by sub-architecture.
8182 First some generics; their names are taken from the conventional
8183 bit positions for register arguments in ARM format instructions. */
8184
/* No operands to encode: the opcode value taken from insns[] is already
   the complete instruction.  */
static void
do_noargs (void)
{
}
8189
8190 static void
8191 do_rd (void)
8192 {
8193 inst.instruction |= inst.operands[0].reg << 12;
8194 }
8195
8196 static void
8197 do_rn (void)
8198 {
8199 inst.instruction |= inst.operands[0].reg << 16;
8200 }
8201
8202 static void
8203 do_rd_rm (void)
8204 {
8205 inst.instruction |= inst.operands[0].reg << 12;
8206 inst.instruction |= inst.operands[1].reg;
8207 }
8208
8209 static void
8210 do_rm_rn (void)
8211 {
8212 inst.instruction |= inst.operands[0].reg;
8213 inst.instruction |= inst.operands[1].reg << 16;
8214 }
8215
8216 static void
8217 do_rd_rn (void)
8218 {
8219 inst.instruction |= inst.operands[0].reg << 12;
8220 inst.instruction |= inst.operands[1].reg << 16;
8221 }
8222
8223 static void
8224 do_rn_rd (void)
8225 {
8226 inst.instruction |= inst.operands[0].reg << 16;
8227 inst.instruction |= inst.operands[1].reg << 12;
8228 }
8229
8230 static void
8231 do_tt (void)
8232 {
8233 inst.instruction |= inst.operands[0].reg << 8;
8234 inst.instruction |= inst.operands[1].reg << 16;
8235 }
8236
8237 static bfd_boolean
8238 check_obsolete (const arm_feature_set *feature, const char *msg)
8239 {
8240 if (ARM_CPU_IS_ANY (cpu_variant))
8241 {
8242 as_tsktsk ("%s", msg);
8243 return TRUE;
8244 }
8245 else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
8246 {
8247 as_bad ("%s", msg);
8248 return TRUE;
8249 }
8250
8251 return FALSE;
8252 }
8253
static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  The mask clears the
     condition field and bit 22 (byte flag), so it matches SWP and SWPB.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    {
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		  _("Rn must not overlap other operands"));

      /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
       */
      if (!check_obsolete (&arm_ext_v8,
			   _("swp{b} use is obsoleted for ARMv8 and later"))
	  && warn_on_deprecated
	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
	as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
    }

  /* Rd -> bits 15:12, Rm -> bits 3:0, Rn -> bits 19:16.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;
}
8277
8278 static void
8279 do_rd_rn_rm (void)
8280 {
8281 inst.instruction |= inst.operands[0].reg << 12;
8282 inst.instruction |= inst.operands[1].reg << 16;
8283 inst.instruction |= inst.operands[2].reg;
8284 }
8285
static void
do_rm_rd_rn (void)
{
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
  /* Operand 2 must be a plain [Rn] address: any parsed offset
     expression must be absent or zero.  */
  constraint (((inst.reloc.exp.X_op != O_constant
		&& inst.reloc.exp.X_op != O_illegal)
	       || inst.reloc.exp.X_add_number != 0),
	      BAD_ADDR_MODE);
  /* Rm -> bits 3:0, Rd -> bits 15:12, Rn -> bits 19:16.  */
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
8298
8299 static void
8300 do_imm0 (void)
8301 {
8302 inst.instruction |= inst.operands[0].imm;
8303 }
8304
8305 static void
8306 do_rd_cpaddr (void)
8307 {
8308 inst.instruction |= inst.operands[0].reg << 12;
8309 encode_arm_cp_address (1, TRUE, TRUE, 0);
8310 }
8311
8312 /* ARM instructions, in alphabetical order by function name (except
8313 that wrapper functions appear immediately after the function they
8314 wrap). */
8315
8316 /* This is a pseudo-op of the form "adr rd, label" to be converted
8317 into a relative address of the form "add rd, pc, #label-.-8". */
8318
static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  /* In ARM state the PC reads as the instruction address plus 8, hence
     the "#label-.-8" adjustment.  */
  inst.reloc.exp.X_add_number -= 8;
}
8330
8331 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8332 into a relative address of the form:
8333 add rd, pc, #low(label-.-8)"
8334 add rd, rd, #high(label-.-8)" */
8335
static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  /* adrl expands to a two-instruction add/add (or sub/sub) sequence.  */
  inst.size = INSN_SIZE * 2;
  /* PC reads as the instruction address plus 8 in ARM state.  */
  inst.reloc.exp.X_add_number -= 8;
}
8348
static void
do_arit (void)
{
  /* Thumb-1-only group relocations cannot be used with an ARM
     data-processing instruction.  */
  constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  /* Two-operand form: "op Rd, shifter" means "op Rd, Rd, shifter".  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_shifter_operand (2);
}
8361
8362 static void
8363 do_barrier (void)
8364 {
8365 if (inst.operands[0].present)
8366 inst.instruction |= inst.operands[0].imm;
8367 else
8368 inst.instruction |= 0xf;
8369 }
8370
8371 static void
8372 do_bfc (void)
8373 {
8374 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8375 constraint (msb > 32, _("bit-field extends past end of register"));
8376 /* The instruction encoding stores the LSB and MSB,
8377 not the LSB and width. */
8378 inst.instruction |= inst.operands[0].reg << 12;
8379 inst.instruction |= inst.operands[1].imm << 7;
8380 inst.instruction |= (msb - 1) << 16;
8381 }
8382
8383 static void
8384 do_bfi (void)
8385 {
8386 unsigned int msb;
8387
8388 /* #0 in second position is alternative syntax for bfc, which is
8389 the same instruction but with REG_PC in the Rm field. */
8390 if (!inst.operands[1].isreg)
8391 inst.operands[1].reg = REG_PC;
8392
8393 msb = inst.operands[2].imm + inst.operands[3].imm;
8394 constraint (msb > 32, _("bit-field extends past end of register"));
8395 /* The instruction encoding stores the LSB and MSB,
8396 not the LSB and width. */
8397 inst.instruction |= inst.operands[0].reg << 12;
8398 inst.instruction |= inst.operands[1].reg;
8399 inst.instruction |= inst.operands[2].imm << 7;
8400 inst.instruction |= (msb - 1) << 16;
8401 }
8402
8403 static void
8404 do_bfx (void)
8405 {
8406 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8407 _("bit-field extends past end of register"));
8408 inst.instruction |= inst.operands[0].reg << 12;
8409 inst.instruction |= inst.operands[1].reg;
8410 inst.instruction |= inst.operands[2].imm << 7;
8411 inst.instruction |= (inst.operands[3].imm - 1) << 16;
8412 }
8413
8414 /* ARM V5 breakpoint instruction (argument parse)
8415 BKPT <16 bit unsigned immediate>
8416 Instruction is not conditional.
8417 The bit pattern given in insns[] has the COND_ALWAYS condition,
8418 and it is an error if the caller tried to override that. */
8419
8420 static void
8421 do_bkpt (void)
8422 {
8423 /* Top 12 of 16 bits to bits 19:8. */
8424 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
8425
8426 /* Bottom 4 of 16 bits to bits 3:0. */
8427 inst.instruction |= inst.operands[0].imm & 0xf;
8428 }
8429
/* Set up inst.reloc for a branch: honour a (plt) or (tlscall) suffix on
   the operand if present, otherwise use DEFAULT_RELOC.  The relocation
   is always PC-relative.  */
static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
      /* (tlscall) has distinct ARM and Thumb relocations.  */
      inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
	? BFD_RELOC_ARM_PLT32
	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
    }
  else
    inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
  inst.reloc.pc_rel = 1;
}
8446
static void
do_branch (void)
{
#ifdef OBJ_ELF
  /* EABI v4 and later objects use the jump-specific relocation; older
     objects (and non-ELF targets) use the generic branch reloc.  */
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8457
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      /* Unconditional BL gets the call reloc; a conditional BL is
	 treated as a plain jump instead.  */
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8473
8474 /* ARM V5 branch-link-exchange instruction (argument parse)
8475 BLX <target_addr> ie BLX(1)
8476 BLX{<condition>} <Rm> ie BLX(2)
8477 Unfortunately, there are two different opcodes for this mnemonic.
8478 So, the insns[].value is not used, and the code here zaps values
8479 into inst.instruction.
8480 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8481
8482 static void
8483 do_blx (void)
8484 {
8485 if (inst.operands[0].isreg)
8486 {
8487 /* Arg is a register; the opcode provided by insns[] is correct.
8488 It is not illegal to do "blx pc", just useless. */
8489 if (inst.operands[0].reg == REG_PC)
8490 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
8491
8492 inst.instruction |= inst.operands[0].reg;
8493 }
8494 else
8495 {
8496 /* Arg is an address; this instruction cannot be executed
8497 conditionally, and the opcode must be adjusted.
8498 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
8499 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead. */
8500 constraint (inst.cond != COND_ALWAYS, BAD_COND);
8501 inst.instruction = 0xfa000000;
8502 encode_branch (BFD_RELOC_ARM_PCREL_BLX);
8503 }
8504 }
8505
static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
    want_reloc = TRUE;

  /* Never emit the reloc for pre-v4 EABI objects; for non-ELF the
     #ifdef leaves the assignment unconditional, so no reloc is ever
     emitted.  */
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    want_reloc = FALSE;

  if (want_reloc)
    inst.reloc.type = BFD_RELOC_ARM_V4BX;
}
8529
8530
8531 /* ARM v5TEJ. Jump to Jazelle code. */
8532
8533 static void
8534 do_bxj (void)
8535 {
8536 if (inst.operands[0].reg == REG_PC)
8537 as_tsktsk (_("use of r15 in bxj is not really useful"));
8538
8539 inst.instruction |= inst.operands[0].reg;
8540 }
8541
8542 /* Co-processor data operation:
8543 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8544 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8545 static void
8546 do_cdp (void)
8547 {
8548 inst.instruction |= inst.operands[0].reg << 8;
8549 inst.instruction |= inst.operands[1].imm << 20;
8550 inst.instruction |= inst.operands[2].reg << 12;
8551 inst.instruction |= inst.operands[3].reg << 16;
8552 inst.instruction |= inst.operands[4].reg;
8553 inst.instruction |= inst.operands[5].imm << 5;
8554 }
8555
static void
do_cmp (void)
{
  /* Rn -> bits 19:16; operand 1 is a full shifter operand.  */
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_shifter_operand (1);
}
8562
8563 /* Transfer between coprocessor and ARM registers.
8564 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8565 MRC2
8566 MCR{cond}
8567 MCR2
8568
8569 No special properties. */
8570
/* Descriptor for a coprocessor register access that newer architecture
   versions deprecate or obsolete; matched against MRC/MCR operands in
   do_co_reg.  */
struct deprecated_coproc_regs_s
{
  unsigned cp;			/* Coprocessor number.  */
  int opc1;			/* opcode_1 field.  */
  unsigned crn;			/* CRn field.  */
  unsigned crm;			/* CRm field.  */
  int opc2;			/* opcode_2 field.  */
  arm_feature_set deprecated;	/* Features for which use is deprecated.  */
  arm_feature_set obsoleted;	/* Features for which use is obsoleted.  */
  const char *dep_msg;		/* Diagnostic for deprecated use.  */
  const char *obs_msg;		/* Diagnostic for obsoleted use.  */
};

#define DEPR_ACCESS_V8 \
  N_("This coprocessor register access is deprecated in ARMv8")

/* Table of all deprecated coprocessor registers.  */
static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
{
    {15, 0, 7, 10, 5,					/* CP15DMB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 10, 4,					/* CP15DSB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7,  5, 4,					/* CP15ISB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 1,  0, 0,					/* TEEHBR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 0,  0, 0,					/* TEECR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
};

#undef DEPR_ACCESS_V8

/* Number of entries in deprecated_coproc_regs.  */
static const size_t deprecated_coproc_reg_count =
  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
8611
static void
do_co_reg (void)
{
  unsigned Rd;
  size_t i;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else
	/* MRC, MRC2  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

  /* Warn about accesses to coprocessor registers that later
     architectures deprecate (see deprecated_coproc_regs above).  */
  for (i = 0; i < deprecated_coproc_reg_count; ++i)
    {
      const struct deprecated_coproc_regs_s *r =
	deprecated_coproc_regs + i;

      if (inst.operands[0].reg == r->cp
	  && inst.operands[1].imm == r->opc1
	  && inst.operands[3].reg == r->crn
	  && inst.operands[4].reg == r->crm
	  && inst.operands[5].imm == r->opc2)
	{
	  if (! ARM_CPU_IS_ANY (cpu_variant)
	      && warn_on_deprecated
	      && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	    as_tsktsk ("%s", r->dep_msg);
	}
    }

  /* coproc -> 11:8, opcode_1 -> 23:21, Rd -> 15:12, CRn -> 19:16,
     CRm -> 3:0, opcode_2 -> 7:5.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
8661
8662 /* Transfer between coprocessor register and pair of ARM registers.
8663 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8664 MCRR2
8665 MRRC{cond}
8666 MRRC2
8667
8668 Two XScale instructions are special cases of these:
8669
8670 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8671 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8672
8673 Result unpredictable if Rd or Rn is R15. */
8674
static void
do_co_reg2c (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;

  /* In Thumb state both SP and PC are rejected; in ARM state only PC.  */
  if (thumb_mode)
    {
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
    }
  else
    {
      constraint (Rd == REG_PC, BAD_PC);
      constraint (Rn == REG_PC, BAD_PC);
    }

  /* Only check the MRRC{2} variants.  */
  if ((inst.instruction & 0x0FF00000) == 0x0C500000)
    {
      /* If Rd == Rn, error that the operation is
	 unpredictable (example MRRC p3,#1,r1,r1,c4).  */
      constraint (Rd == Rn, BAD_OVERLAP);
    }

  /* coproc -> 11:8, opcode -> 7:4, Rd -> 15:12, Rn -> 19:16,
     CRm -> 3:0.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 4;
  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;
  inst.instruction |= inst.operands[4].reg;
}
8708
8709 static void
8710 do_cpsi (void)
8711 {
8712 inst.instruction |= inst.operands[0].imm << 6;
8713 if (inst.operands[1].present)
8714 {
8715 inst.instruction |= CPSI_MMOD;
8716 inst.instruction |= inst.operands[1].imm;
8717 }
8718 }
8719
8720 static void
8721 do_dbg (void)
8722 {
8723 inst.instruction |= inst.operands[0].imm;
8724 }
8725
8726 static void
8727 do_div (void)
8728 {
8729 unsigned Rd, Rn, Rm;
8730
8731 Rd = inst.operands[0].reg;
8732 Rn = (inst.operands[1].present
8733 ? inst.operands[1].reg : Rd);
8734 Rm = inst.operands[2].reg;
8735
8736 constraint ((Rd == REG_PC), BAD_PC);
8737 constraint ((Rn == REG_PC), BAD_PC);
8738 constraint ((Rm == REG_PC), BAD_PC);
8739
8740 inst.instruction |= Rd << 16;
8741 inst.instruction |= Rn << 0;
8742 inst.instruction |= Rm << 8;
8743 }
8744
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  inst.size = 0;	/* Emits no bytes in ARM mode.  */
  if (unified_syntax)
    {
      set_it_insn_type (IT_INSN);
      /* Low nibble of the opcode is the IT mask.  NOTE(review): 0x10
	 appears to mark the recorded mask as active for the IT-state
	 tracker — confirm against the now_it consumers.  */
      now_it.mask = (inst.instruction & 0xf) | 0x10;
      now_it.cc = inst.operands[0].imm;
    }
}
8761
/* If there is only one register in the register list,
   then return its register number.  Otherwise return -1.  */
static int
only_one_reg_in_list (int range)
{
  int i = ffs (range) - 1;

  /* Guard i < 0 (empty list): evaluating 1 << -1 would be undefined
     behaviour.  An empty list has no single register, so return -1.  */
  return (i < 0 || i > 15 || range != (1 << i)) ? -1 : i;
}
8770
/* Encode an LDM/STM register list and base register, diagnosing the
   UNPREDICTABLE writeback combinations; when FROM_PUSH_POP_MNEM and the
   list holds a single register, switch to the single-register A2
   PUSH/POP encoding.  */
static void
encode_ldmstm(int from_push_pop_mnem)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* The writeback flag on the register-list operand selects the
     LDM type 2/3 (user-bank / exception-return) encoding.  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding.  */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
8821
/* Plain LDM/STM mnemonic: never rewrite to the single-register
   PUSH/POP encoding.  */
static void
do_ldmstm (void)
{
  encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
}
8827
8828 /* ARMv5TE load-consecutive (argument parse)
8829 Mode is like LDRH.
8830
8831 LDRccD R, mode
8832 STRccD R, mode. */
8833
static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  /* The second transfer register defaults to Rt + 1.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	  && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
8869
static void
do_ldrex (void)
{
  /* Only a plain pre-indexed [Rn] with no offset, shift or writeback
     is a valid address for ldrex.  */
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  /* NOTE(review): redundant — the PC base was already rejected by the
     BAD_ADDR_MODE constraint above.  */
  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.reloc.type = BFD_RELOC_UNUSED;
}
8901
8902 static void
8903 do_ldrexd (void)
8904 {
8905 constraint (inst.operands[0].reg % 2 != 0,
8906 _("even register required"));
8907 constraint (inst.operands[1].present
8908 && inst.operands[1].reg != inst.operands[0].reg + 1,
8909 _("can only load two consecutive registers"));
8910 /* If op 1 were present and equal to PC, this function wouldn't
8911 have been called in the first place. */
8912 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
8913
8914 inst.instruction |= inst.operands[0].reg << 12;
8915 inst.instruction |= inst.operands[2].reg << 16;
8916 }
8917
8918 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
8919 which is not a multiple of four is UNPREDICTABLE. */
8920 static void
8921 check_ldr_r15_aligned (void)
8922 {
8923 constraint (!(inst.operands[1].immisreg)
8924 && (inst.operands[0].reg == REG_PC
8925 && inst.operands[1].reg == REG_PC
8926 && (inst.reloc.exp.X_add_number & 0x3)),
8927 _("ldr to register 15 must be 4-byte alligned"));
8928 }
8929
static void
do_ldst (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* "ldr Rd, =expr" may be rewritten as a mov or a literal-pool load;
     if move_or_literal_pool succeeds, the operand is fully encoded.  */
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
      return;
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
  check_ldr_r15_aligned ();
}
8940
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      /* [Rn] parses as pre-indexed with a zero offset; any other
	 pre-indexed form is an error.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
8959
8960 /* Halfword and signed-byte load/store operations. */
8961
static void
do_ldstv4 (void)
{
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  /* "ldrh Rd, =expr" may be rewritten as a mov or a literal-pool load
     (mode 3 form); if so, the operand is fully encoded already.  */
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}
8972
static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      /* [Rn] parses as pre-indexed with a zero offset; any other
	 pre-indexed form is an error.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
8991
8992 /* Co-processor register load/store.
8993 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
8994 static void
8995 do_lstc (void)
8996 {
8997 inst.instruction |= inst.operands[0].reg << 8;
8998 inst.instruction |= inst.operands[1].reg << 12;
8999 encode_arm_cp_address (2, TRUE, TRUE, 0);
9000 }
9001
9002 static void
9003 do_mlas (void)
9004 {
9005 /* This restriction does not apply to mls (nor to mla in v6 or later). */
9006 if (inst.operands[0].reg == inst.operands[1].reg
9007 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
9008 && !(inst.instruction & 0x00400000))
9009 as_tsktsk (_("Rd and Rm should be different in mla"));
9010
9011 inst.instruction |= inst.operands[0].reg << 16;
9012 inst.instruction |= inst.operands[1].reg;
9013 inst.instruction |= inst.operands[2].reg << 8;
9014 inst.instruction |= inst.operands[3].reg << 12;
9015 }
9016
static void
do_mov (void)
{
  /* Thumb-1-only group relocations cannot be used with an ARM mov.  */
  constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_shifter_operand (1);
}
9026
9027 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
9028 static void
9029 do_mov16 (void)
9030 {
9031 bfd_vma imm;
9032 bfd_boolean top;
9033
9034 top = (inst.instruction & 0x00400000) != 0;
9035 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
9036 _(":lower16: not allowed this instruction"));
9037 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
9038 _(":upper16: not allowed instruction"));
9039 inst.instruction |= inst.operands[0].reg << 12;
9040 if (inst.reloc.type == BFD_RELOC_UNUSED)
9041 {
9042 imm = inst.reloc.exp.X_add_number;
9043 /* The value is in two pieces: 0:11, 16:19. */
9044 inst.instruction |= (imm & 0x00000fff);
9045 inst.instruction |= (imm & 0x0000f000) << 4;
9046 }
9047 }
9048
/* Handle the VFP forms of mrs.  Returns SUCCESS if the instruction was
   encoded here, FAIL to let the caller handle it as a core mrs.  */
static int
do_vfp_nsyn_mrs (void)
{
  if (inst.operands[0].isvec)
    {
      /* Vector-flagged destination: encode as fmstat, whose source must
	 be FPSCR (system register number 1).  */
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
9067
9068 static int
9069 do_vfp_nsyn_msr (void)
9070 {
9071 if (inst.operands[0].isvec)
9072 do_vfp_nsyn_opcode ("fmxr");
9073 else
9074 return FAIL;
9075
9076 return SUCCESS;
9077 }
9078
9079 static void
9080 do_vmrs (void)
9081 {
9082 unsigned Rt = inst.operands[0].reg;
9083
9084 if (thumb_mode && Rt == REG_SP)
9085 {
9086 inst.error = BAD_SP;
9087 return;
9088 }
9089
9090 /* APSR_ sets isvec. All other refs to PC are illegal. */
9091 if (!inst.operands[0].isvec && Rt == REG_PC)
9092 {
9093 inst.error = BAD_PC;
9094 return;
9095 }
9096
9097 /* If we get through parsing the register name, we just insert the number
9098 generated into the instruction without further validation. */
9099 inst.instruction |= (inst.operands[1].reg << 16);
9100 inst.instruction |= (Rt << 12);
9101 }
9102
9103 static void
9104 do_vmsr (void)
9105 {
9106 unsigned Rt = inst.operands[1].reg;
9107
9108 if (thumb_mode)
9109 reject_bad_reg (Rt);
9110 else if (Rt == REG_PC)
9111 {
9112 inst.error = BAD_PC;
9113 return;
9114 }
9115
9116 /* If we get through parsing the register name, we just insert the number
9117 generated into the instruction without further validation. */
9118 inst.instruction |= (inst.operands[0].reg << 16);
9119 inst.instruction |= (Rt << 12);
9120 }
9121
static void
do_mrs (void)
{
  unsigned br;

  /* The VFP forms (fmstat/fmrx) are handled separately.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;

  if (inst.operands[1].isreg)
    {
      br = inst.operands[1].reg;
      /* NOTE(review): (br & 0xf0000) can only be a multiple of 0x10000,
	 so it can never equal 0xf000 — this looks like a typo for
	 (br & 0xf0000) != 0xf0000; confirm against the special-register
	 parser before relying on this check.  */
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf000))
	as_bad (_("bad register for mrs"));
    }
  else
    {
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
		  != (PSR_c|PSR_f),
		  _("'APSR', 'CPSR' or 'SPSR' expected"));
      /* Encode 0xf in the Rn field plus the SPSR selector bit.  */
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
    }

  inst.instruction |= br;
}
9150
9151 /* Two possible forms:
9152 "{C|S}PSR_<field>, Rm",
9153 "{C|S}PSR_f, #expression". */
9154
9155 static void
9156 do_msr (void)
9157 {
9158 if (do_vfp_nsyn_msr () == SUCCESS)
9159 return;
9160
9161 inst.instruction |= inst.operands[0].imm;
9162 if (inst.operands[1].isreg)
9163 inst.instruction |= inst.operands[1].reg;
9164 else
9165 {
9166 inst.instruction |= INST_IMMEDIATE;
9167 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
9168 inst.reloc.pc_rel = 0;
9169 }
9170 }
9171
9172 static void
9173 do_mul (void)
9174 {
9175 constraint (inst.operands[2].reg == REG_PC, BAD_PC);
9176
9177 if (!inst.operands[2].present)
9178 inst.operands[2].reg = inst.operands[0].reg;
9179 inst.instruction |= inst.operands[0].reg << 16;
9180 inst.instruction |= inst.operands[1].reg;
9181 inst.instruction |= inst.operands[2].reg << 8;
9182
9183 if (inst.operands[0].reg == inst.operands[1].reg
9184 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
9185 as_tsktsk (_("Rd and Rm should be different in mul"));
9186 }
9187
9188 /* Long Multiply Parser
9189 UMULL RdLo, RdHi, Rm, Rs
9190 SMULL RdLo, RdHi, Rm, Rs
9191 UMLAL RdLo, RdHi, Rm, Rs
9192 SMLAL RdLo, RdHi, Rm, Rs. */
9193
9194 static void
9195 do_mull (void)
9196 {
9197 inst.instruction |= inst.operands[0].reg << 12;
9198 inst.instruction |= inst.operands[1].reg << 16;
9199 inst.instruction |= inst.operands[2].reg;
9200 inst.instruction |= inst.operands[3].reg << 8;
9201
9202 /* rdhi and rdlo must be different. */
9203 if (inst.operands[0].reg == inst.operands[1].reg)
9204 as_tsktsk (_("rdhi and rdlo must be different"));
9205
9206 /* rdhi, rdlo and rm must all be different before armv6. */
9207 if ((inst.operands[0].reg == inst.operands[2].reg
9208 || inst.operands[1].reg == inst.operands[2].reg)
9209 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
9210 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
9211 }
9212
9213 static void
9214 do_nop (void)
9215 {
9216 if (inst.operands[0].present
9217 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
9218 {
9219 /* Architectural NOP hints are CPSR sets with no bits selected. */
9220 inst.instruction &= 0xf0000000;
9221 inst.instruction |= 0x0320f000;
9222 if (inst.operands[0].present)
9223 inst.instruction |= inst.operands[0].imm;
9224 }
9225 }
9226
9227 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9228 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9229 Condition defaults to COND_ALWAYS.
9230 Error if Rd, Rn or Rm are R15. */
9231
9232 static void
9233 do_pkhbt (void)
9234 {
9235 inst.instruction |= inst.operands[0].reg << 12;
9236 inst.instruction |= inst.operands[1].reg << 16;
9237 inst.instruction |= inst.operands[2].reg;
9238 if (inst.operands[3].present)
9239 encode_arm_shift (3);
9240 }
9241
9242 /* ARM V6 PKHTB (Argument Parse). */
9243
9244 static void
9245 do_pkhtb (void)
9246 {
9247 if (!inst.operands[3].present)
9248 {
9249 /* If the shift specifier is omitted, turn the instruction
9250 into pkhbt rd, rm, rn. */
9251 inst.instruction &= 0xfff00010;
9252 inst.instruction |= inst.operands[0].reg << 12;
9253 inst.instruction |= inst.operands[1].reg;
9254 inst.instruction |= inst.operands[2].reg << 16;
9255 }
9256 else
9257 {
9258 inst.instruction |= inst.operands[0].reg << 12;
9259 inst.instruction |= inst.operands[1].reg << 16;
9260 inst.instruction |= inst.operands[2].reg;
9261 encode_arm_shift (3);
9262 }
9263 }
9264
9265 /* ARMv5TE: Preload-Cache
9266 MP Extensions: Preload for write
9267
9268 PLD(W) <addr_mode>
9269
9270 Syntactically, like LDR with B=1, W=0, L=1. */
9271
static void
do_pld (void)
{
  /* PLD accepts only a pre-indexed [..] address without writeback;
     check each disallowed addressing form separately so the user gets
     a specific diagnostic.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
9285
9286 /* ARMv7: PLI <addr_mode> */
static void
do_pli (void)
{
  /* Same addressing restrictions as PLD: pre-indexed, no writeback.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  /* The PLI encoding does not carry the P bit; clear what
     encode_arm_addr_mode_2 set.  */
  inst.instruction &= ~PRE_INDEX;
}
9301
static void
do_push_pop (void)
{
  /* Rewrite PUSH/POP {reglist} as LDM/STM SP!, {reglist}: shift the
     parsed register list to operand 1, then synthesize an SP base with
     writeback as operand 0.  */
  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
}
9314
9315 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9316 word at the specified address and the following word
9317 respectively.
9318 Unconditionally executed.
9319 Error if Rn is R15. */
9320
9321 static void
9322 do_rfe (void)
9323 {
9324 inst.instruction |= inst.operands[0].reg << 16;
9325 if (inst.operands[0].writeback)
9326 inst.instruction |= WRITE_BACK;
9327 }
9328
9329 /* ARM V6 ssat (argument parse). */
9330
9331 static void
9332 do_ssat (void)
9333 {
9334 inst.instruction |= inst.operands[0].reg << 12;
9335 inst.instruction |= (inst.operands[1].imm - 1) << 16;
9336 inst.instruction |= inst.operands[2].reg;
9337
9338 if (inst.operands[3].present)
9339 encode_arm_shift (3);
9340 }
9341
9342 /* ARM V6 usat (argument parse). */
9343
9344 static void
9345 do_usat (void)
9346 {
9347 inst.instruction |= inst.operands[0].reg << 12;
9348 inst.instruction |= inst.operands[1].imm << 16;
9349 inst.instruction |= inst.operands[2].reg;
9350
9351 if (inst.operands[3].present)
9352 encode_arm_shift (3);
9353 }
9354
9355 /* ARM V6 ssat16 (argument parse). */
9356
9357 static void
9358 do_ssat16 (void)
9359 {
9360 inst.instruction |= inst.operands[0].reg << 12;
9361 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
9362 inst.instruction |= inst.operands[2].reg;
9363 }
9364
9365 static void
9366 do_usat16 (void)
9367 {
9368 inst.instruction |= inst.operands[0].reg << 12;
9369 inst.instruction |= inst.operands[1].imm << 16;
9370 inst.instruction |= inst.operands[2].reg;
9371 }
9372
9373 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9374 preserving the other bits.
9375
9376 setend <endian_specifier>, where <endian_specifier> is either
9377 BE or LE. */
9378
9379 static void
9380 do_setend (void)
9381 {
9382 if (warn_on_deprecated
9383 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
9384 as_tsktsk (_("setend use is deprecated for ARMv8"));
9385
9386 if (inst.operands[0].imm)
9387 inst.instruction |= 0x200;
9388 }
9389
9390 static void
9391 do_shift (void)
9392 {
9393 unsigned int Rm = (inst.operands[1].present
9394 ? inst.operands[1].reg
9395 : inst.operands[0].reg);
9396
9397 inst.instruction |= inst.operands[0].reg << 12;
9398 inst.instruction |= Rm;
9399 if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
9400 {
9401 inst.instruction |= inst.operands[2].reg << 8;
9402 inst.instruction |= SHIFT_BY_REG;
9403 /* PR 12854: Error on extraneous shifts. */
9404 constraint (inst.operands[2].shifted,
9405 _("extraneous shift as part of operand to shift insn"));
9406 }
9407 else
9408 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
9409 }
9410
9411 static void
9412 do_smc (void)
9413 {
9414 inst.reloc.type = BFD_RELOC_ARM_SMC;
9415 inst.reloc.pc_rel = 0;
9416 }
9417
9418 static void
9419 do_hvc (void)
9420 {
9421 inst.reloc.type = BFD_RELOC_ARM_HVC;
9422 inst.reloc.pc_rel = 0;
9423 }
9424
9425 static void
9426 do_swi (void)
9427 {
9428 inst.reloc.type = BFD_RELOC_ARM_SWI;
9429 inst.reloc.pc_rel = 0;
9430 }
9431
9432 static void
9433 do_setpan (void)
9434 {
9435 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
9436 _("selected processor does not support SETPAN instruction"));
9437
9438 inst.instruction |= ((inst.operands[0].imm & 1) << 9);
9439 }
9440
9441 static void
9442 do_t_setpan (void)
9443 {
9444 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
9445 _("selected processor does not support SETPAN instruction"));
9446
9447 inst.instruction |= (inst.operands[0].imm << 3);
9448 }
9449
9450 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9451 SMLAxy{cond} Rd,Rm,Rs,Rn
9452 SMLAWy{cond} Rd,Rm,Rs,Rn
9453 Error if any register is R15. */
9454
9455 static void
9456 do_smla (void)
9457 {
9458 inst.instruction |= inst.operands[0].reg << 16;
9459 inst.instruction |= inst.operands[1].reg;
9460 inst.instruction |= inst.operands[2].reg << 8;
9461 inst.instruction |= inst.operands[3].reg << 12;
9462 }
9463
9464 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9465 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9466 Error if any register is R15.
9467 Warning if Rdlo == Rdhi. */
9468
9469 static void
9470 do_smlal (void)
9471 {
9472 inst.instruction |= inst.operands[0].reg << 12;
9473 inst.instruction |= inst.operands[1].reg << 16;
9474 inst.instruction |= inst.operands[2].reg;
9475 inst.instruction |= inst.operands[3].reg << 8;
9476
9477 if (inst.operands[0].reg == inst.operands[1].reg)
9478 as_tsktsk (_("rdhi and rdlo must be different"));
9479 }
9480
9481 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9482 SMULxy{cond} Rd,Rm,Rs
9483 Error if any register is R15. */
9484
9485 static void
9486 do_smul (void)
9487 {
9488 inst.instruction |= inst.operands[0].reg << 16;
9489 inst.instruction |= inst.operands[1].reg;
9490 inst.instruction |= inst.operands[2].reg << 8;
9491 }
9492
9493 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9494 the same for both ARM and Thumb-2. */
9495
9496 static void
9497 do_srs (void)
9498 {
9499 int reg;
9500
9501 if (inst.operands[0].present)
9502 {
9503 reg = inst.operands[0].reg;
9504 constraint (reg != REG_SP, _("SRS base register must be r13"));
9505 }
9506 else
9507 reg = REG_SP;
9508
9509 inst.instruction |= reg << 16;
9510 inst.instruction |= inst.operands[1].imm;
9511 if (inst.operands[0].writeback || inst.operands[1].writeback)
9512 inst.instruction |= WRITE_BACK;
9513 }
9514
9515 /* ARM V6 strex (argument parse). */
9516
static void
do_strex (void)
{
  /* STREX Rd, Rt, [Rn]: only a plain register address is accepted.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  /* The status register may not overlap the value or base registers.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  /* No offset field: suppress any relocation the parser queued.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
}
9540
static void
do_t_strexbh (void)
{
  /* Thumb STREXB/STREXH: plain register address, no offset forms.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  /* The status register may not overlap the value or base registers.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9555
static void
do_strexd (void)
{
  /* STREXD Rd, Rt, {Rt2,} [Rn]: Rt must be an even register and Rt2,
     when given, the next one up.  */
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  /* Rd may not overlap either source register or the base.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}
9577
9578 /* ARM V8 STRL. */
static void
do_stlex (void)
{
  /* ARM STLEX: the status register may not overlap the value or base
     registers.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rd_rm_rn ();
}
9587
static void
do_t_stlex (void)
{
  /* Thumb STLEX: same overlap restriction as the ARM form, but the
     Thumb operand ordering applies.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9596
9597 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9598 extends it to 32-bits, and adds the result to a value in another
9599 register. You can specify a rotation by 0, 8, 16, or 24 bits
9600 before extracting the 16-bit value.
9601 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9602 Condition defaults to COND_ALWAYS.
9603 Error if any register uses R15. */
9604
9605 static void
9606 do_sxtah (void)
9607 {
9608 inst.instruction |= inst.operands[0].reg << 12;
9609 inst.instruction |= inst.operands[1].reg << 16;
9610 inst.instruction |= inst.operands[2].reg;
9611 inst.instruction |= inst.operands[3].imm << 10;
9612 }
9613
9614 /* ARM V6 SXTH.
9615
9616 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9617 Condition defaults to COND_ALWAYS.
9618 Error if any register uses R15. */
9619
9620 static void
9621 do_sxth (void)
9622 {
9623 inst.instruction |= inst.operands[0].reg << 12;
9624 inst.instruction |= inst.operands[1].reg;
9625 inst.instruction |= inst.operands[2].imm << 10;
9626 }
9627 \f
9628 /* VFP instructions. In a logical order: SP variant first, monad
9629 before dyad, arithmetic then move then load/store. */
9630
9631 static void
9632 do_vfp_sp_monadic (void)
9633 {
9634 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9635 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
9636 }
9637
9638 static void
9639 do_vfp_sp_dyadic (void)
9640 {
9641 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9642 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
9643 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
9644 }
9645
static void
do_vfp_sp_compare_z (void)
{
  /* Single-precision compare against zero: only Sd is encoded.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}
9651
9652 static void
9653 do_vfp_dp_sp_cvt (void)
9654 {
9655 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9656 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
9657 }
9658
9659 static void
9660 do_vfp_sp_dp_cvt (void)
9661 {
9662 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9663 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
9664 }
9665
9666 static void
9667 do_vfp_reg_from_sp (void)
9668 {
9669 inst.instruction |= inst.operands[0].reg << 12;
9670 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
9671 }
9672
static void
do_vfp_reg2_from_sp2 (void)
{
  /* Move an SP register pair to two core registers; the parsed list
     must contain exactly two consecutive S registers.  */
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
9682
9683 static void
9684 do_vfp_sp_from_reg (void)
9685 {
9686 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
9687 inst.instruction |= inst.operands[1].reg << 12;
9688 }
9689
static void
do_vfp_sp2_from_reg2 (void)
{
  /* Move two core registers to an SP register pair; the parsed list
     must contain exactly two consecutive S registers.  */
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
9699
static void
do_vfp_sp_ldst (void)
{
  /* Single-precision load/store: Sd plus a coprocessor address.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
9706
static void
do_vfp_dp_ldst (void)
{
  /* Double-precision load/store: Dd plus a coprocessor address.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
9713
9714
static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  /* Common encoder for single-precision load/store multiple: base
     register, S-register range and count.  Only the IA form may omit
     writeback.  */
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;
}
9727
static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  /* Common encoder for double-precision load/store multiple.  The
     count field is in words (two per D register); the FLDMX/FSTMX
     forms add one extra word.  */
  int count;

  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  count = inst.operands[1].imm << 1;
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;

  inst.instruction |= count;
}
9748
static void
do_vfp_sp_ldstmia (void)
{
  /* FLDMIAS/FSTMIAS.  */
  vfp_sp_ldstm (VFP_LDSTMIA);
}
9754
static void
do_vfp_sp_ldstmdb (void)
{
  /* FLDMDBS/FSTMDBS.  */
  vfp_sp_ldstm (VFP_LDSTMDB);
}
9760
static void
do_vfp_dp_ldstmia (void)
{
  /* FLDMIAD/FSTMIAD.  */
  vfp_dp_ldstm (VFP_LDSTMIA);
}
9766
static void
do_vfp_dp_ldstmdb (void)
{
  /* FLDMDBD/FSTMDBD.  */
  vfp_dp_ldstm (VFP_LDSTMDB);
}
9772
static void
do_vfp_xp_ldstmia (void)
{
  /* FLDMIAX/FSTMIAX (extended/unknown-precision form).  */
  vfp_dp_ldstm (VFP_LDSTMIAX);
}
9778
static void
do_vfp_xp_ldstmdb (void)
{
  /* FLDMDBX/FSTMDBX (extended/unknown-precision form).  */
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
9784
9785 static void
9786 do_vfp_dp_rd_rm (void)
9787 {
9788 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9789 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
9790 }
9791
9792 static void
9793 do_vfp_dp_rn_rd (void)
9794 {
9795 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
9796 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
9797 }
9798
9799 static void
9800 do_vfp_dp_rd_rn (void)
9801 {
9802 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9803 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
9804 }
9805
9806 static void
9807 do_vfp_dp_rd_rn_rm (void)
9808 {
9809 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9810 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
9811 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
9812 }
9813
static void
do_vfp_dp_rd (void)
{
  /* Double-precision single-operand form: only Dd is encoded.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}
9819
9820 static void
9821 do_vfp_dp_rm_rd_rn (void)
9822 {
9823 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
9824 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
9825 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
9826 }
9827
9828 /* VFPv3 instructions. */
9829 static void
9830 do_vfp_sp_const (void)
9831 {
9832 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9833 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
9834 inst.instruction |= (inst.operands[1].imm & 0x0f);
9835 }
9836
9837 static void
9838 do_vfp_dp_const (void)
9839 {
9840 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9841 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
9842 inst.instruction |= (inst.operands[1].imm & 0x0f);
9843 }
9844
static void
vfp_conv (int srcsize)
{
  /* Encode the fraction-bits field of a VFPv3 fixed-point conversion.
     The operand gives the number of fraction bits; the encoding stores
     srcsize minus that value, split between bit 5 (lsb) and bits 0-3.  */
  int immbits = srcsize - inst.operands[1].imm;

  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
    {
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
	 i.e. immbits must be in range 0 - 16.  */
      inst.error = _("immediate value out of range, expected range [0, 16]");
      return;
    }
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
    {
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
	 i.e. immbits must be in range 0 - 31.  */
      inst.error = _("immediate value out of range, expected range [1, 32]");
      return;
    }

  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}
9868
static void
do_vfp_sp_conv_16 (void)
{
  /* Single-precision 16-bit fixed-point conversion.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}
9875
static void
do_vfp_dp_conv_16 (void)
{
  /* Double-precision 16-bit fixed-point conversion.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}
9882
static void
do_vfp_sp_conv_32 (void)
{
  /* Single-precision 32-bit fixed-point conversion.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}
9889
static void
do_vfp_dp_conv_32 (void)
{
  /* Double-precision 32-bit fixed-point conversion.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
9896 \f
9897 /* FPA instructions. Also in a logical order. */
9898
9899 static void
9900 do_fpa_cmp (void)
9901 {
9902 inst.instruction |= inst.operands[0].reg << 16;
9903 inst.instruction |= inst.operands[1].reg;
9904 }
9905
static void
do_fpa_ldmstm (void)
{
  /* FPA load/store multiple: encode the register count in the CP_T
     bits, then fake stacking modes the hardware lacks by adjusting
     the offset and index bits.  */
  inst.instruction |= inst.operands[0].reg << 12;
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X; break;
    case 2: inst.instruction |= CP_T_Y; break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* 12 bytes per FPA register transferred.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;

      /* Descending-stack writeback turns into post-indexing.  */
      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
9944 \f
9945 /* iWMMXt instructions: strictly in alphabetical order. */
9946
static void
do_iwmmxt_tandorc (void)
{
  /* TANDC/TORC/TEXTRC-style ops only write the flags, so the
     destination must be spelled r15.  */
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}
9952
9953 static void
9954 do_iwmmxt_textrc (void)
9955 {
9956 inst.instruction |= inst.operands[0].reg << 12;
9957 inst.instruction |= inst.operands[1].imm;
9958 }
9959
9960 static void
9961 do_iwmmxt_textrm (void)
9962 {
9963 inst.instruction |= inst.operands[0].reg << 12;
9964 inst.instruction |= inst.operands[1].reg << 16;
9965 inst.instruction |= inst.operands[2].imm;
9966 }
9967
9968 static void
9969 do_iwmmxt_tinsr (void)
9970 {
9971 inst.instruction |= inst.operands[0].reg << 16;
9972 inst.instruction |= inst.operands[1].reg << 12;
9973 inst.instruction |= inst.operands[2].imm;
9974 }
9975
9976 static void
9977 do_iwmmxt_tmia (void)
9978 {
9979 inst.instruction |= inst.operands[0].reg << 5;
9980 inst.instruction |= inst.operands[1].reg;
9981 inst.instruction |= inst.operands[2].reg << 12;
9982 }
9983
9984 static void
9985 do_iwmmxt_waligni (void)
9986 {
9987 inst.instruction |= inst.operands[0].reg << 12;
9988 inst.instruction |= inst.operands[1].reg << 16;
9989 inst.instruction |= inst.operands[2].reg;
9990 inst.instruction |= inst.operands[3].imm << 20;
9991 }
9992
9993 static void
9994 do_iwmmxt_wmerge (void)
9995 {
9996 inst.instruction |= inst.operands[0].reg << 12;
9997 inst.instruction |= inst.operands[1].reg << 16;
9998 inst.instruction |= inst.operands[2].reg;
9999 inst.instruction |= inst.operands[3].imm << 21;
10000 }
10001
10002 static void
10003 do_iwmmxt_wmov (void)
10004 {
10005 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
10006 inst.instruction |= inst.operands[0].reg << 12;
10007 inst.instruction |= inst.operands[1].reg << 16;
10008 inst.instruction |= inst.operands[1].reg;
10009 }
10010
10011 static void
10012 do_iwmmxt_wldstbh (void)
10013 {
10014 int reloc;
10015 inst.instruction |= inst.operands[0].reg << 12;
10016 if (thumb_mode)
10017 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
10018 else
10019 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
10020 encode_arm_cp_address (1, TRUE, FALSE, reloc);
10021 }
10022
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      /* Control-register access uses the unconditional (0xf) encoding
	 space, so a condition suffix is rejected.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
10036
static void
do_iwmmxt_wldstd (void)
{
  /* WLDRD/WSTRD: iWMMXt2 adds a register-offset form which is encoded
     by hand here; otherwise fall back to the generic coprocessor
     address encoding.  */
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      /* Clear the immediate-form fields, then rebuild the encoding in
	 the unconditional (0xf) space with P/U/W bits from the parsed
	 address.  */
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xfU << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
10059
10060 static void
10061 do_iwmmxt_wshufh (void)
10062 {
10063 inst.instruction |= inst.operands[0].reg << 12;
10064 inst.instruction |= inst.operands[1].reg << 16;
10065 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
10066 inst.instruction |= (inst.operands[2].imm & 0x0f);
10067 }
10068
10069 static void
10070 do_iwmmxt_wzero (void)
10071 {
10072 /* WZERO reg is an alias for WANDN reg, reg, reg. */
10073 inst.instruction |= inst.operands[0].reg;
10074 inst.instruction |= inst.operands[0].reg << 12;
10075 inst.instruction |= inst.operands[0].reg << 16;
10076 }
10077
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  /* Shift/rotate with either a register or (iWMMXt2 only) a 5-bit
     immediate third operand.  A zero immediate is rewritten into an
     equivalent instruction, since zero is not directly encodable.  */
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* Dispatch on the size/op field at bits 20-23.  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  */
    inst.operands[2].imm &= 0x1f;
    inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
10127 \f
10128 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10129 operations first, then control, shift, and load/store. */
10130
10131 /* Insns like "foo X,Y,Z". */
10132
10133 static void
10134 do_mav_triple (void)
10135 {
10136 inst.instruction |= inst.operands[0].reg << 16;
10137 inst.instruction |= inst.operands[1].reg;
10138 inst.instruction |= inst.operands[2].reg << 12;
10139 }
10140
10141 /* Insns like "foo W,X,Y,Z".
10142 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10143
10144 static void
10145 do_mav_quad (void)
10146 {
10147 inst.instruction |= inst.operands[0].reg << 5;
10148 inst.instruction |= inst.operands[1].reg << 12;
10149 inst.instruction |= inst.operands[2].reg << 16;
10150 inst.instruction |= inst.operands[3].reg;
10151 }
10152
10153 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
static void
do_mav_dspsc (void)
{
  /* cfmvsc32 DSPSC, MVDX: only the source register is encoded; the
     DSPSC destination is implicit in the opcode.  */
  inst.instruction |= inst.operands[1].reg << 12;
}
10159
10160 /* Maverick shift immediate instructions.
10161 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10162 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
10163
10164 static void
10165 do_mav_shift (void)
10166 {
10167 int imm = inst.operands[2].imm;
10168
10169 inst.instruction |= inst.operands[0].reg << 12;
10170 inst.instruction |= inst.operands[1].reg << 16;
10171
10172 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
10173 Bits 5-7 of the insn should have bits 4-6 of the immediate.
10174 Bit 4 should be 0. */
10175 imm = (imm & 0xf) | ((imm & 0x70) << 1);
10176
10177 inst.instruction |= imm;
10178 }
10179 \f
10180 /* XScale instructions. Also sorted arithmetic before move. */
10181
10182 /* Xscale multiply-accumulate (argument parse)
10183 MIAcc acc0,Rm,Rs
10184 MIAPHcc acc0,Rm,Rs
10185 MIAxycc acc0,Rm,Rs. */
10186
10187 static void
10188 do_xsc_mia (void)
10189 {
10190 inst.instruction |= inst.operands[1].reg;
10191 inst.instruction |= inst.operands[2].reg << 12;
10192 }
10193
10194 /* Xscale move-accumulator-register (argument parse)
10195
10196 MARcc acc0,RdLo,RdHi. */
10197
10198 static void
10199 do_xsc_mar (void)
10200 {
10201 inst.instruction |= inst.operands[1].reg << 12;
10202 inst.instruction |= inst.operands[2].reg << 16;
10203 }
10204
10205 /* Xscale move-register-accumulator (argument parse)
10206
10207 MRAcc RdLo,RdHi,acc0. */
10208
10209 static void
10210 do_xsc_mra (void)
10211 {
10212 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
10213 inst.instruction |= inst.operands[0].reg << 12;
10214 inst.instruction |= inst.operands[1].reg << 16;
10215 }
10216 \f
10217 /* Encoding functions relevant only to Thumb. */
10218
10219 /* inst.operands[i] is a shifted-register operand; encode
10220 it into inst.instruction in the format used by Thumb32. */
10221
static void
encode_thumb32_shifted_operand (int i)
{
  /* Encode inst.operands[i], a shifted-register operand, into the
     Thumb32 shifted-register fields (type at bits 4-5, amount split
     across bits 6-7 and 12-14).  */
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR with a zero amount.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      /* Amount 0 means LSL #0; amount 32 (ASR/LSR only) is encoded
	 as 0.  */
      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
10253
10254
10255 /* inst.operands[i] was set up by parse_address. Encode it into a
10256 Thumb32 format load or store instruction. Reject forms that cannot
10257 be used with such instructions. If is_t is true, reject forms that
10258 cannot be used with a T instruction; if is_d is true, reject forms
10259 that cannot be used with a D instruction. If it is a store insn,
10260 reject PC in Rn. */
10261
static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  /* Literal-pool loads (=N) are handled elsewhere; by the time we get
     here the operand must be a plain address.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* Register-offset addressing: [Rn, Rm {, LSL #imm}].  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  constraint (inst.reloc.exp.X_op != O_constant,
		      _("expression too complex"));
	  /* Only LSL #0..#3 fits in the two-bit shift field.  */
	  constraint (inst.reloc.exp.X_add_number < 0
		      || inst.reloc.exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
	}
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* Pre-indexed (or plain offset) addressing: [Rn, #imm]{!}.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      /* PC-relative addressing is only valid for loads.  */
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      if (is_d)
	{
	  /* Doubleword form: U/P/W bits live higher up.  */
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* Post-indexed addressing: [Rn], #imm — always writes back.  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
10333
10334 /* Table of Thumb instructions which exist in both 16- and 32-bit
10335 encodings (the latter only in post-V6T2 cores). The index is the
10336 value used in the insns table below. When there is more than one
10337 possible 16-bit encoding for the instruction, this table always
10338 holds variant (1).
10339 Also contains several pseudo-instructions used during relaxation. */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),                     \
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
/* First expansion: the T_MNEM_* enumerators used as table indices.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* Second expansion: the 16-bit opcode for each T_MNEM code.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* Third expansion: the 32-bit opcode for each T_MNEM code.  Bit 20 of
   the 32-bit opcode is the S (sets-flags) bit, hence THUMB_SETS_FLAGS.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
10439
10440 /* Thumb instruction encoders, in alphabetical order. */
10441
10442 /* ADDW or SUBW. */
10443
10444 static void
10445 do_t_add_sub_w (void)
10446 {
10447 int Rd, Rn;
10448
10449 Rd = inst.operands[0].reg;
10450 Rn = inst.operands[1].reg;
10451
10452 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10453 is the SP-{plus,minus}-immediate form of the instruction. */
10454 if (Rn == REG_SP)
10455 constraint (Rd == REG_PC, BAD_PC);
10456 else
10457 reject_bad_reg (Rd);
10458
10459 inst.instruction |= (Rn << 16) | (Rd << 8);
10460 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
10461 }
10462
10463 /* Parse an add or subtract instruction. We get here with inst.instruction
10464 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
10465
static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */

  if (Rd == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      /* In an IT block the flag-setting 16-bit forms are unavailable;
	 outside one the non-flag-setting forms are the narrow ones.  */
      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      if (flags)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (!inst.operands[2].isreg)
	{
	  /* Immediate operand.  */
	  int add;

	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  /* The ALU_ABS group relocs keep their own reloc type;
		     anything else either gets THUMB_ADD now or is left
		     for relaxation to widen later.  */
		  if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		      || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		  {
		    if (inst.size_req == 2)
		      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
		    else
		      inst.relax = opcode;
		  }
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      /* 32-bit encoding required.  */
	      constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
			  && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
			  THUMB1_RELOC_ONLY);
	      if (Rd == REG_PC)
		{
		  /* Writing PC: only the exception-return idiom
		     SUBS PC, LR, #const is permitted.  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.reloc.exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.reloc.exp.X_add_number < 0
			      || inst.reloc.exp.X_add_number > 0xff,
			      _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.reloc.exp.X_add_number;
		  inst.reloc.type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  /* Register (possibly shifted) operand.  */
	  unsigned int value = inst.reloc.exp.X_add_number;
	  unsigned int shift = inst.operands[2].shift_kind;

	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
		      _("shift value over 3 not allowed in thumb mode"));
	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
		      _("only LSL shift allowed in thumb mode"));
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified (divided) syntax: only the classic Thumb-1 forms.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
10681
/* Encode Thumb ADR: load a PC-relative address into Rd.  Picks the
   narrow form (with relaxation), the 32-bit form, or a forced 16-bit
   form depending on syntax mode and size requirement.  */
static void
do_t_adr (void)
{
  unsigned Rd;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  if (unified_syntax && inst.size_req == 0 && Rd <= 7)
    {
      /* Defer to section relaxation.  */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd << 4;
    }
  else if (unified_syntax && inst.size_req != 2)
    {
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.reloc.pc_rel = 1;
    }
  else
    {
      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
      inst.reloc.exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.reloc.pc_rel = 1;

      inst.instruction |= Rd << 4;
    }
}
10716
10717 /* Arithmetic instructions for which there is just one 16-bit
10718 instruction encoding, and it allows only two low registers.
10719 For maximal compatibility with ARM syntax, we allow three register
10720 operands even when Thumb-32 instructions are not available, as long
10721 as the first two are identical. For instance, both "sbc r0,r1" and
10722 "sbc r0,r0,r1" are allowed. */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  /* The 16-bit form needs low registers, no shift, and no
	     explicit ".w" qualifier.  */
	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  /* The 16-bit form is two-operand, so Rd must equal Rs
	     (non-commutative: cannot swap the sources, cf do_t_arit3c).  */
	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
10805
10806 /* Similarly, but for instructions where the arithmetic operation is
10807 commutative, so we can allow either of them to be different from
10808 the destination operand in a 16-bit instruction. For instance, all
10809 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
10810 accepted. */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  /* The 16-bit form needs low registers, no shift, and no
	     explicit ".w" qualifier.  */
	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow)
	    {
	      /* Commutative operation: either source may coincide with
		 the destination in the two-operand 16-bit form.  */
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
10906
10907 static void
10908 do_t_bfc (void)
10909 {
10910 unsigned Rd;
10911 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
10912 constraint (msb > 32, _("bit-field extends past end of register"));
10913 /* The instruction encoding stores the LSB and MSB,
10914 not the LSB and width. */
10915 Rd = inst.operands[0].reg;
10916 reject_bad_reg (Rd);
10917 inst.instruction |= Rd << 8;
10918 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
10919 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
10920 inst.instruction |= msb - 1;
10921 }
10922
10923 static void
10924 do_t_bfi (void)
10925 {
10926 int Rd, Rn;
10927 unsigned int msb;
10928
10929 Rd = inst.operands[0].reg;
10930 reject_bad_reg (Rd);
10931
10932 /* #0 in second position is alternative syntax for bfc, which is
10933 the same instruction but with REG_PC in the Rm field. */
10934 if (!inst.operands[1].isreg)
10935 Rn = REG_PC;
10936 else
10937 {
10938 Rn = inst.operands[1].reg;
10939 reject_bad_reg (Rn);
10940 }
10941
10942 msb = inst.operands[2].imm + inst.operands[3].imm;
10943 constraint (msb > 32, _("bit-field extends past end of register"));
10944 /* The instruction encoding stores the LSB and MSB,
10945 not the LSB and width. */
10946 inst.instruction |= Rd << 8;
10947 inst.instruction |= Rn << 16;
10948 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10949 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10950 inst.instruction |= msb - 1;
10951 }
10952
10953 static void
10954 do_t_bfx (void)
10955 {
10956 unsigned Rd, Rn;
10957
10958 Rd = inst.operands[0].reg;
10959 Rn = inst.operands[1].reg;
10960
10961 reject_bad_reg (Rd);
10962 reject_bad_reg (Rn);
10963
10964 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
10965 _("bit-field extends past end of register"));
10966 inst.instruction |= Rd << 8;
10967 inst.instruction |= Rn << 16;
10968 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10969 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10970 inst.instruction |= inst.operands[3].imm - 1;
10971 }
10972
10973 /* ARM V5 Thumb BLX (argument parse)
10974 BLX <target_addr> which is BLX(1)
10975 BLX <Rm> which is BLX(2)
10976 Unfortunately, there are two different opcodes for this mnemonic.
10977 So, the insns[].value is not used, and the code here zaps values
10978 into inst.instruction.
10979
10980 ??? How to take advantage of the additional two bits of displacement
10981 available in Thumb32 mode? Need new relocation? */
10982
static void
do_t_blx (void)
{
  /* BLX writes LR/PC, so it must be the last insn of an IT block.  */
  set_it_insn_type_last ();

  if (inst.operands[0].isreg)
    {
      constraint (inst.operands[0].reg == REG_PC, BAD_PC);
      /* We have a register, so this is BLX(2).  */
      inst.instruction |= inst.operands[0].reg << 3;
    }
  else
    {
      /* No register.  This must be BLX(1).  Replace the opcode
	 wholesale; the immediate is filled in via the reloc.  */
      inst.instruction = 0xf000e800;
      encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
    }
}
11001
11002 static void
11003 do_t_branch (void)
11004 {
11005 int opcode;
11006 int cond;
11007 bfd_reloc_code_real_type reloc;
11008
11009 cond = inst.cond;
11010 set_it_insn_type (IF_INSIDE_IT_LAST_INSN);
11011
11012 if (in_it_block ())
11013 {
11014 /* Conditional branches inside IT blocks are encoded as unconditional
11015 branches. */
11016 cond = COND_ALWAYS;
11017 }
11018 else
11019 cond = inst.cond;
11020
11021 if (cond != COND_ALWAYS)
11022 opcode = T_MNEM_bcond;
11023 else
11024 opcode = inst.instruction;
11025
11026 if (unified_syntax
11027 && (inst.size_req == 4
11028 || (inst.size_req != 2
11029 && (inst.operands[0].hasreloc
11030 || inst.reloc.exp.X_op == O_constant))))
11031 {
11032 inst.instruction = THUMB_OP32(opcode);
11033 if (cond == COND_ALWAYS)
11034 reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
11035 else
11036 {
11037 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
11038 _("selected architecture does not support "
11039 "wide conditional branch instruction"));
11040
11041 gas_assert (cond != 0xF);
11042 inst.instruction |= cond << 22;
11043 reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
11044 }
11045 }
11046 else
11047 {
11048 inst.instruction = THUMB_OP16(opcode);
11049 if (cond == COND_ALWAYS)
11050 reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
11051 else
11052 {
11053 inst.instruction |= cond << 8;
11054 reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
11055 }
11056 /* Allow section relaxation. */
11057 if (unified_syntax && inst.size_req != 2)
11058 inst.relax = opcode;
11059 }
11060 inst.reloc.type = reloc;
11061 inst.reloc.pc_rel = 1;
11062 }
11063
11064 /* Actually do the work for Thumb state bkpt and hlt. The only difference
11065 between the two is the maximum immediate allowed - which is passed in
11066 RANGE. */
11067 static void
11068 do_t_bkpt_hlt1 (int range)
11069 {
11070 constraint (inst.cond != COND_ALWAYS,
11071 _("instruction is always unconditional"));
11072 if (inst.operands[0].present)
11073 {
11074 constraint (inst.operands[0].imm > range,
11075 _("immediate value out of range"));
11076 inst.instruction |= inst.operands[0].imm;
11077 }
11078
11079 set_it_insn_type (NEUTRAL_IT_INSN);
11080 }
11081
/* Thumb HLT: same as BKPT handling but with a 6-bit immediate (0-63).  */
static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (63);
}
11087
/* Thumb BKPT: same as HLT handling but with an 8-bit immediate (0-255).  */
static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (255);
}
11093
/* Thumb BL/BLX with a 23-bit branch offset (argument parse).  */
static void
do_t_branch23 (void)
{
  set_it_insn_type_last ();
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);

  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
     this file.  We used to simply ignore the PLT reloc type here --
     the branch encoding is now needed to deal with TLSCALL relocs.
     So if we see a PLT reloc now, put it back to how it used to be to
     keep the preexisting behaviour.  */
  if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
    inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);
#endif
}
11121
/* Thumb BX: Rm goes in bits 3-6.  */
static void
do_t_bx (void)
{
  set_it_insn_type_last ();
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
     should cause the alignment to be checked once it is known.  This is
     because BX PC only works if the instruction is word aligned.  */
}
11131
11132 static void
11133 do_t_bxj (void)
11134 {
11135 int Rm;
11136
11137 set_it_insn_type_last ();
11138 Rm = inst.operands[0].reg;
11139 reject_bad_reg (Rm);
11140 inst.instruction |= Rm << 16;
11141 }
11142
11143 static void
11144 do_t_clz (void)
11145 {
11146 unsigned Rd;
11147 unsigned Rm;
11148
11149 Rd = inst.operands[0].reg;
11150 Rm = inst.operands[1].reg;
11151
11152 reject_bad_reg (Rd);
11153 reject_bad_reg (Rm);
11154
11155 inst.instruction |= Rd << 8;
11156 inst.instruction |= Rm << 16;
11157 inst.instruction |= Rm;
11158 }
11159
/* Thumb CPS (single-operand form): the immediate goes straight into
   the low bits of the opcode.  Not allowed inside an IT block.  */
static void
do_t_cps (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction |= inst.operands[0].imm;
}
11166
/* Thumb CPSIE/CPSID (interrupt enable/disable, optional mode change).  */
static void
do_t_cpsi (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  /* The two-operand form (with a mode number) or an explicit ".w" needs
     the 32-bit encoding, which requires v6T2 (non-M).  */
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* Carry the imod field over from the 16-bit base opcode into the
	 32-bit encoding.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
11194
11195 /* THUMB CPY instruction (argument parse). */
11196
11197 static void
11198 do_t_cpy (void)
11199 {
11200 if (inst.size_req == 4)
11201 {
11202 inst.instruction = THUMB_OP32 (T_MNEM_mov);
11203 inst.instruction |= inst.operands[0].reg << 8;
11204 inst.instruction |= inst.operands[1].reg;
11205 }
11206 else
11207 {
11208 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
11209 inst.instruction |= (inst.operands[0].reg & 0x7);
11210 inst.instruction |= inst.operands[1].reg << 3;
11211 }
11212 }
11213
11214 static void
11215 do_t_cbz (void)
11216 {
11217 set_it_insn_type (OUTSIDE_IT_INSN);
11218 constraint (inst.operands[0].reg > 7, BAD_HIREG);
11219 inst.instruction |= inst.operands[0].reg;
11220 inst.reloc.pc_rel = 1;
11221 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
11222 }
11223
/* Thumb DBG hint: the option immediate goes in the low bits.  */
static void
do_t_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
11229
11230 static void
11231 do_t_div (void)
11232 {
11233 unsigned Rd, Rn, Rm;
11234
11235 Rd = inst.operands[0].reg;
11236 Rn = (inst.operands[1].present
11237 ? inst.operands[1].reg : Rd);
11238 Rm = inst.operands[2].reg;
11239
11240 reject_bad_reg (Rd);
11241 reject_bad_reg (Rn);
11242 reject_bad_reg (Rm);
11243
11244 inst.instruction |= Rd << 8;
11245 inst.instruction |= Rn << 16;
11246 inst.instruction |= Rm;
11247 }
11248
11249 static void
11250 do_t_hint (void)
11251 {
11252 if (unified_syntax && inst.size_req == 4)
11253 inst.instruction = THUMB_OP32 (inst.instruction);
11254 else
11255 inst.instruction = THUMB_OP16 (inst.instruction);
11256 }
11257
/* Encode the IT instruction and record the IT-block state the
   following instructions will be checked against.  */
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_it_insn_type (IT_INSN);
  now_it.mask = (inst.instruction & 0xf) | 0x10;
  now_it.cc = cond;
  now_it.warn_deprecated = FALSE;

  /* If the condition is a negative condition, invert the mask.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      /* The position of the lowest set bit in the mask tells how many
	 x/y/z slots are in use; the bits above it encode T (same as
	 cond) or E (inverted), so flip them for an even condition.  */
      if ((mask & 0x7) == 0)
	{
	  /* No conversion needed.  */
	  now_it.block_length = 1;
	}
      else if ((mask & 0x3) == 0)
	{
	  mask ^= 0x8;
	  now_it.block_length = 2;
	}
      else if ((mask & 0x1) == 0)
	{
	  mask ^= 0xC;
	  now_it.block_length = 3;
	}
      else
	{
	  mask ^= 0xE;
	  now_it.block_length = 4;
	}

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
11300
11301 /* Helper function used for both push/pop and ldm/stm. */
static void
encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
{
  bfd_boolean load;

  /* Bit 20 of the T32 opcode distinguishes loads from stores.  */
  load = (inst.instruction & (1 << 20)) != 0;

  if (mask & (1 << 13))
    inst.error = _("SP not allowed in register list");

  if ((mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
		   "using write back is UNPREDICTABLE");

  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    /* Loading PC ends any IT block; note that for later checks.  */
	    set_it_insn_type_last ();
	}
    }
  else
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");
    }

  /* (mask & (mask - 1)) == 0 means at most one register is listed.  */
  if ((mask & (mask - 1)) == 0)
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* ffs finds the lone set bit; its index becomes Rt (bits 15:12).  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  inst.instruction |= base << 16;
}
11364
/* Thumb LDM/STM (argument parse).  Picks a 16-bit encoding (including
   push/pop and single-register str/ldr forms) where possible, else
   falls back to the 32-bit encoding via encode_thumb2_ldmstm.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it. */
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  /* mask is the base register's bit within the register list.  */
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7)
	    {
	      /* 16-bit stmia requires writeback; 16-bit ldmia has
		 writeback iff the base is absent from the list.  */
	      if (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask)))
		{
		  if (inst.instruction == T_MNEM_stmia
		      && (inst.operands[1].imm & mask)
		      && (inst.operands[1].imm & (mask - 1)))
		    as_warn (_("value stored for r%d is UNKNOWN"),
			     inst.operands[0].reg);

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= inst.operands[0].reg << 8;
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* This means 1 register in reg list one of 3 situations:
		     1. Instruction is stmia, but without writeback.
		     2. lmdia without writeback, but with Rn not in
			reglist.
		     3. ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     case 1 and 2 which can be converted into a 16-bit
		     str or ldr.  The SP cases are handled below.  */
		  unsigned long opcode;
		  /* First, record an error for Case 3.  */
		  if (inst.operands[1].imm & mask
		      && inst.operands[0].writeback)
		    inst.error =
			_("having the base register in the register list when "
			  "using write back is UNPREDICTABLE");

		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
			    : T_MNEM_ldr);
		  inst.instruction = THUMB_OP16 (opcode);
		  inst.instruction |= inst.operands[0].reg << 3;
		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
		  narrow = TRUE;
		}
	    }
	  else if (inst.operands[0] .reg == REG_SP)
	    {
	      /* SP base: writeback forms become push/pop; a lone
		 register without writeback becomes str/ldr [sp, #off].  */
	      if (inst.operands[0].writeback)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
			            ? T_MNEM_push : T_MNEM_pop);
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
		  narrow = TRUE;
		}
	    }
	}

      if (!narrow)
	{
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
				inst.operands[0].writeback);
	}
    }
  else
    {
      /* Pre-unified syntax: only low registers and ldmia/stmia exist.  */
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNKNOWN"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
11492
11493 static void
11494 do_t_ldrex (void)
11495 {
11496 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
11497 || inst.operands[1].postind || inst.operands[1].writeback
11498 || inst.operands[1].immisreg || inst.operands[1].shifted
11499 || inst.operands[1].negative,
11500 BAD_ADDR_MODE);
11501
11502 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
11503
11504 inst.instruction |= inst.operands[0].reg << 12;
11505 inst.instruction |= inst.operands[1].reg << 16;
11506 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
11507 }
11508
11509 static void
11510 do_t_ldrexd (void)
11511 {
11512 if (!inst.operands[1].present)
11513 {
11514 constraint (inst.operands[0].reg == REG_LR,
11515 _("r14 not allowed as first register "
11516 "when second register is omitted"));
11517 inst.operands[1].reg = inst.operands[0].reg + 1;
11518 }
11519 constraint (inst.operands[0].reg == inst.operands[1].reg,
11520 BAD_OVERLAP);
11521
11522 inst.instruction |= inst.operands[0].reg << 12;
11523 inst.instruction |= inst.operands[1].reg << 8;
11524 inst.instruction |= inst.operands[2].reg << 16;
11525 }
11526
/* Thumb single-register load/store (argument parse).  Selects between
   the various 16-bit forms, the relaxed forms, and the 32-bit T32
   encoding, depending on syntax mode, operands and size request.  */
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  /* A load that writes PC is a branch and must close any IT block.  */
  if (inst.operands[0].isreg
      && !inst.operands[0].preind
      && inst.operands[0].reg == REG_PC)
    set_it_insn_type_last ();

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  /* Immediate/symbolic operand: try a mov or literal-pool load.  */
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Rik] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
		reject_bad_reg (inst.operands[1].imm);
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* PC- and SP-relative forms use dedicated opcodes.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.reloc.pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      /* With no explicit size request, record the opcode so the
		 relaxation machinery may widen this later.  */
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */

      /* Warning for Erratum 752419.  */
      if (opcode == T_MNEM_ldr
	  && inst.operands[0].reg == REG_SP
	  && inst.operands[1].writeback == 1
	  && !inst.operands[1].immisreg)
	{
	  if (no_cpu_selected ()
	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
	    as_warn (_("This instruction may be unpredictable "
		       "if executed on M-profile cores "
		       "with interrupts enabled."));
	}

      /* Do some validations regarding addressing modes.  */
      if (inst.operands[1].immisreg)
	reject_bad_reg (inst.operands[1].imm);

      constraint (inst.operands[1].writeback == 1
		  && inst.operands[0].reg == inst.operands[1].reg,
		  BAD_OVERLAP);

      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      check_ldr_r15_aligned ();
      return;
    }

  /* Pre-unified (Thumb-1) syntax from here on.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      /* PC/SP-relative forms: word-sized, immediate-offset only.  */
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

 op16:
  /* Convert immediate-offset opcodes to their register-offset forms.  */
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
11713
11714 static void
11715 do_t_ldstd (void)
11716 {
11717 if (!inst.operands[1].present)
11718 {
11719 inst.operands[1].reg = inst.operands[0].reg + 1;
11720 constraint (inst.operands[0].reg == REG_LR,
11721 _("r14 not allowed here"));
11722 constraint (inst.operands[0].reg == REG_R12,
11723 _("r12 not allowed here"));
11724 }
11725
11726 if (inst.operands[2].writeback
11727 && (inst.operands[0].reg == inst.operands[2].reg
11728 || inst.operands[1].reg == inst.operands[2].reg))
11729 as_warn (_("base register written back, and overlaps "
11730 "one of transfer registers"));
11731
11732 inst.instruction |= inst.operands[0].reg << 12;
11733 inst.instruction |= inst.operands[1].reg << 8;
11734 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
11735 }
11736
11737 static void
11738 do_t_ldstt (void)
11739 {
11740 inst.instruction |= inst.operands[0].reg << 12;
11741 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
11742 }
11743
11744 static void
11745 do_t_mla (void)
11746 {
11747 unsigned Rd, Rn, Rm, Ra;
11748
11749 Rd = inst.operands[0].reg;
11750 Rn = inst.operands[1].reg;
11751 Rm = inst.operands[2].reg;
11752 Ra = inst.operands[3].reg;
11753
11754 reject_bad_reg (Rd);
11755 reject_bad_reg (Rn);
11756 reject_bad_reg (Rm);
11757 reject_bad_reg (Ra);
11758
11759 inst.instruction |= Rd << 8;
11760 inst.instruction |= Rn << 16;
11761 inst.instruction |= Rm;
11762 inst.instruction |= Ra << 12;
11763 }
11764
11765 static void
11766 do_t_mlal (void)
11767 {
11768 unsigned RdLo, RdHi, Rn, Rm;
11769
11770 RdLo = inst.operands[0].reg;
11771 RdHi = inst.operands[1].reg;
11772 Rn = inst.operands[2].reg;
11773 Rm = inst.operands[3].reg;
11774
11775 reject_bad_reg (RdLo);
11776 reject_bad_reg (RdHi);
11777 reject_bad_reg (Rn);
11778 reject_bad_reg (Rm);
11779
11780 inst.instruction |= RdLo << 12;
11781 inst.instruction |= RdHi << 8;
11782 inst.instruction |= Rn << 16;
11783 inst.instruction |= Rm;
11784 }
11785
/* Thumb MOV/MOVS/CMP with a register or immediate second operand
   (argument parse).  Handles narrow/wide selection, the MOVS PC, LR
   special case, register-shift forms, and the pre-unified encodings.  */
static void
do_t_mov_cmp (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* Writing PC ends any IT block.  */
  if (Rn == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      /* mov/movs place Rd in bits 11:8; cmp places Rn in bits 19:16.  */
      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean narrow;
      bfd_boolean low_regs;

      low_regs = (Rn <= 7 && Rm <= 7);
      opcode = inst.instruction;
      /* Inside an IT block only non-flag-setting forms can be narrow;
	 outside, movs needs low registers to be narrow.  */
      if (in_it_block ())
	narrow = opcode != T_MNEM_movs;
      else
	narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
	  || inst.operands[1].shifted)
	narrow = FALSE;

      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
	  && !inst.operands[1].shifted
	  && Rn == REG_PC
	  && Rm == REG_LR)
	{
	  inst.instruction = T2_SUBS_PC_LR;
	  return;
	}

      if (opcode == T_MNEM_cmp)
	{
	  constraint (Rn == REG_PC, BAD_PC);
	  if (narrow)
	    {
	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
		 but valid.  */
	      warn_deprecated_sp (Rm);
	      /* R15 was documented as a valid choice for Rm in ARMv6,
		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
		 tools reject R15, so we do too.  */
	      constraint (Rm == REG_PC, BAD_PC);
	    }
	  else
	    reject_bad_reg (Rm);
	}
      else if (opcode == T_MNEM_mov
	       || opcode == T_MNEM_movs)
	{
	  if (inst.operands[1].isreg)
	    {
	      if (opcode == T_MNEM_movs)
		{
		  reject_bad_reg (Rn);
		  reject_bad_reg (Rm);
		}
	      else if (narrow)
		{
		  /* This is mov.n.  */
		  if ((Rn == REG_SP || Rn == REG_PC)
		      && (Rm == REG_SP || Rm == REG_PC))
		    {
		      as_tsktsk (_("Use of r%u as a source register is "
				   "deprecated when r%u is the destination "
				   "register."), Rm, Rn);
		    }
		}
	      else
		{
		  /* This is mov.w.  */
		  constraint (Rn == REG_PC, BAD_PC);
		  constraint (Rm == REG_PC, BAD_PC);
		  constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
		}
	    }
	  else
	    reject_bad_reg (Rn);
	}

      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand.  */
	  if (!in_it_block () && opcode == T_MNEM_mov)
	    narrow = 0;
	  if (low_regs && narrow)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= Rn << 8;
	      /* Thumb-1 group relocations keep their own fixup type;
		 otherwise either fix up now or mark for relaxation.  */
	      if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		  || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		{
		  if (inst.size_req == 2)
		    inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
		  else
		    inst.relax = opcode;
		}
	    }
	  else
	    {
	      constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
			  && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
			  THUMB1_RELOC_ONLY);

	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= Rn << r0off;
	      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
	       && (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs))
	{
	  /* Register shifts are encoded as separate shift instructions.  */
	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);

	  if (in_it_block ())
	    narrow = !flags;
	  else
	    narrow = flags;

	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (!low_regs || inst.operands[1].imm > 7)
	    narrow = FALSE;

	  /* The narrow shift form is Rd = Rd shift Rs.  */
	  if (Rn != Rm)
	    narrow = FALSE;

	  switch (inst.operands[1].shift_kind)
	    {
	    case SHIFT_LSL:
	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
	      break;
	    case SHIFT_ASR:
	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
	      break;
	    case SHIFT_LSR:
	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
	      break;
	    case SHIFT_ROR:
	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
	      break;
	    default:
	      abort ();
	    }

	  inst.instruction = opcode;
	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= inst.operands[1].imm << 3;
	    }
	  else
	    {
	      if (flags)
		inst.instruction |= CONDS_BIT;

	      inst.instruction |= Rn << 8;
	      inst.instruction |= Rm << 16;
	      inst.instruction |= inst.operands[1].imm;
	    }
	}
      else if (!narrow)
	{
	  /* Some mov with immediate shift have narrow variants.
	     Register shifts are handled above.  */
	  if (low_regs && inst.operands[1].shifted
	      && (inst.instruction == T_MNEM_mov
		  || inst.instruction == T_MNEM_movs))
	    {
	      if (in_it_block ())
		narrow = (inst.instruction == T_MNEM_mov);
	      else
		narrow = (inst.instruction == T_MNEM_movs);
	    }

	  if (narrow)
	    {
	      switch (inst.operands[1].shift_kind)
		{
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		default: narrow = FALSE; break;
		}
	    }

	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
      else
	switch (inst.instruction)
	  {
	  case T_MNEM_mov:
	    /* In v4t or v5t a move of two lowregs produces unpredictable
	       results.  Don't allow this.  */
	    if (low_regs)
	      {
		/* NOTE(review): this message is not wrapped in _() so it
		   is never translated — confirm whether intentional.  */
		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
			    "MOV Rd, Rs with two low registers is not "
			    "permitted on this architecture");
		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
					arm_ext_v6);
	      }

	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (Rn & 0x8) << 4;
	    inst.instruction |= (Rn & 0x7);
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_movs:
	    /* We know we have low registers at this point.
	       Generate LSLS Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_LSL_I;
	    inst.instruction |= Rn;
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_cmp:
	    if (low_regs)
	      {
		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= Rn;
		inst.instruction |= Rm << 3;
	      }
	    else
	      {
		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (Rn & 0x8) << 4;
		inst.instruction |= (Rn & 0x7);
		inst.instruction |= Rm << 3;
	      }
	    break;
	  }
      return;
    }

  /* Pre-unified syntax from here on.  */
  inst.instruction = THUMB_OP16 (inst.instruction);

  /* PR 10443: Do not silently ignore shifted operands.  */
  constraint (inst.operands[1].shifted,
	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));

  if (inst.operands[1].isreg)
    {
      if (Rn < 8 && Rm < 8)
	{
	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
	     since a MOV instruction produces unpredictable results.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_ADD_I3;
	  else
	    inst.instruction = T_OPCODE_CMP_LR;

	  inst.instruction |= Rn;
	  inst.instruction |= Rm << 3;
	}
      else
	{
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_MOV_HR;
	  else
	    inst.instruction = T_OPCODE_CMP_HR;
	  do_t_cpy ();
	}
    }
  else
    {
      constraint (Rn > 7,
		  _("only lo regs allowed with immediate"));
      inst.instruction |= Rn << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
12082
12083 static void
12084 do_t_mov16 (void)
12085 {
12086 unsigned Rd;
12087 bfd_vma imm;
12088 bfd_boolean top;
12089
12090 top = (inst.instruction & 0x00800000) != 0;
12091 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
12092 {
12093 constraint (top, _(":lower16: not allowed this instruction"));
12094 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
12095 }
12096 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
12097 {
12098 constraint (!top, _(":upper16: not allowed this instruction"));
12099 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
12100 }
12101
12102 Rd = inst.operands[0].reg;
12103 reject_bad_reg (Rd);
12104
12105 inst.instruction |= Rd << 8;
12106 if (inst.reloc.type == BFD_RELOC_UNUSED)
12107 {
12108 imm = inst.reloc.exp.X_add_number;
12109 inst.instruction |= (imm & 0xf000) << 4;
12110 inst.instruction |= (imm & 0x0800) << 15;
12111 inst.instruction |= (imm & 0x0700) << 4;
12112 inst.instruction |= (imm & 0x00ff);
12113 }
12114 }
12115
/* Thumb MVN/MVNS/TST/CMN/TEQ (argument parse).  Chooses between the
   16-bit two-register form and the 32-bit immediate/shifted forms.  */
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* cmp/cmn allow SP as the first operand but never PC; the other
     mnemonics reject both SP and PC.  */
  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      /* mvn/mvns put Rd in bits 11:8; the compare forms use bits 19:16.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn
	       || inst.instruction == T_MNEM_tst)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Pre-unified syntax: 16-bit low-register forms only.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
12195
/* Thumb MRS (argument parse).  Accepts either a banked/special register
   operand or a PSR-flags immediate, after first letting the VFP
   new-syntax handler claim the instruction.  */
static void
do_t_mrs (void)
{
  unsigned Rd;

  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      unsigned br = inst.operands[1].reg;
      /* Accept only banked registers (bit 9 set) or the M-profile
	 special registers (0xf000 group) — presumably the encoding
	 produced by the special-register parser; verify against it.  */
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698:  The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bfd_boolean m_profile =
	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
	  constraint ((flags != 0) && m_profile, _("selected processor does "
	      "not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      inst.instruction |= 0xf0000;
    }
}
12243
/* Thumb MSR (argument parse).  The first operand is either a special
   register or a PSR-flags immediate; the source must be a register.  */
static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  /* A register-style operand carries the encoded special-register
     value; otherwise use the parsed PSR flags.  */
  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698:  The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bfd_boolean m_profile =
	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
      /* With the DSP extension the _s/_f masks are allowed; without it
	 only the plain flags field may be written.  */
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		    && (bits & ~(PSR_s | PSR_f)) != 0)
		   || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		       && bits != PSR_f)) && m_profile,
		  _("selected processor does not support requested special "
		    "purpose register"));
    }
  else
     constraint ((flags & 0xff) != 0, _("selected processor does not support "
		 "requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
12290
/* Thumb MUL/MULS (argument parse).  Picks the 16-bit form when the
   destination overlaps a low-register source, otherwise the 32-bit
   three-register encoding.  */
static void
do_t_mul (void)
{
  bfd_boolean narrow;
  unsigned Rd, Rn, Rm;

  /* Two-operand form: the destination doubles as the second source.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  if (unified_syntax)
    {
      if (inst.size_req == 4
	  || (Rd != Rn
	      && Rd != Rm)
	  || Rn > 7
	  || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_muls)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
    }
  else
    {
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);
      narrow = TRUE;
    }

  if (narrow)
    {
      /* 16-bit MULS/Conditional MUL.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      /* The narrow form is destructive: Rd must equal one source.  */
      if (Rd == Rn)
	inst.instruction |= Rm << 3;
      else if (Rd == Rm)
	inst.instruction |= Rn << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
  else
    {
      constraint (inst.instruction != T_MNEM_mul,
		  _("Thumb-2 MUL must not set flags"));
      /* 32-bit MUL.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rn << 16;
      inst.instruction |= Rm << 0;

      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
      reject_bad_reg (Rm);
    }
}
12353
12354 static void
12355 do_t_mull (void)
12356 {
12357 unsigned RdLo, RdHi, Rn, Rm;
12358
12359 RdLo = inst.operands[0].reg;
12360 RdHi = inst.operands[1].reg;
12361 Rn = inst.operands[2].reg;
12362 Rm = inst.operands[3].reg;
12363
12364 reject_bad_reg (RdLo);
12365 reject_bad_reg (RdHi);
12366 reject_bad_reg (Rn);
12367 reject_bad_reg (Rm);
12368
12369 inst.instruction |= RdLo << 12;
12370 inst.instruction |= RdHi << 8;
12371 inst.instruction |= Rn << 16;
12372 inst.instruction |= Rm;
12373
12374 if (RdLo == RdHi)
12375 as_tsktsk (_("rdhi and rdlo must be different"));
12376 }
12377
static void
do_t_nop (void)
{
  /* NOP and NOP-compatible hint instructions.  */
  set_it_insn_type (NEUTRAL_IT_INSN);

  if (unified_syntax)
    {
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
	{
	  /* Hints above 15 only exist in the 32-bit encoding.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].imm;
	}
      else
	{
	  /* PR9722: Check for Thumb2 availability before
	     generating a thumb2 nop instruction.  */
	  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= inst.operands[0].imm << 4;
	    }
	  else
	    /* 0x46c0 is "MOV r8, r8", the traditional pre-v6T2 NOP.  */
	    inst.instruction = 0x46c0;
	}
    }
  else
    {
      constraint (inst.operands[0].present,
		  _("Thumb does not support NOP with hints"));
      inst.instruction = 0x46c0;
    }
}
12410
static void
do_t_neg (void)
{
  /* NEG/NEGS: pick the 16-bit encoding when both registers are low and
     the flag-setting behaviour matches the IT context.  */
  if (unified_syntax)
    {
      bfd_boolean narrow;

      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      if (!narrow)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg << 8;
	  inst.instruction |= inst.operands[1].reg << 16;
	}
      else
	{
	  inst.instruction = THUMB_OP16 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
  else
    {
      /* Pre-UAL syntax: only the 16-bit low-register form exists.  */
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
		  BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
}
12451
static void
do_t_orn (void)
{
  /* ORN (32-bit only): second operand is either a shifted register or a
     T32 modified-immediate constant.  */
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;

  reject_bad_reg (Rd);
  /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN.  */
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;

  if (!inst.operands[2].isreg)
    {
      /* Switch to the immediate form of the opcode and let the fixup
	 machinery encode the modified-immediate constant.  */
      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
    }
  else
    {
      unsigned Rm;

      Rm = inst.operands[2].reg;
      reject_bad_reg (Rm);

      constraint (inst.operands[2].shifted
		  && inst.operands[2].immisreg,
		  _("shift must be constant"));
      encode_thumb32_shifted_operand (2);
    }
}
12485
static void
do_t_pkhbt (void)
{
  /* PKHBT: pack halfword bottom+top, with an optional shift on Rm.
     Also used (via do_t_pkhtb) for PKHTB.  */
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  if (inst.operands[3].present)
    {
      /* Shift amount: imm3 goes in bits 12-14, imm2 in bits 6-7.  */
      unsigned int val = inst.reloc.exp.X_add_number;
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));
      inst.instruction |= (val & 0x1c) << 10;
      inst.instruction |= (val & 0x03) << 6;
    }
}
12511
12512 static void
12513 do_t_pkhtb (void)
12514 {
12515 if (!inst.operands[3].present)
12516 {
12517 unsigned Rtmp;
12518
12519 inst.instruction &= ~0x00000020;
12520
12521 /* PR 10168. Swap the Rm and Rn registers. */
12522 Rtmp = inst.operands[1].reg;
12523 inst.operands[1].reg = inst.operands[2].reg;
12524 inst.operands[2].reg = Rtmp;
12525 }
12526 do_t_pkhbt ();
12527 }
12528
static void
do_t_pld (void)
{
  /* PLD/PLI-style preload: a register index must not be SP or PC.  */
  if (inst.operands[0].immisreg)
    reject_bad_reg (inst.operands[0].imm);

  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
12537
static void
do_t_push_pop (void)
{
  /* PUSH/POP: choose a 16-bit encoding (low registers, optionally plus
     LR on push or PC on pop) or fall back to 32-bit LDM/STM.  */
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  if (inst.size_req != 4 && (mask & ~0xff) == 0)
    /* Low registers only: plain 16-bit encoding.  */
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if (inst.size_req != 4
	   && (mask & ~0xff) == (1U << (inst.instruction == T_MNEM_push
				       ? REG_LR : REG_PC)))
    {
      /* Low registers plus exactly LR (push) or PC (pop): 16-bit form
	 with the M/P bit set.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      /* Anything else: 32-bit LDM/STM with SP (13) as base.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_ldmstm (13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
12570
static void
do_t_rbit (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  /* The T32 encoding carries the source register in both the Rn (bits
     16-19) and Rm (bits 0-3) fields.  */
  inst.instruction |= Rm << 16;
  inst.instruction |= Rm;
}
12586
static void
do_t_rev (void)
{
  /* REV-family byte-reverse: 16-bit form for low registers when a
     32-bit encoding was not requested.  */
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (Rd <= 7 && Rm <= 7
      && inst.size_req != 4)
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      /* The 32-bit encoding duplicates the source register in the Rn
	 and Rm fields.  */
      inst.instruction |= Rm << 16;
      inst.instruction |= Rm;
    }
  else
    inst.error = BAD_HIREG;
}
12615
12616 static void
12617 do_t_rrx (void)
12618 {
12619 unsigned Rd, Rm;
12620
12621 Rd = inst.operands[0].reg;
12622 Rm = inst.operands[1].reg;
12623
12624 reject_bad_reg (Rd);
12625 reject_bad_reg (Rm);
12626
12627 inst.instruction |= Rd << 8;
12628 inst.instruction |= Rm;
12629 }
12630
static void
do_t_rsb (void)
{
  /* RSB: reverse subtract.  "rsbs Rd, Rs, #0" may be narrowed to the
     16-bit NEG encoding.  */
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Bit 20 is the S (flag-setting) bit.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      if (inst.reloc.exp.X_op != O_constant
	  || inst.reloc.exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.reloc.type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  /* Immediate form: let the fixup encode the T32 constant.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
12685
12686 static void
12687 do_t_setend (void)
12688 {
12689 if (warn_on_deprecated
12690 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
12691 as_tsktsk (_("setend use is deprecated for ARMv8"));
12692
12693 set_it_insn_type (OUTSIDE_IT_INSN);
12694 if (inst.operands[0].imm)
12695 inst.instruction |= 0x8;
12696 }
12697
static void
do_t_shift (void)
{
  /* Shift instructions (ASR, LSL, LSR, ROR), immediate and register
     forms, flag-setting and plain variants.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* A 16-bit encoding needs low registers, flag-setting consistent
	 with the IT context, matching destination and first source for
	 the register form, and is not available for ROR-by-immediate.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* A 32-bit shift by immediate is encoded as MOV with a
		 shifted register operand.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.reloc.type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Pre-UAL syntax: 16-bit encodings only.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;

	  /* PR 12854: Error on extraneous shifts.  */
	  constraint (inst.operands[2].shifted,
		      _("extraneous shift as part of operand to shift insn"));
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
12845
12846 static void
12847 do_t_simd (void)
12848 {
12849 unsigned Rd, Rn, Rm;
12850
12851 Rd = inst.operands[0].reg;
12852 Rn = inst.operands[1].reg;
12853 Rm = inst.operands[2].reg;
12854
12855 reject_bad_reg (Rd);
12856 reject_bad_reg (Rn);
12857 reject_bad_reg (Rm);
12858
12859 inst.instruction |= Rd << 8;
12860 inst.instruction |= Rn << 16;
12861 inst.instruction |= Rm;
12862 }
12863
static void
do_t_simd2 (void)
{
  unsigned Rd, Rn, Rm;

  /* As do_t_simd, but the two source operands are taken in reversed
     order: Rm comes from operand 1 and Rn from operand 2.  */
  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
}
12881
static void
do_t_smc (void)
{
  /* SMC: the 16-bit immediate is split over three fields of the T32
     encoding.  */
  unsigned int value = inst.reloc.exp.X_add_number;
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
	      _("SMC is not permitted on this architecture"));
  constraint (inst.reloc.exp.X_op != O_constant,
	      _("expression too complex"));
  inst.reloc.type = BFD_RELOC_UNUSED;
  inst.instruction |= (value & 0xf000) >> 12;
  inst.instruction |= (value & 0x0ff0);
  inst.instruction |= (value & 0x000f) << 16;
  /* PR gas/15623: SMC instructions must be last in an IT block.  */
  set_it_insn_type_last ();
}
12897
12898 static void
12899 do_t_hvc (void)
12900 {
12901 unsigned int value = inst.reloc.exp.X_add_number;
12902
12903 inst.reloc.type = BFD_RELOC_UNUSED;
12904 inst.instruction |= (value & 0x0fff);
12905 inst.instruction |= (value & 0xf000) << 4;
12906 }
12907
/* Shared encoder for SSAT and USAT.  BIAS is subtracted from the
   saturation-position operand before encoding (1 for SSAT, 0 for
   USAT).  */

static void
do_t_ssat_usat (int bias)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  if (inst.operands[3].present)
    {
      offsetT shift_amount = inst.reloc.exp.X_add_number;

      inst.reloc.type = BFD_RELOC_UNUSED;

      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit.  */

	  /* imm3 in bits 12-14, imm2 in bits 6-7.  */
	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
12945
static void
do_t_ssat (void)
{
  /* SSAT: the saturation position is 1-based, hence bias 1.  */
  do_t_ssat_usat (1);
}
12951
12952 static void
12953 do_t_ssat16 (void)
12954 {
12955 unsigned Rd, Rn;
12956
12957 Rd = inst.operands[0].reg;
12958 Rn = inst.operands[2].reg;
12959
12960 reject_bad_reg (Rd);
12961 reject_bad_reg (Rn);
12962
12963 inst.instruction |= Rd << 8;
12964 inst.instruction |= inst.operands[1].imm - 1;
12965 inst.instruction |= Rn << 16;
12966 }
12967
static void
do_t_strex (void)
{
  /* STREX: only a plain base register with an immediate offset is a
     legal address.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  /* The offset is resolved later as an unsigned 8-bit fixup.  */
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
12984
static void
do_t_strexd (void)
{
  /* STREXD: when the second data register is omitted, it defaults to
     the register after the first.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[1].reg + 1;

  /* The status register must not overlap any other operand.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 16;
}
13001
13002 static void
13003 do_t_sxtah (void)
13004 {
13005 unsigned Rd, Rn, Rm;
13006
13007 Rd = inst.operands[0].reg;
13008 Rn = inst.operands[1].reg;
13009 Rm = inst.operands[2].reg;
13010
13011 reject_bad_reg (Rd);
13012 reject_bad_reg (Rn);
13013 reject_bad_reg (Rm);
13014
13015 inst.instruction |= Rd << 8;
13016 inst.instruction |= Rn << 16;
13017 inst.instruction |= Rm;
13018 inst.instruction |= inst.operands[3].imm << 4;
13019 }
13020
static void
do_t_sxth (void)
{
  /* SXTH-family extend: 16-bit form when both registers are low and
     there is no rotation, otherwise the 32-bit form.  */
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      /* Rotation amount goes in bits 4-5.  */
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
13056
static void
do_t_swi (void)
{
  /* We have to do the following check manually as ARM_EXT_OS only applies
     to ARM_EXT_V6M.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6m))
    {
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_os)
	  /* This only applies to the v6m, however, not later
	     architectures.  */
	  && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7))
	as_bad (_("SVC is not permitted on this architecture"));
      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, arm_ext_os);
    }

  inst.reloc.type = BFD_RELOC_ARM_SWI;
}
13073
static void
do_t_tb (void)
{
  /* TBB/TBH: table branch byte/halfword.  */
  unsigned Rn, Rm;
  int half;

  /* Bit 4 of the opcode distinguishes TBH from TBB.  */
  half = (inst.instruction & 0x10) != 0;
  set_it_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  /* Only TBH accepts a shifted index ([Rn, Rm, LSL #1]).  */
  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
13095
static void
do_t_udf (void)
{
  /* UDF: permanently undefined instruction, with an optional
     immediate (defaulting to 0).  */
  if (!inst.operands[0].present)
    inst.operands[0].imm = 0;

  if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
    {
      constraint (inst.size_req == 2,
		  _("immediate value out of range"));
      /* 32-bit form: imm16 split into bits 16-19 and 0-11.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
      inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
    }
  else
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].imm;
    }

  set_it_insn_type (NEUTRAL_IT_INSN);
}
13118
13119
static void
do_t_usat (void)
{
  /* USAT: the saturation position is 0-based, hence bias 0.  */
  do_t_ssat_usat (0);
}
13125
13126 static void
13127 do_t_usat16 (void)
13128 {
13129 unsigned Rd, Rn;
13130
13131 Rd = inst.operands[0].reg;
13132 Rn = inst.operands[2].reg;
13133
13134 reject_bad_reg (Rd);
13135 reject_bad_reg (Rn);
13136
13137 inst.instruction |= Rd << 8;
13138 inst.instruction |= inst.operands[1].imm;
13139 inst.instruction |= Rn << 16;
13140 }
13141
13142 /* Neon instruction encoder helpers. */
13143
13144 /* Encodings for the different types for various Neon opcodes. */
13145
/* An "invalid" code for the following tables.  */
#define N_INV -1u

/* One row of NEON_ENC_TAB: the alternative opcode encodings of a
   single overloaded Neon mnemonic.  */
struct neon_tab_entry
{
  unsigned integer;
  unsigned float_or_poly;
  unsigned scalar_or_imm;
};
13155
/* Map overloaded Neon opcodes to their respective encodings.  The three
   columns are the neon_tab_entry fields: integer, float-or-polynomial,
   and scalar-or-immediate variants (N_INV where absent).  */
#define NEON_ENC_TAB					\
  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */  	\
  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
  X(vfma,	N_INV, 0x0000c10, N_INV),		\
  X(vfms,	N_INV, 0x0200c10, N_INV),		\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
  X(vqrdmlah,   0x3000b10, N_INV,     0x0800e40),	\
  X(vqrdmlsh,   0x3000c10, N_INV,     0x0800f40),	\
  X(vshl,	0x0000400, N_INV,     0x0800510),	\
  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
  X(vand,	0x0000110, N_INV,     0x0800030),	\
  X(vbic,	0x0100110, N_INV,     0x0800030),	\
  X(veor,	0x1000110, N_INV,     N_INV),		\
  X(vorn,	0x0300110, N_INV,     0x0800010),	\
  X(vorr,	0x0200110, N_INV,     0x0800010),	\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),		\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
  X(vst2,	0x0000100, 0x0800100, N_INV),		\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
  X(vst3,	0x0000200, 0x0800200, N_INV),		\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
  X(vst4,	0x0000300, 0x0800300, N_INV),		\
  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
  X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
  X(vnmla,      0xe100a40, 0xe100b40, N_INV),		\
  X(vnmls,      0xe100a00, 0xe100b00, N_INV),		\
  X(vfnma,      0xe900a40, 0xe900b40, N_INV),		\
  X(vfnms,      0xe900a00, 0xe900b00, N_INV),		\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
  X(vcmpez,	0xeb50ac0, 0xeb50bc0, N_INV),		\
  X(vseleq,	0xe000a00, N_INV,     N_INV),		\
  X(vselvs,	0xe100a00, N_INV,     N_INV),		\
  X(vselge,	0xe200a00, N_INV,     N_INV),		\
  X(vselgt,	0xe300a00, N_INV,     N_INV),		\
  X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),		\
  X(vminnm,	0xe800a40, 0x3200f10, N_INV),		\
  X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),		\
  X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),		\
  X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),		\
  X(aes,	0x3b00300, N_INV,     N_INV),		\
  X(sha3op,	0x2000c00, N_INV,     N_INV),		\
  X(sha1h,	0x3b902c0, N_INV,     N_INV),		\
  X(sha2op,     0x3ba0380, N_INV,     N_INV)

/* Mnemonic indices N_MNEM_<opc>, one per NEON_ENC_TAB row.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};

/* The encoding table itself, indexed by enum neon_opc.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};
13247
/* Do not use these macros; instead, use NEON_ENCODE defined below.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
/* The SINGLE/DOUBLE/FPV8 variants preserve the condition bits of the
   original opcode.  */
#define NEON_ENC_SINGLE_(X)	\
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X)	\
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
#define NEON_ENC_FPV8_(X)	\
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))

/* Replace the current opcode with its encoding variant TYPE and mark
   the instruction as a Neon one.  */
#define NEON_ENCODE(type, inst)					\
  do								\
    {								\
      inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
      inst.is_neon = 1;						\
    }								\
  while (0)

/* Diagnose a Neon type suffix attached to a non-Neon instruction.  */
#define check_neon_suffixes						\
  do								\
    {								\
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
	{								\
	  as_bad (_("invalid neon suffix for non neon instruction"));	\
	  return;							\
	}								\
    }									\
  while (0)
13283
13284 /* Define shapes for instruction operands. The following mnemonic characters
13285 are used in this table:
13286
13287 F - VFP S<n> register
13288 D - Neon D<n> register
13289 Q - Neon Q<n> register
13290 I - Immediate
13291 S - Scalar
13292 R - ARM register
13293 L - D<n> register list
13294
13295 This table is used to generate various data:
13296 - enumerations of the form NS_DDR to be used as arguments to
13297 neon_select_shape.
13298 - a table classifying shapes into single, double, quad, mixed.
13299 - a table used to drive neon_select_shape. */
13300
/* Each row is X(operand-count, (operand kinds), classification); the
   letters are documented in the comment above.  */
#define NEON_SHAPE_DEF			\
  X(3, (D, D, D), DOUBLE),		\
  X(3, (Q, Q, Q), QUAD),		\
  X(3, (D, D, I), DOUBLE),		\
  X(3, (Q, Q, I), QUAD),		\
  X(3, (D, D, S), DOUBLE),		\
  X(3, (Q, Q, S), QUAD),		\
  X(2, (D, D), DOUBLE),			\
  X(2, (Q, Q), QUAD),			\
  X(2, (D, S), DOUBLE),			\
  X(2, (Q, S), QUAD),			\
  X(2, (D, R), DOUBLE),			\
  X(2, (Q, R), QUAD),			\
  X(2, (D, I), DOUBLE),			\
  X(2, (Q, I), QUAD),			\
  X(3, (D, L, D), DOUBLE),		\
  X(2, (D, Q), MIXED),			\
  X(2, (Q, D), MIXED),			\
  X(3, (D, Q, I), MIXED),		\
  X(3, (Q, D, I), MIXED),		\
  X(3, (Q, D, D), MIXED),		\
  X(3, (D, Q, Q), MIXED),		\
  X(3, (Q, Q, D), MIXED),		\
  X(3, (Q, D, S), MIXED),		\
  X(3, (D, Q, S), MIXED),		\
  X(4, (D, D, D, I), DOUBLE),		\
  X(4, (Q, Q, Q, I), QUAD),		\
  X(2, (F, F), SINGLE),			\
  X(3, (F, F, F), SINGLE),		\
  X(2, (F, I), SINGLE),			\
  X(2, (F, D), MIXED),			\
  X(2, (D, F), MIXED),			\
  X(3, (F, F, I), MIXED),		\
  X(4, (R, R, F, F), SINGLE),		\
  X(4, (F, F, R, R), SINGLE),		\
  X(3, (D, R, R), DOUBLE),		\
  X(3, (R, R, D), DOUBLE),		\
  X(2, (S, R), SINGLE),			\
  X(2, (R, S), SINGLE),			\
  X(2, (F, R), SINGLE),			\
  X(2, (R, F), SINGLE),			\
/* Half float shape supported so far.  */\
  X (2, (H, D), MIXED),			\
  X (2, (D, H), MIXED),			\
  X (2, (H, F), MIXED),			\
  X (2, (F, H), MIXED),			\
  X (2, (H, H), HALF),			\
  X (2, (H, R), HALF),			\
  X (2, (R, H), HALF),			\
  X (2, (H, I), HALF),			\
  X (3, (H, H, H), HALF),		\
  X (3, (H, F, I), MIXED),		\
  X (3, (F, H, I), MIXED)

#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D

#define X(N, L, C) S##N L

/* Enumerators of the form NS_<operand letters>, e.g. NS_DDD.  */
enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4
13371
/* Classification of each shape (third column of NEON_SHAPE_DEF).  */
enum neon_shape_class
{
  SC_HALF,
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};

#define X(N, L, C) SC_##C

/* Shape classes, indexed by enum neon_shape.  */
static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X

/* The individual operand kinds a shape can contain.  */
enum neon_shape_el
{
  SE_H,
  SE_F,
  SE_D,
  SE_Q,
  SE_I,
  SE_S,
  SE_R,
  SE_L
};

/* Register widths of above.  */
static unsigned neon_shape_el_size[] =
{
  16,
  32,
  64,
  128,
  0,
  32,
  32,
  0
};

/* Per-shape operand count and the kind of each operand.  */
struct neon_shape_info
{
  unsigned els;
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
};

#define S2(A,B)		{ SE_##A, SE_##B }
#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

/* Shape descriptions, indexed by enum neon_shape.  */
static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
13436
13437 /* Bit masks used in type checking given instructions.
13438 'N_EQK' means the type must be the same as (or based on in some way) the key
13439 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
13440 set, various other bits can be set as well in order to modify the meaning of
13441 the type constraint. */
13442
enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_P64	 = 0x0200000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
  /* The low-order modifier bits below are only meaningful together
     with N_EQK, so they may reuse the same values as the type bits.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_P64
};

#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

/* Convenience groupings of the element-type bits.  */
#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_S_32     (N_S8 | N_S16 | N_S32)
#define N_F_16_32  (N_F16 | N_F32)
#define N_SUF_32   (N_SU_32 | N_F_16_32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
#define N_F_ALL    (N_F16 | N_F32 | N_F64)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
13497
13498 /* Select a "shape" for the current instruction (describing register types or
13499 sizes) from a list of alternatives. Return NS_NULL if the current instruction
13500 doesn't fit. For non-polymorphic shapes, checking is usually done as a
13501 function of operand parsing, so this function doesn't need to be called.
13502 Shapes should be listed in order of decreasing length. */
13503
static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Walk the NS_NULL-terminated candidate list, stopping at the first shape
     whose every element matches the corresponding parsed operand.  */
  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  /* A missing operand can never satisfy a shape element.  */
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  switch (neon_shape_tab[shape].el[j])
	    {
	      /* If a  .f16,  .16,  .u16,  .s16 type specifier is given over
		 a VFP single precision register operand, it's essentially
		 means only half of the register is used.

		 If the type specifier is given after the mnemonics, the
		 information is stored in inst.vectype.  If the type specifier
		 is given after register operand, the information is stored
		 in inst.operands[].vectype.

		 When there is only one type specifier, and all the register
		 operands are the same type of hardware register, the type
		 specifier applies to all register operands.

		 If no type specifier is given, the shape is inferred from
		 operand information.

		 for example:
		 vadd.f16 s0, s1, s2:		NS_HHH
		 vabs.f16 s0, s1:		NS_HH
		 vmov.f16 s0, r1:		NS_HR
		 vmov.f16 r0, s1:		NS_RH
		 vcvt.f16 r0, s1:		NS_RH
		 vcvt.f16.s32	s2, s2, #29:	NS_HFI
		 vcvt.f16.s32	s2, s2:		NS_HF
	      */
	    case SE_H:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1
			 && inst.vectype.el[0].size == 16)
			|| (inst.vectype.elems > 1
			    && inst.vectype.el[j].size == 16)
			|| (inst.vectype.elems == 0
			    && inst.operands[j].vectype.type != NT_invtype
			    && inst.operands[j].vectype.size == 16))))
		matches = 0;
	      break;

	    case SE_F:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1 && inst.vectype.el[0].size == 32)
			|| (inst.vectype.elems > 1 && inst.vectype.el[j].size == 32)
			|| (inst.vectype.elems == 0
			    && (inst.operands[j].vectype.size == 32
				|| inst.operands[j].vectype.type == NT_invtype)))))
		matches = 0;
	      break;

	    case SE_D:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    /* Register lists are not validated here.  */
	    case SE_L:
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
	/* We've matched all the entries in the shape table, and we don't
	   have any left over operands which have not been matched.  */
	break;
    }

  va_end (ap);

  /* Only diagnose when an actual candidate list was supplied.  */
  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
13640
13641 /* True if SHAPE is predominantly a quadword operation (most of the time, this
13642 means the Q bit should be set). */
13643
13644 static int
13645 neon_quad (enum neon_shape shape)
13646 {
13647 return neon_shape_class[shape] == SC_QUAD;
13648 }
13649
13650 static void
13651 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
13652 unsigned *g_size)
13653 {
13654 /* Allow modification to be made to types which are constrained to be
13655 based on the key element, based on bits set alongside N_EQK. */
13656 if ((typebits & N_EQK) != 0)
13657 {
13658 if ((typebits & N_HLF) != 0)
13659 *g_size /= 2;
13660 else if ((typebits & N_DBL) != 0)
13661 *g_size *= 2;
13662 if ((typebits & N_SGN) != 0)
13663 *g_type = NT_signed;
13664 else if ((typebits & N_UNS) != 0)
13665 *g_type = NT_unsigned;
13666 else if ((typebits & N_INT) != 0)
13667 *g_type = NT_integer;
13668 else if ((typebits & N_FLT) != 0)
13669 *g_type = NT_float;
13670 else if ((typebits & N_SIZ) != 0)
13671 *g_type = NT_untyped;
13672 }
13673 }
13674
13675 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
13676 operand type, i.e. the single type specified in a Neon instruction when it
13677 is the only one given. */
13678
13679 static struct neon_type_el
13680 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
13681 {
13682 struct neon_type_el dest = *key;
13683
13684 gas_assert ((thisarg & N_EQK) != 0);
13685
13686 neon_modify_type_size (thisarg, &dest.type, &dest.size);
13687
13688 return dest;
13689 }
13690
13691 /* Convert Neon type and size into compact bitmask representation. */
13692
13693 static enum neon_type_mask
13694 type_chk_of_el_type (enum neon_el_type type, unsigned size)
13695 {
13696 switch (type)
13697 {
13698 case NT_untyped:
13699 switch (size)
13700 {
13701 case 8: return N_8;
13702 case 16: return N_16;
13703 case 32: return N_32;
13704 case 64: return N_64;
13705 default: ;
13706 }
13707 break;
13708
13709 case NT_integer:
13710 switch (size)
13711 {
13712 case 8: return N_I8;
13713 case 16: return N_I16;
13714 case 32: return N_I32;
13715 case 64: return N_I64;
13716 default: ;
13717 }
13718 break;
13719
13720 case NT_float:
13721 switch (size)
13722 {
13723 case 16: return N_F16;
13724 case 32: return N_F32;
13725 case 64: return N_F64;
13726 default: ;
13727 }
13728 break;
13729
13730 case NT_poly:
13731 switch (size)
13732 {
13733 case 8: return N_P8;
13734 case 16: return N_P16;
13735 case 64: return N_P64;
13736 default: ;
13737 }
13738 break;
13739
13740 case NT_signed:
13741 switch (size)
13742 {
13743 case 8: return N_S8;
13744 case 16: return N_S16;
13745 case 32: return N_S32;
13746 case 64: return N_S64;
13747 default: ;
13748 }
13749 break;
13750
13751 case NT_unsigned:
13752 switch (size)
13753 {
13754 case 8: return N_U8;
13755 case 16: return N_U16;
13756 case 32: return N_U32;
13757 case 64: return N_U64;
13758 default: ;
13759 }
13760 break;
13761
13762 default: ;
13763 }
13764
13765 return N_UTYP;
13766 }
13767
13768 /* Convert compact Neon bitmask type representation to a type and size. Only
13769 handles the case where a single bit is set in the mask. */
13770
13771 static int
13772 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
13773 enum neon_type_mask mask)
13774 {
13775 if ((mask & N_EQK) != 0)
13776 return FAIL;
13777
13778 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
13779 *size = 8;
13780 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
13781 *size = 16;
13782 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
13783 *size = 32;
13784 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
13785 *size = 64;
13786 else
13787 return FAIL;
13788
13789 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
13790 *type = NT_signed;
13791 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
13792 *type = NT_unsigned;
13793 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
13794 *type = NT_integer;
13795 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
13796 *type = NT_untyped;
13797 else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
13798 *type = NT_poly;
13799 else if ((mask & (N_F_ALL)) != 0)
13800 *type = NT_float;
13801 else
13802 return FAIL;
13803
13804 return SUCCESS;
13805 }
13806
13807 /* Modify a bitmask of allowed types. This is only needed for type
13808 relaxation. */
13809
13810 static unsigned
13811 modify_types_allowed (unsigned allowed, unsigned mods)
13812 {
13813 unsigned size;
13814 enum neon_el_type type;
13815 unsigned destmask;
13816 int i;
13817
13818 destmask = 0;
13819
13820 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
13821 {
13822 if (el_type_of_type_chk (&type, &size,
13823 (enum neon_type_mask) (allowed & i)) == SUCCESS)
13824 {
13825 neon_modify_type_size (mods, &type, &size);
13826 destmask |= type_chk_of_el_type (type, size);
13827 }
13828 }
13829
13830 return destmask;
13831 }
13832
13833 /* Check type and return type classification.
13834 The manual states (paraphrase): If one datatype is given, it indicates the
13835 type given in:
13836 - the second operand, if there is one
13837 - the operand, if there is no second operand
13838 - the result, if there are no operands.
13839 This isn't quite good enough though, so we use a concept of a "key" datatype
13840 which is set on a per-instruction basis, which is the one which matters when
13841 only one data type is written.
13842 Note: this function has side-effects (e.g. filling in missing operands). All
13843 Neon instructions should call it before performing bit encoding. */
13844
static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      /* N_IGNORE_TYPE short-circuits all checking.  */
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  /* A type given after the mnemonic and types given after operands are
     mutually exclusive.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      /* One type after the mnemonic: it is the key type; derive the
	 remaining operand types from it.  */
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Pass 0 establishes the key type/size; pass 1 checks every operand
     against the key and the per-operand constraints.  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  /* On the second pass, N_EQK operands check against the key's
	     allowed set, transformed by any modifier bits.  */
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if (((types_allowed & N_UNT) == 0)
	      && ((g_size == 8 && (types_allowed & N_8) != 0)
		  || (g_size == 16 && (types_allowed & N_16) != 0)
		  || (g_size == 32 && (types_allowed & N_32) != 0)
		  || (g_size == 64 && (types_allowed & N_64) != 0)))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;

		  /* Check architecture constraint on FP16 extension.  */
		  if (k_size == 16
		      && k_type == NT_float
		      && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
		    {
		      inst.error = _(BAD_FP16);
		      return badtype;
		    }
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape;
		  unsigned regwidth, match;

		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
		  if (ns == NS_NULL)
		    {
		      first_error (_("invalid instruction shape"));
		      return badtype;
		    }
		  regshape = neon_shape_tab[ns].el[i];
		  regwidth = neon_shape_el_size[regshape];

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  /* FP16 will use a single precision register.  */
		  if (regwidth == 32 && match == 16)
		    {
		      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
			match = regwidth;
		      else
			{
			  inst.error = _(BAD_FP16);
			  return badtype;
			}
		    }

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (_("bad type in Neon instruction"));
		      return badtype;
		    }
		}
	      else
		{
		  /* N_EQK: the operand's type must equal the key's type
		     after applying the modifier bits.  */
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}
14042
14043 /* Neon-style VFP instruction forwarding. */
14044
14045 /* Thumb VFP instructions have 0xE in the condition field. */
14046
14047 static void
14048 do_vfp_cond_or_thumb (void)
14049 {
14050 inst.is_neon = 1;
14051
14052 if (thumb_mode)
14053 inst.instruction |= 0xe0000000;
14054 else
14055 inst.instruction |= inst.cond << 28;
14056 }
14057
14058 /* Look up and encode a simple mnemonic, for use as a helper function for the
14059 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
14060 etc. It is assumed that operand parsing has already been done, and that the
14061 operands are in the form expected by the given opcode (this isn't necessarily
14062 the same as the form in which they were parsed, hence some massaging must
14063 take place before this function is called).
14064 Checks current arch version against that in the looked-up opcode. */
14065
14066 static void
14067 do_vfp_nsyn_opcode (const char *opname)
14068 {
14069 const struct asm_opcode *opcode;
14070
14071 opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
14072
14073 if (!opcode)
14074 abort ();
14075
14076 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
14077 thumb_mode ? *opcode->tvariant : *opcode->avariant),
14078 _(BAD_FPU));
14079
14080 inst.is_neon = 1;
14081
14082 if (thumb_mode)
14083 {
14084 inst.instruction = opcode->tvalue;
14085 opcode->tencode ();
14086 }
14087 else
14088 {
14089 inst.instruction = (inst.cond << 28) | opcode->avalue;
14090 opcode->aencode ();
14091 }
14092 }
14093
14094 static void
14095 do_vfp_nsyn_add_sub (enum neon_shape rs)
14096 {
14097 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
14098
14099 if (rs == NS_FFF || rs == NS_HHH)
14100 {
14101 if (is_add)
14102 do_vfp_nsyn_opcode ("fadds");
14103 else
14104 do_vfp_nsyn_opcode ("fsubs");
14105
14106 /* ARMv8.2 fp16 instruction. */
14107 if (rs == NS_HHH)
14108 do_scalar_fp16_v82_encode ();
14109 }
14110 else
14111 {
14112 if (is_add)
14113 do_vfp_nsyn_opcode ("faddd");
14114 else
14115 do_vfp_nsyn_opcode ("fsubd");
14116 }
14117 }
14118
14119 /* Check operand types to see if this is a VFP instruction, and if so call
14120 PFN (). */
14121
14122 static int
14123 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
14124 {
14125 enum neon_shape rs;
14126 struct neon_type_el et;
14127
14128 switch (args)
14129 {
14130 case 2:
14131 rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14132 et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14133 break;
14134
14135 case 3:
14136 rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14137 et = neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14138 N_F_ALL | N_KEY | N_VFP);
14139 break;
14140
14141 default:
14142 abort ();
14143 }
14144
14145 if (et.type != NT_invtype)
14146 {
14147 pfn (rs);
14148 return SUCCESS;
14149 }
14150
14151 inst.error = NULL;
14152 return FAIL;
14153 }
14154
14155 static void
14156 do_vfp_nsyn_mla_mls (enum neon_shape rs)
14157 {
14158 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
14159
14160 if (rs == NS_FFF || rs == NS_HHH)
14161 {
14162 if (is_mla)
14163 do_vfp_nsyn_opcode ("fmacs");
14164 else
14165 do_vfp_nsyn_opcode ("fnmacs");
14166
14167 /* ARMv8.2 fp16 instruction. */
14168 if (rs == NS_HHH)
14169 do_scalar_fp16_v82_encode ();
14170 }
14171 else
14172 {
14173 if (is_mla)
14174 do_vfp_nsyn_opcode ("fmacd");
14175 else
14176 do_vfp_nsyn_opcode ("fnmacd");
14177 }
14178 }
14179
14180 static void
14181 do_vfp_nsyn_fma_fms (enum neon_shape rs)
14182 {
14183 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
14184
14185 if (rs == NS_FFF || rs == NS_HHH)
14186 {
14187 if (is_fma)
14188 do_vfp_nsyn_opcode ("ffmas");
14189 else
14190 do_vfp_nsyn_opcode ("ffnmas");
14191
14192 /* ARMv8.2 fp16 instruction. */
14193 if (rs == NS_HHH)
14194 do_scalar_fp16_v82_encode ();
14195 }
14196 else
14197 {
14198 if (is_fma)
14199 do_vfp_nsyn_opcode ("ffmad");
14200 else
14201 do_vfp_nsyn_opcode ("ffnmad");
14202 }
14203 }
14204
14205 static void
14206 do_vfp_nsyn_mul (enum neon_shape rs)
14207 {
14208 if (rs == NS_FFF || rs == NS_HHH)
14209 {
14210 do_vfp_nsyn_opcode ("fmuls");
14211
14212 /* ARMv8.2 fp16 instruction. */
14213 if (rs == NS_HHH)
14214 do_scalar_fp16_v82_encode ();
14215 }
14216 else
14217 do_vfp_nsyn_opcode ("fmuld");
14218 }
14219
14220 static void
14221 do_vfp_nsyn_abs_neg (enum neon_shape rs)
14222 {
14223 int is_neg = (inst.instruction & 0x80) != 0;
14224 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_VFP | N_KEY);
14225
14226 if (rs == NS_FF || rs == NS_HH)
14227 {
14228 if (is_neg)
14229 do_vfp_nsyn_opcode ("fnegs");
14230 else
14231 do_vfp_nsyn_opcode ("fabss");
14232
14233 /* ARMv8.2 fp16 instruction. */
14234 if (rs == NS_HH)
14235 do_scalar_fp16_v82_encode ();
14236 }
14237 else
14238 {
14239 if (is_neg)
14240 do_vfp_nsyn_opcode ("fnegd");
14241 else
14242 do_vfp_nsyn_opcode ("fabsd");
14243 }
14244 }
14245
14246 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14247 insns belong to Neon, and are handled elsewhere. */
14248
14249 static void
14250 do_vfp_nsyn_ldm_stm (int is_dbmode)
14251 {
14252 int is_ldm = (inst.instruction & (1 << 20)) != 0;
14253 if (is_ldm)
14254 {
14255 if (is_dbmode)
14256 do_vfp_nsyn_opcode ("fldmdbs");
14257 else
14258 do_vfp_nsyn_opcode ("fldmias");
14259 }
14260 else
14261 {
14262 if (is_dbmode)
14263 do_vfp_nsyn_opcode ("fstmdbs");
14264 else
14265 do_vfp_nsyn_opcode ("fstmias");
14266 }
14267 }
14268
14269 static void
14270 do_vfp_nsyn_sqrt (void)
14271 {
14272 enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14273 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14274
14275 if (rs == NS_FF || rs == NS_HH)
14276 {
14277 do_vfp_nsyn_opcode ("fsqrts");
14278
14279 /* ARMv8.2 fp16 instruction. */
14280 if (rs == NS_HH)
14281 do_scalar_fp16_v82_encode ();
14282 }
14283 else
14284 do_vfp_nsyn_opcode ("fsqrtd");
14285 }
14286
14287 static void
14288 do_vfp_nsyn_div (void)
14289 {
14290 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14291 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14292 N_F_ALL | N_KEY | N_VFP);
14293
14294 if (rs == NS_FFF || rs == NS_HHH)
14295 {
14296 do_vfp_nsyn_opcode ("fdivs");
14297
14298 /* ARMv8.2 fp16 instruction. */
14299 if (rs == NS_HHH)
14300 do_scalar_fp16_v82_encode ();
14301 }
14302 else
14303 do_vfp_nsyn_opcode ("fdivd");
14304 }
14305
14306 static void
14307 do_vfp_nsyn_nmul (void)
14308 {
14309 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14310 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14311 N_F_ALL | N_KEY | N_VFP);
14312
14313 if (rs == NS_FFF || rs == NS_HHH)
14314 {
14315 NEON_ENCODE (SINGLE, inst);
14316 do_vfp_sp_dyadic ();
14317
14318 /* ARMv8.2 fp16 instruction. */
14319 if (rs == NS_HHH)
14320 do_scalar_fp16_v82_encode ();
14321 }
14322 else
14323 {
14324 NEON_ENCODE (DOUBLE, inst);
14325 do_vfp_dp_rd_rn_rm ();
14326 }
14327 do_vfp_cond_or_thumb ();
14328
14329 }
14330
/* Encode vcmp/vcmpe in VFP form.  Register-register compares use the parsed
   second operand; compares against an immediate are rewritten to the
   vcmpz/vcmpez pseudo-mnemonics.  */
static void
do_vfp_nsyn_cmp (void)
{
  enum neon_shape rs;
  if (inst.operands[1].isreg)
    {
      /* Register-register compare.  */
      rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);

      if (rs == NS_FF || rs == NS_HH)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_monadic ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd_rm ();
	}
    }
  else
    {
      /* Compare against an immediate (zero): switch to the vcmpz/vcmpez
	 pseudo-mnemonic encoding.  */
      rs = neon_select_shape (NS_HI, NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F_ALL | N_KEY | N_VFP, N_EQK);

      switch (inst.instruction & 0x0fffffff)
	{
	case N_MNEM_vcmp:
	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
	  break;
	case N_MNEM_vcmpe:
	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
	  break;
	default:
	  abort ();
	}

      if (rs == NS_FI || rs == NS_HI)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_compare_z ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd ();
	}
    }
  do_vfp_cond_or_thumb ();

  /* ARMv8.2 fp16 instruction.  */
  if (rs == NS_HI || rs == NS_HH)
    do_scalar_fp16_v82_encode ();
}
14385
14386 static void
14387 nsyn_insert_sp (void)
14388 {
14389 inst.operands[1] = inst.operands[0];
14390 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
14391 inst.operands[0].reg = REG_SP;
14392 inst.operands[0].isreg = 1;
14393 inst.operands[0].writeback = 1;
14394 inst.operands[0].present = 1;
14395 }
14396
14397 static void
14398 do_vfp_nsyn_push (void)
14399 {
14400 nsyn_insert_sp ();
14401
14402 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
14403 _("register list must contain at least 1 and at most 16 "
14404 "registers"));
14405
14406 if (inst.operands[1].issingle)
14407 do_vfp_nsyn_opcode ("fstmdbs");
14408 else
14409 do_vfp_nsyn_opcode ("fstmdbd");
14410 }
14411
14412 static void
14413 do_vfp_nsyn_pop (void)
14414 {
14415 nsyn_insert_sp ();
14416
14417 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
14418 _("register list must contain at least 1 and at most 16 "
14419 "registers"));
14420
14421 if (inst.operands[1].issingle)
14422 do_vfp_nsyn_opcode ("fldmias");
14423 else
14424 do_vfp_nsyn_opcode ("fldmiad");
14425 }
14426
14427 /* Fix up Neon data-processing instructions, ORing in the correct bits for
14428 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
14429
14430 static void
14431 neon_dp_fixup (struct arm_it* insn)
14432 {
14433 unsigned int i = insn->instruction;
14434 insn->is_neon = 1;
14435
14436 if (thumb_mode)
14437 {
14438 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
14439 if (i & (1 << 24))
14440 i |= 1 << 28;
14441
14442 i &= ~(1 << 24);
14443
14444 i |= 0xef000000;
14445 }
14446 else
14447 i |= 0xf2000000;
14448
14449 insn->instruction = i;
14450 }
14451
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  */

static unsigned
neon_logbits (unsigned x)
{
  /* ffs gives the 1-based index of the lowest set bit; sizes are powers of
     two, so 8 -> 0, 16 -> 1, 32 -> 2, 64 -> 3.  */
  return (unsigned) (ffs (x) - 4);
}

#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
14463
14464 /* Encode insns with bit pattern:
14465
14466 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
14467 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
14468
14469 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
14470 different meaning for some instruction. */
14471
14472 static void
14473 neon_three_same (int isquad, int ubit, int size)
14474 {
14475 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14476 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14477 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14478 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14479 inst.instruction |= LOW4 (inst.operands[2].reg);
14480 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
14481 inst.instruction |= (isquad != 0) << 6;
14482 inst.instruction |= (ubit != 0) << 24;
14483 if (size != -1)
14484 inst.instruction |= neon_logbits (size) << 20;
14485
14486 neon_dp_fixup (&inst);
14487 }
14488
14489 /* Encode instructions of the form:
14490
14491 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
14492 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
14493
14494 Don't write size if SIZE == -1. */
14495
14496 static void
14497 neon_two_same (int qbit, int ubit, int size)
14498 {
14499 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14500 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14501 inst.instruction |= LOW4 (inst.operands[1].reg);
14502 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14503 inst.instruction |= (qbit != 0) << 6;
14504 inst.instruction |= (ubit != 0) << 24;
14505
14506 if (size != -1)
14507 inst.instruction |= neon_logbits (size) << 18;
14508
14509 neon_dp_fixup (&inst);
14510 }
14511
14512 /* Neon instruction encoders, in approximate order of appearance. */
14513
14514 static void
14515 do_neon_dyadic_i_su (void)
14516 {
14517 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14518 struct neon_type_el et = neon_check_type (3, rs,
14519 N_EQK, N_EQK, N_SU_32 | N_KEY);
14520 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14521 }
14522
14523 static void
14524 do_neon_dyadic_i64_su (void)
14525 {
14526 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14527 struct neon_type_el et = neon_check_type (3, rs,
14528 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14529 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14530 }
14531
/* Encode an immediate-shift instruction for element type ET with shift
   amount IMMBITS.  The element size (in bytes) is split across bit 7 and
   bits 19-21; the U bit (bit 24) is written only when WRITE_UBIT.  */
static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
		unsigned immbits)
{
  /* Element size in bytes.  */
  unsigned size = et.size >> 3;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= immbits << 16;
  /* High bit of the byte size goes to bit 7, the rest to bits 19-21.  */
  inst.instruction |= (size >> 3) << 7;
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  neon_dp_fixup (&inst);
}
14550
/* Encode VSHL: either the shift-by-immediate form (operand 2 is an
   immediate) or the three-register form (operand 2 is a register).  */

static void
do_neon_shl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
      int imm = inst.operands[2].imm;

      /* A left shift of 0..size-1 bits is representable.  */
      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* VSHL/VQSHL 3-register variants have syntax such as:
	   vshl.xx Dd, Dm, Dn
	 whereas other 3-register operations encoded by neon_three_same have
	 syntax like:
	   vadd.xx Dd, Dn, Dm
	 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
	 here.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
14586
/* Encode VQSHL: saturating shift, immediate or three-register form.
   Mirrors do_neon_shl_imm but with the U bit driven by the operand
   signedness in the immediate form.  */

static void
do_neon_qshl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
      int imm = inst.operands[2].imm;

      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* See note in do_neon_shl_imm.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
14616
14617 static void
14618 do_neon_rshl (void)
14619 {
14620 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14621 struct neon_type_el et = neon_check_type (3, rs,
14622 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14623 unsigned int tmp;
14624
14625 tmp = inst.operands[2].reg;
14626 inst.operands[2].reg = inst.operands[1].reg;
14627 inst.operands[1].reg = tmp;
14628 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14629 }
14630
/* Compute the cmode field for a Neon logic-immediate instruction
   (VBIC/VORR immediate forms) for IMMEDIATE at element size SIZE.

   On success, stores the 8-bit encoded immediate through IMMBITS and
   returns the cmode value; on failure reports an error via first_error
   and returns FAIL.  The representable immediates are a single byte
   placed in one byte lane of a 16- or 32-bit element.  */

static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Handle .I8 pseudo-instructions.  */
  if (size == 8)
    {
      /* Unfortunately, this will make everything apart from zero out-of-range.
	 FIXME is this the intended semantics? There doesn't seem much point in
	 accepting .I8 if so.  */
      immediate |= immediate << 8;
      size = 16;
    }

  if (size >= 32)
    {
      /* Byte in lane 0, 1, 2 or 3 of a 32-bit element.  */
      if (immediate == (immediate & 0x000000ff))
	{
	  *immbits = immediate;
	  return 0x1;
	}
      else if (immediate == (immediate & 0x0000ff00))
	{
	  *immbits = immediate >> 8;
	  return 0x3;
	}
      else if (immediate == (immediate & 0x00ff0000))
	{
	  *immbits = immediate >> 16;
	  return 0x5;
	}
      else if (immediate == (immediate & 0xff000000))
	{
	  *immbits = immediate >> 24;
	  return 0x7;
	}
      /* A 32-bit immediate may still be encodable as a repeated 16-bit
	 pattern; fall through to the 16-bit cases with the low half.  */
      if ((immediate & 0xffff) != (immediate >> 16))
	goto bad_immediate;
      immediate &= 0xffff;
    }

  /* Byte in lane 0 or 1 of a 16-bit element.  */
  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return 0x9;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return 0xb;
    }

  bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}
14686
/* Encode the Neon bitwise-logic instructions (VAND/VBIC/VORR/VORN/VEOR...):
   either the three-register form, or the immediate forms of VBIC/VORR
   (with VAND/VORN as pseudo-instructions using the inverted immediate).  */

static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      /* Immediate form.  Two-operand syntax "Vop Dd, #imm" is accepted as
	 well as "Vop Dd, Dd, #imm".  */
      const int three_ops_form = (inst.operands[2].present
				  && !inst.operands[2].isreg);
      const int immoperand = (three_ops_form ? 2 : 1);
      enum neon_shape rs = (three_ops_form
			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
      struct neon_type_el et = neon_check_type (2, rs,
	N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
      /* Note: the cast binds tighter than '&', so the mask is applied to
	 the already-converted value; numerically equivalent here.  */
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
	return;

      if (three_ops_form)
	constraint (inst.operands[0].reg != inst.operands[1].reg,
		    _("first and second operands shall be the same register"));

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[immoperand].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern.  */
	  if (immbits != (inst.operands[immoperand].regisimm ?
			  inst.operands[immoperand].reg : 0))
	    {
	      /* Set immbits to an invalid constant.  */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      /* neon_cmode_for_logic_imm has already reported the error.  */
      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
14772
14773 static void
14774 do_neon_bitfield (void)
14775 {
14776 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14777 neon_check_type (3, rs, N_IGNORE_TYPE);
14778 neon_three_same (neon_quad (rs), 0, -1);
14779 }
14780
14781 static void
14782 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
14783 unsigned destbits)
14784 {
14785 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14786 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
14787 types | N_KEY);
14788 if (et.type == NT_float)
14789 {
14790 NEON_ENCODE (FLOAT, inst);
14791 neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
14792 }
14793 else
14794 {
14795 NEON_ENCODE (INTEGER, inst);
14796 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
14797 }
14798 }
14799
/* Dyadic operation on signed/unsigned integers or floats (32-bit wide
   set); U bit set for unsigned operands.  */

static void
do_neon_dyadic_if_su (void)
{
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
14805
static void
do_neon_dyadic_if_su_d (void)
{
  /* This version only allow D registers, but that constraint is enforced during
     operand parsing so we don't need to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
14813
static void
do_neon_dyadic_if_i_d (void)
{
  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
14821
/* Flag bits for vfp_or_neon_is_neon, selecting which checks to perform.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,	/* Reject/repair condition codes.  */
  NEON_CHECK_ARCH = 2,	/* Require Neon v1.  */
  NEON_CHECK_ARCH8 = 4	/* Require ARMv8 Neon.  */
};
14828
/* Call this function if an instruction which may have belonged to the VFP or
   Neon instruction sets, but turned out to be a Neon instruction (due to the
   operand types involved, etc.). We have to check and/or fix-up a couple of
   things:

     - Make sure the user hasn't attempted to make a Neon instruction
       conditional.
     - Alter the value in the condition code field if necessary.
     - Make sure that the arch supports Neon instructions.

   Which of these operations take place depends on bits from enum
   vfp_or_neon_is_neon_bits.

   WARNING: This function has side effects! If NEON_CHECK_CC is used and the
   current instruction's condition is COND_ALWAYS, the condition field is
   changed to inst.uncond_value. This is necessary because instructions shared
   between VFP and Neon may be conditional for the VFP variants only, and the
   unconditional Neon version must have, e.g., 0xF in the condition field.  */

static int
vfp_or_neon_is_neon (unsigned check)
{
  /* Conditions are always legal in Thumb mode (IT blocks).  */
  if (!thumb_mode && (check & NEON_CHECK_CC))
    {
      if (inst.cond != COND_ALWAYS)
	{
	  first_error (_(BAD_COND));
	  return FAIL;
	}
      if (inst.uncond_value != -1)
	inst.instruction |= inst.uncond_value << 28;
    }

  /* Note: mark_feature_used records the feature requirement as a side
     effect, so these checks must run even when earlier ones passed.  */
  if ((check & NEON_CHECK_ARCH)
      && !mark_feature_used (&fpu_neon_ext_v1))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  if ((check & NEON_CHECK_ARCH8)
      && !mark_feature_used (&fpu_neon_ext_armv8))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  return SUCCESS;
}
14879
/* Encode VADD/VSUB: try the VFP form first, then fall back to the Neon
   integer/float encoding.  */

static void
do_neon_addsub_if_i (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
}
14893
14894 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
14895 result to be:
14896 V<op> A,B (A is operand 0, B is operand 2)
14897 to mean:
14898 V<op> A,B,A
14899 not:
14900 V<op> A,B,B
14901 so handle that case specially. */
14902
14903 static void
14904 neon_exchange_operands (void)
14905 {
14906 if (inst.operands[1].present)
14907 {
14908 void *scratch = xmalloc (sizeof (inst.operands[0]));
14909
14910 /* Swap operands[1] and operands[2]. */
14911 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
14912 inst.operands[1] = inst.operands[2];
14913 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
14914 free (scratch);
14915 }
14916 else
14917 {
14918 inst.operands[1] = inst.operands[2];
14919 inst.operands[2] = inst.operands[0];
14920 }
14921 }
14922
/* Encode a Neon compare: either register-register (REGTYPES) or
   compare-against-zero immediate form (IMMTYPES).  INVERT swaps the source
   operands, turning e.g. VCLT into VCGT with reversed arguments.  */

static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
	neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK | N_SIZ, immtypes | N_KEY);

      NEON_ENCODE (IMMED, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Bit 10 distinguishes the float variant.  */
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
}
14950
/* VCGE/VCGT etc.: non-inverted compares.  */

static void
do_neon_cmp (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, FALSE);
}
14956
/* VCLE/VCLT etc.: encoded as the opposite compare with operands swapped.  */

static void
do_neon_cmp_inv (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, TRUE);
}
14962
/* VCEQ: equality compare; same type set for register and immediate forms.  */

static void
do_neon_ceq (void)
{
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
14968
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
   scalars, which are encoded in 5 bits, M : Rm.
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
   index in M.  */

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  /* 16-bit scalars: register in bits 2:0, element index in bits 4:3.  */
  if (elsize == 16 && regno <= 7 && elno <= 3)
    return regno | (elno << 3);

  /* 32-bit scalars: register in bits 3:0, element index in bit 4.  */
  if (elsize == 32 && regno <= 15 && elno <= 1)
    return regno | (elno << 4);

  /* Any other size, or out-of-range register/index.  */
  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
15000
/* Encode multiply / multiply-accumulate scalar instructions.  */

static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  /* neon_scalar_for_mul validates the scalar and packs M:Rm.  */
  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  /* Bit 8 selects the float variant.  */
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  neon_dp_fixup (&inst);
}
15025
/* Encode VMLA/VMLS: VFP form if applicable, else Neon by-scalar or
   three-register form.  */

static void
do_neon_mac_maybe_scalar (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_I16 | N_I32 | N_F_16_32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      /* The "untyped" case can't happen. Do this to stop the "U" bit being
	 affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}
15050
/* Encode VFMA/VFMS: VFP form if applicable, else the Neon encoding.  */

static void
do_neon_fmac (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
15062
15063 static void
15064 do_neon_tst (void)
15065 {
15066 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15067 struct neon_type_el et = neon_check_type (3, rs,
15068 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
15069 neon_three_same (neon_quad (rs), 0, et.size);
15070 }
15071
/* VMUL with 3 registers allows the P8 type. The scalar version supports the
   same types as the MAC equivalents. The polynomial type for this instruction
   is encoded the same as the integer type.  */

static void
do_neon_mul (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    /* The by-scalar form shares its encoding with VMLA/VMLS.  */
    do_neon_mac_maybe_scalar ();
  else
    neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F16 | N_F32 | N_P8, 0);
}
15090
/* Encode VQDMULH/VQRDMULH: saturating doubling multiply returning high half,
   by-scalar or three-register form; signed 16/32-bit only.  */

static void
do_neon_qdmulh (void)
{
  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
15112
/* Encode VQRDMLAH/VQRDMLSH (ARMv8.1 AdvSIMD).  The encoding tail is
   deliberately identical to do_neon_qdmulh; only the architecture check
   differs.  */

static void
do_neon_qrdmlah (void)
{
  /* Check we're on the correct architecture.  */
  if (!mark_feature_used (&fpu_neon_ext_armv8))
    inst.error =
      _("instruction form not available on this architecture.");
  else if (!mark_feature_used (&fpu_neon_ext_v8_1))
    {
      as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
      record_feature_use (&fpu_neon_ext_v8_1);
    }

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
15144
/* Encode VACGE/VACGT (absolute compare): float-only.  */

static void
do_neon_fcmp_absolute (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
					    N_F_16_32 | N_KEY);
  /* Size field comes from bit mask.  */
  neon_three_same (neon_quad (rs), 1, et.size == 16 ? (int) et.size : -1);
}
15154
/* VACLE/VACLT: the inverted absolute compares, encoded as VACGE/VACGT
   with the source operands swapped.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
15161
15162 static void
15163 do_neon_step (void)
15164 {
15165 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15166 struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
15167 N_F_16_32 | N_KEY);
15168 neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
15169 }
15170
/* Encode VABS/VNEG: VFP form if applicable, else the two-register Neon
   encoding for signed 32-bit integers or floats.  */

static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S_32 | N_F_16_32 | N_KEY);

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  /* Bit 10 selects the float variant.  */
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  neon_dp_fixup (&inst);
}
15196
/* Encode VSLI (shift left and insert).  Shift amount is 0..size-1.  */

static void
do_neon_sli (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
15208
/* Encode VSRI (shift right and insert).  Shift amount is 1..size and is
   encoded as size - imm.  */

static void
do_neon_sri (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
}
15220
/* Encode VQSHLU (signed saturating shift left, unsigned result).  */

static void
do_neon_qshlu_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
15237
15238 static void
15239 do_neon_qmovn (void)
15240 {
15241 struct neon_type_el et = neon_check_type (2, NS_DQ,
15242 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
15243 /* Saturating move where operands can be signed or unsigned, and the
15244 destination has the same signedness. */
15245 NEON_ENCODE (INTEGER, inst);
15246 if (et.type == NT_unsigned)
15247 inst.instruction |= 0xc0;
15248 else
15249 inst.instruction |= 0x80;
15250 neon_two_same (0, 1, et.size / 2);
15251 }
15252
static void
do_neon_qmovun (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  /* Saturating move with unsigned results. Operands must be signed.  */
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
15262
/* Encode VQSHRN/VQRSHRN (saturating shift right and narrow).  */

static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* Shift amount is encoded as (narrow) size - imm.  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
15289
/* Encode VQSHRUN/VQRSHRUN (saturating shift right and narrow, unsigned
   result from signed operand).  */

static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
15319
/* Encode VMOVN (narrowing move, no saturation).  */

static void
do_neon_movn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
15328
/* Encode VSHRN/VRSHRN (shift right and narrow).  */

static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm>  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for narrowing operation"));
  /* Shift amount is encoded as (narrow) size - imm.  */
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
15353
/* Encode VSHLL (shift left long).  A shift by exactly the element size has
   a dedicated encoding distinct from the generic immediate-shift form.  */

static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.  */
      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
	N_EQK | N_DBL, N_SU_32 | N_KEY);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
15383
/* Check the various types for the VCVT instruction, and return which version
   the current instruction is.  */

/* X-macro table of conversion flavours.  Each CVT_VAR entry is
   (name, dest-type bits, source-type bits, register-class bits,
    bitshift-form mnemonic, plain-form mnemonic, round-to-zero mnemonic);
   a NULL mnemonic means that form does not exist for the flavour.
   `whole_reg' and `key' are locals of each expansion site.  */
#define CVT_FLAVOUR_VAR \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
  /* Half-precision conversions.  */ \
  CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
  /* New VCVT instructions introduced by ARMv8.2 fp16 extension. \
     Compared with single/double precision variants, only the co-processor \
     field is different, so the encoding flow is reused here.  */ \
  CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL) \
  CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL) \
  CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
  CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
  /* VFP instructions.  */ \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
  /* VFP instructions with bitshift.  */ \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)
15422
/* Expand the flavour table into an enumerator per conversion.  */
#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  /* First flavour that is a pure VFP (non-Neon) conversion.  */
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
15435
/* Determine which conversion flavour the current instruction is, by trying
   each flavour's type check in table order until one matches.  Returns
   neon_cvt_flavour_invalid (with inst.error cleared only on success) if
   none match.  */

static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
  if (et.type != NT_invtype)				\
    {							\
      inst.error = NULL;				\
      return (neon_cvt_flavour_##C);			\
    }

  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register. Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}
15461
/* Rounding behaviours for VCVT/VRINT, named after the instruction-mnemonic
   suffix letters (VCVTA, VRINTN, ...).  */
enum neon_cvt_mode
{
  neon_cvt_mode_a,	/* Round to nearest, ties away from zero.  */
  neon_cvt_mode_n,	/* Round to nearest, ties to even.  */
  neon_cvt_mode_p,	/* Round towards plus infinity.  */
  neon_cvt_mode_m,	/* Round towards minus infinity.  */
  neon_cvt_mode_z,	/* Round towards zero.  */
  neon_cvt_mode_x,	/* Round, raising inexact (VRINTX).  */
  neon_cvt_mode_r	/* Use the current FPSCR rounding mode.  */
};
15472
/* Neon-syntax VFP conversions.  */

/* Encode a VFP conversion given the Neon-syntax shape RS and FLAVOUR,
   by dispatching to the legacy VFP mnemonic encoder.  Shapes with an
   immediate use the bitshift mnemonic column of the flavour table; the
   rest use the plain column.  A NULL table entry leaves OPNAME unset and
   emits nothing here (the caller's type check will have failed).  */

static void
do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
{
  const char *opname = 0;

  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI
      || rs == NS_FHI || rs == NS_HFI)
    {
      /* Conversions with immediate bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	{
	  opname = enc[flavour];
	  /* The fixed-point forms are destructive: Fd == Fm.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("operands 0 and 1 must be the same register"));
	  inst.operands[1] = inst.operands[2];
	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
	}
    }
  else
    {
      /* Conversions without bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	opname = enc[flavour];
    }

  if (opname)
    do_vfp_nsyn_opcode (opname);

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16
      || flavour == neon_cvt_flavour_f16_u32
      || flavour == neon_cvt_flavour_f16_s32)
    do_scalar_fp16_v82_encode ();
}
15526
/* Encode the round-towards-zero VFP conversions (the "z" mnemonic column
   of the flavour table).  Flavours with no such form (NULL entry) encode
   nothing.  */

static void
do_vfp_nsyn_cvtz (void)
{
  enum neon_shape rs = neon_select_shape (NS_FH, NS_FF, NS_FD, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
  const char *enc[] =
    {
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
      CVT_FLAVOUR_VAR
      NULL
#undef CVT_VAR
    };

  /* The trailing NULL sentinel also covers flavour == invalid.  */
  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
15543
/* Encode the ARMv8 VCVT{A,N,P,M} float-to-integer conversions with an
   explicit rounding MODE.  FLAVOUR selects operand size and signedness
   (op bit); only the float-to-integer flavours are valid here.  */

static void
do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
		      enum neon_cvt_mode mode)
{
  int sz, op;
  int rm;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (flavour == neon_cvt_flavour_s32_f64
      || flavour == neon_cvt_flavour_u32_f64)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
		_(BAD_FP16));

  /* These are unconditional instructions; reject use inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);

  /* sz: double-precision source; op: signed result.  The default case
     returns, so both are always set before use.  */
  switch (flavour)
    {
    case neon_cvt_flavour_s32_f64:
      sz = 1;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f32:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f16:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_u32_f64:
      sz = 1;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f32:
      sz = 0;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f16:
      sz = 0;
      op = 0;
      break;
    default:
      first_error (_("invalid instruction shape"));
      return;
    }

  switch (mode)
    {
    case neon_cvt_mode_a: rm = 0; break;
    case neon_cvt_mode_n: rm = 1; break;
    case neon_cvt_mode_p: rm = 2; break;
    case neon_cvt_mode_m: rm = 3; break;
    default: first_error (_("invalid rounding mode")); return;
    }

  NEON_ENCODE (FPV8, inst);
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= sz << 8;

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      ||flavour == neon_cvt_flavour_u32_f16)
    do_scalar_fp16_v82_encode ();
  inst.instruction |= op << 7;
  inst.instruction |= rm << 16;
  /* Unconditional encoding: condition field is 0xF.  */
  inst.instruction |= 0xf0000000;
  inst.is_neon = TRUE;
}
15619
/* Worker for the VCVT family, with rounding mode MODE.  Dispatches
   scalar (VFP) conversions to the VFP encoders and handles the
   Advanced SIMD fixed-point, integer and half-precision vector forms
   inline.  */

static void
do_neon_cvt_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
					  NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ,
					  NS_FH, NS_HF, NS_FHI, NS_HFI,
					  NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

  if (flavour == neon_cvt_flavour_invalid)
    return;

  /* PR11109: Handle round-to-zero for VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
      && (flavour == neon_cvt_flavour_s16_f16
	  || flavour == neon_cvt_flavour_u16_f16
	  || flavour == neon_cvt_flavour_s32_f32
	  || flavour == neon_cvt_flavour_u32_f32
	  || flavour == neon_cvt_flavour_s32_f64
	  || flavour == neon_cvt_flavour_u32_f64)
      && (rs == NS_FD || rs == NS_FF))
    {
      do_vfp_nsyn_cvtz ();
      return;
    }

  /* ARMv8.2 fp16 VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16)
      && (flavour == neon_cvt_flavour_s32_f16
	  || flavour == neon_cvt_flavour_u32_f16)
      && (rs == NS_FH))
    {
      do_vfp_nsyn_cvtz ();
      do_scalar_fp16_v82_encode ();
      return;
    }

  /* VFP rather than Neon conversions.  */
  if (flavour >= neon_cvt_flavour_first_fp)
    {
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);

      return;
    }

  switch (rs)
    {
    case NS_DDI:
    case NS_QQI:
      {
	/* Vector fixed-point conversion (VCVT with a #fbits operand).  */
	unsigned immbits;
	/* Per-flavour opcode bits, indexed by neon_cvt_flavour.  */
	unsigned enctab[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
			     0x0000100, 0x1000100, 0x0, 0x1000000};

	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion.  */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	NEON_ENCODE (IMMED, inst);
	if (flavour != neon_cvt_flavour_invalid)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	if (flavour < neon_cvt_flavour_s16_f16)
	  {
	    /* 32-bit element forms: 6-bit immediate field.
	       NOTE(review): bit 21 was already set just above; this
	       second set is redundant but harmless — confirm intended.  */
	    inst.instruction |= 1 << 21;
	    immbits = 32 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	  }
	else
	  {
	    /* 16-bit element forms.  */
	    inst.instruction |= 3 << 20;
	    immbits = 16 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	    inst.instruction &= ~(1 << 9);
	  }

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DD:
    case NS_QQ:
      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
	{
	  /* Directed-rounding vector conversion (VCVT{A,N,P,M}):
	     ARMv8 Advanced SIMD, unconditional encoding only.  */
	  NEON_ENCODE (FLOAT, inst);
	  set_it_insn_type (OUTSIDE_IT_INSN);

	  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	    return;

	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg);
	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	  inst.instruction |= neon_quad (rs) << 6;
	  inst.instruction |= (flavour == neon_cvt_flavour_u16_f16
			       || flavour == neon_cvt_flavour_u32_f32) << 7;
	  inst.instruction |= mode << 8;
	  if (flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_s16_f16)
	    /* Mask off the original size bits and reencode them.  */
	    inst.instruction = ((inst.instruction & 0xfff3ffff) | (1 << 18));

	  if (thumb_mode)
	    inst.instruction |= 0xfc000000;
	  else
	    inst.instruction |= 0xf0000000;
	}
      else
	{
	int_encode:
	  {
	    /* Vector integer <-> float conversion.  Per-flavour opcode
	       bits, indexed by neon_cvt_flavour.  */
	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080,
				  0x100, 0x180, 0x0, 0x080};

	    NEON_ENCODE (INTEGER, inst);

	    if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	      return;

	    if (flavour != neon_cvt_flavour_invalid)
	      inst.instruction |= enctab[flavour];

	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	    inst.instruction |= LOW4 (inst.operands[1].reg);
	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	    inst.instruction |= neon_quad (rs) << 6;
	    if (flavour >= neon_cvt_flavour_s16_f16
		&& flavour <= neon_cvt_flavour_f16_u16)
	      /* Half precision.  */
	      inst.instruction |= 1 << 18;
	    else
	      inst.instruction |= 2 << 18;

	    neon_dp_fixup (&inst);
	  }
	}
      break;

      /* Half-precision conversions for Advanced SIMD -- neon.  */
    case NS_QD:
    case NS_DQ:

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      /* VCVT.F32.F16 (widen) vs VCVT.F16.F32 (narrow).  */
      if (rs == NS_DQ)
	inst.instruction = 0x3b60600;
      else
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
    }
}
15811
/* Assemble VCVTR (mode X rounding).  */

static void
do_neon_cvtr (void)
{
  do_neon_cvt_1 (neon_cvt_mode_x);
}
15817
/* Assemble plain VCVT (mode Z rounding).  */

static void
do_neon_cvt (void)
{
  do_neon_cvt_1 (neon_cvt_mode_z);
}
15823
/* Assemble VCVTA (mode A rounding).  */

static void
do_neon_cvta (void)
{
  do_neon_cvt_1 (neon_cvt_mode_a);
}
15829
/* Assemble VCVTN (mode N rounding).  */

static void
do_neon_cvtn (void)
{
  do_neon_cvt_1 (neon_cvt_mode_n);
}
15835
/* Assemble VCVTP (mode P rounding).  */

static void
do_neon_cvtp (void)
{
  do_neon_cvt_1 (neon_cvt_mode_p);
}
15841
/* Assemble VCVTM (mode M rounding).  */

static void
do_neon_cvtm (void)
{
  do_neon_cvt_1 (neon_cvt_mode_m);
}
15847
15848 static void
15849 do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
15850 {
15851 if (is_double)
15852 mark_feature_used (&fpu_vfp_ext_armv8);
15853
15854 encode_arm_vfp_reg (inst.operands[0].reg,
15855 (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
15856 encode_arm_vfp_reg (inst.operands[1].reg,
15857 (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
15858 inst.instruction |= to ? 0x10000 : 0;
15859 inst.instruction |= t ? 0x80 : 0;
15860 inst.instruction |= is_double ? 0x100 : 0;
15861 do_vfp_cond_or_thumb ();
15862 }
15863
/* Assemble VCVTB/VCVTT (T selects the top form).  The valid type
   combinations are f16<->f32 and f16<->f64; each neon_check_type call
   below probes one combination, and inst.error is cleared after a
   successful probe because earlier failed probes record an error.  */

static void
do_neon_cvttb_1 (bfd_boolean t)
{
  enum neon_shape rs = neon_select_shape (NS_HF, NS_HD, NS_FH, NS_FF, NS_FD,
					  NS_DF, NS_DH, NS_NULL);

  if (rs == NS_NULL)
    return;
  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
    {
      /* f32 -> f16.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
    {
      /* f16 -> f32.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
    {
      /* f64 -> f16.  */
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
    {
      /* f16 -> f64.  */
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
    }
  else
    return;
}
15905
/* Assemble VCVTB (bottom-half form).  */

static void
do_neon_cvtb (void)
{
  do_neon_cvttb_1 (FALSE);
}
15911
15912
/* Assemble VCVTT (top-half form).  */

static void
do_neon_cvtt (void)
{
  do_neon_cvttb_1 (TRUE);
}
15918
/* Encode the immediate form of VMOV/VMVN.  The encodable-immediate
   class ("cmode") is chosen by neon_cmode_for_move_imm; if the value
   is not directly encodable we retry with the bitwise inverse and the
   opposite instruction (VMOV <-> VMVN).  */

static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  immlo = inst.operands[1].imm;
  /* A 64-bit immediate arrives with its high word in the reg field.  */
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Rewrite the op bit: the retry above may have flipped it.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
15970
15971 static void
15972 do_neon_mvn (void)
15973 {
15974 if (inst.operands[1].isreg)
15975 {
15976 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15977
15978 NEON_ENCODE (INTEGER, inst);
15979 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15980 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15981 inst.instruction |= LOW4 (inst.operands[1].reg);
15982 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15983 inst.instruction |= neon_quad (rs) << 6;
15984 }
15985 else
15986 {
15987 NEON_ENCODE (IMMED, inst);
15988 neon_move_immediate ();
15989 }
15990
15991 neon_dp_fixup (&inst);
15992 }
15993
15994 /* Encode instructions of form:
15995
15996 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
15997 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
15998
15999 static void
16000 neon_mixed_length (struct neon_type_el et, unsigned size)
16001 {
16002 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16003 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16004 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16005 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16006 inst.instruction |= LOW4 (inst.operands[2].reg);
16007 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16008 inst.instruction |= (et.type == NT_unsigned) << 24;
16009 inst.instruction |= neon_logbits (size) << 20;
16010
16011 neon_dp_fixup (&inst);
16012 }
16013
/* Encode a lengthening (Q = D op D) dyadic operation.  */

static void
do_neon_dyadic_long (void)
{
  /* FIXME: Type checking for lengthening op.  */
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
16022
/* Encode VABAL (absolute difference and accumulate, long).  */

static void
do_neon_abal (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
16030
16031 static void
16032 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
16033 {
16034 if (inst.operands[2].isscalar)
16035 {
16036 struct neon_type_el et = neon_check_type (3, NS_QDS,
16037 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
16038 NEON_ENCODE (SCALAR, inst);
16039 neon_mul_mac (et, et.type == NT_unsigned);
16040 }
16041 else
16042 {
16043 struct neon_type_el et = neon_check_type (3, NS_QDD,
16044 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
16045 NEON_ENCODE (INTEGER, inst);
16046 neon_mixed_length (et, et.size);
16047 }
16048 }
16049
/* Encode VMLAL/VMLSL/VMULL-style long multiply-accumulate, vector or
   scalar final operand.  */

static void
do_neon_mac_maybe_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
16055
/* Encode a widening (Q = Q op D) dyadic operation.  */

static void
do_neon_dyadic_wide (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QQD,
    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
16063
/* Encode a narrowing (D = Q op Q) dyadic operation.  */

static void
do_neon_dyadic_narrow (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
  /* Operand sign is unimportant, and the U bit is part of the opcode,
     so force the operand type to integer.  */
  et.type = NT_integer;
  neon_mixed_length (et, et.size / 2);
}
16074
/* Encode a saturating doubling long multiply (VQDMULL-style); only
   signed 16/32-bit elements are accepted, vector or scalar form.  */

static void
do_neon_mul_sat_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
16080
16081 static void
16082 do_neon_vmull (void)
16083 {
16084 if (inst.operands[2].isscalar)
16085 do_neon_mac_maybe_scalar_long ();
16086 else
16087 {
16088 struct neon_type_el et = neon_check_type (3, NS_QDD,
16089 N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);
16090
16091 if (et.type == NT_poly)
16092 NEON_ENCODE (POLY, inst);
16093 else
16094 NEON_ENCODE (INTEGER, inst);
16095
16096 /* For polynomial encoding the U bit must be zero, and the size must
16097 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
16098 obviously, as 0b10). */
16099 if (et.size == 64)
16100 {
16101 /* Check we're on the correct architecture. */
16102 if (!mark_feature_used (&fpu_crypto_ext_armv8))
16103 inst.error =
16104 _("Instruction form not available on this architecture.");
16105
16106 et.size = 32;
16107 }
16108
16109 neon_mixed_length (et, et.size);
16110 }
16111 }
16112
16113 static void
16114 do_neon_ext (void)
16115 {
16116 enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
16117 struct neon_type_el et = neon_check_type (3, rs,
16118 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
16119 unsigned imm = (inst.operands[3].imm * et.size) / 8;
16120
16121 constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
16122 _("shift out of range"));
16123 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16124 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16125 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16126 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16127 inst.instruction |= LOW4 (inst.operands[2].reg);
16128 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16129 inst.instruction |= neon_quad (rs) << 6;
16130 inst.instruction |= imm << 8;
16131
16132 neon_dp_fixup (&inst);
16133 }
16134
16135 static void
16136 do_neon_rev (void)
16137 {
16138 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16139 struct neon_type_el et = neon_check_type (2, rs,
16140 N_EQK, N_8 | N_16 | N_32 | N_KEY);
16141 unsigned op = (inst.instruction >> 7) & 3;
16142 /* N (width of reversed regions) is encoded as part of the bitmask. We
16143 extract it here to check the elements to be reversed are smaller.
16144 Otherwise we'd get a reserved instruction. */
16145 unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
16146 gas_assert (elsize != 0);
16147 constraint (et.size >= elsize,
16148 _("elements must be smaller than reversal region"));
16149 neon_two_same (neon_quad (rs), 1, et.size);
16150 }
16151
/* Encode VDUP: duplicate either a vector scalar or an ARM core
   register into every lane of a D/Q register.  */

static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      /* Scalar source: VDUP.<size> <Dd/Qd>, <Dm[x]>.  */
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      /* Scalar index, pre-shifted into position above the size bits.  */
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* Core-register source: VDUP.<size> <Dd/Qd>, <Rm>.  */
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector.  */
      NEON_ENCODE (ARMREG, inst);
      /* The element size selects the b/e bits of the encoding.  */
      switch (et.size)
	{
	case 8:  inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
16202
16203 /* VMOV has particularly many variations. It can be one of:
16204 0. VMOV<c><q> <Qd>, <Qm>
16205 1. VMOV<c><q> <Dd>, <Dm>
16206 (Register operations, which are VORR with Rm = Rn.)
16207 2. VMOV<c><q>.<dt> <Qd>, #<imm>
16208 3. VMOV<c><q>.<dt> <Dd>, #<imm>
16209 (Immediate loads.)
16210 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
16211 (ARM register to scalar.)
16212 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
16213 (Two ARM registers to vector.)
16214 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
16215 (Scalar to ARM register.)
16216 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
16217 (Vector to two ARM registers.)
16218 8. VMOV.F32 <Sd>, <Sm>
16219 9. VMOV.F64 <Dd>, <Dm>
16220 (VFP register moves.)
16221 10. VMOV.F32 <Sd>, #imm
16222 11. VMOV.F64 <Dd>, #imm
16223 (VFP float immediate load.)
16224 12. VMOV <Rd>, <Sm>
16225 (VFP single to ARM reg.)
16226 13. VMOV <Sd>, <Rm>
16227 (ARM reg to VFP single.)
16228 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
16229 (Two ARM regs to two VFP singles.)
16230 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
16231 (Two VFP singles to two ARM regs.)
16232
16233 These cases can be disambiguated using neon_select_shape, except cases 1/9
16234 and 3/11 which depend on the operand type too.
16235
16236 All the encoded bits are hardcoded by this function.
16237
16238 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
16239 Cases 5, 7 may be used with VFPv2 and above.
16240
16241 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
16242 can specify a type where it doesn't make sense to, and is ignored). */
16243
/* Assemble VMOV in all its variations; see the enumeration in the
   comment above for the cases handled by each shape.  */

static void
do_neon_mov (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
					  NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR,
					  NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
					  NS_HR, NS_RH, NS_HI, NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* VMOV.F64 is encoded as the legacy fcpyd.  */
	  do_vfp_nsyn_opcode ("fcpyd");
	  break;
	}
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;
	/* The architecture manual I have doesn't explicitly state which
	   value the U bit should have for register->register moves, but
	   the equivalent VORR instruction has U = 0, so do that.  */
	inst.instruction = 0x0200110;
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	/* VORR's Rn and Rm are the same source register.  */
	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
	inst.instruction |= neon_quad (rs) << 6;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 11 (fconstd).  */
	  ldconst = "fconstd";
	  goto encode_fconstd;
	}
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      neon_dp_fixup (&inst);
      break;

    case NS_SR:  /* case 4.  */
      {
	/* ARM core register to vector scalar.  */
	unsigned bcdebits = 0;
	int logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

	/* .<size> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Size tag and scalar index are packed into the opc1/opc2
	   fields, split across bits 5-6 and 21-22 below.  */
	switch (et.size)
	  {
	  case 8:  bcdebits = 0x8; break;
	  case 16: bcdebits = 0x1; break;
	  case 32: bcdebits = 0x0; break;
	  default: ;
	  }

	bcdebits |= x << logsize;

	inst.instruction = 0xe000b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[1].reg << 12;
	inst.instruction |= (bcdebits & 3) << 5;
	inst.instruction |= (bcdebits >> 2) << 21;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6.  */
      {
	/* Vector scalar to ARM core register.  */
	unsigned logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
	unsigned abcdebits = 0;

	/* .<dt> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL,
			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Size/signedness tag and scalar index packed as for case 4,
	   with the extra 0x10 bit marking an unsigned transfer.  */
	switch (et.size)
	  {
	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
	  case 32: abcdebits = 0x00; break;
	  default: ;
	  }

	abcdebits |= x << logsize;
	inst.instruction = 0xe100b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[0].reg << 12;
	inst.instruction |= (abcdebits & 3) << 5;
	inst.instruction |= (abcdebits >> 2) << 21;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_HI:
    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
      encode_fconstd:
      /* Only "quarter-precision-expressible" floats can be encoded as
	 an 8-bit VFP immediate.  */
      if (is_quarter_float (inst.operands[1].imm))
	{
	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
	  do_vfp_nsyn_opcode (ldconst);

	  /* ARMv8.2 fp16 vmov.f16 instruction.  */
	  if (rs == NS_HI)
	    do_scalar_fp16_v82_encode ();
	}
      else
	first_error (_("immediate out of range"));
      break;

    case NS_RH:
    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_RH)
	do_scalar_fp16_v82_encode ();
      break;

    case NS_HR:
    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_HR)
	do_scalar_fp16_v82_encode ();
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    case NS_NULL:
      /* neon_select_shape has determined that the instruction
	 shape is wrong and has already set the error message.  */
      break;

    default:
      abort ();
    }
}
16492
16493 static void
16494 do_neon_rshift_round_imm (void)
16495 {
16496 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
16497 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
16498 int imm = inst.operands[2].imm;
16499
16500 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
16501 if (imm == 0)
16502 {
16503 inst.operands[2].present = 0;
16504 do_neon_mov ();
16505 return;
16506 }
16507
16508 constraint (imm < 1 || (unsigned)imm > et.size,
16509 _("immediate out of range for shift"));
16510 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
16511 et.size - imm);
16512 }
16513
16514 static void
16515 do_neon_movhf (void)
16516 {
16517 enum neon_shape rs = neon_select_shape (NS_HH, NS_NULL);
16518 constraint (rs != NS_HH, _("invalid suffix"));
16519
16520 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
16521 _(BAD_FPU));
16522
16523 do_vfp_sp_monadic ();
16524
16525 inst.is_neon = 1;
16526 inst.instruction |= 0xf0000000;
16527 }
16528
16529 static void
16530 do_neon_movl (void)
16531 {
16532 struct neon_type_el et = neon_check_type (2, NS_QD,
16533 N_EQK | N_DBL, N_SU_32 | N_KEY);
16534 unsigned sizebits = et.size >> 3;
16535 inst.instruction |= sizebits << 19;
16536 neon_two_same (0, et.type == NT_unsigned, -1);
16537 }
16538
/* Encode VTRN (transpose elements of two vectors).  */

static void
do_neon_trn (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (neon_quad (rs), 1, et.size);
}
16548
/* Encode VZIP/VUZP (interleave / de-interleave elements).  */

static void
do_neon_zip_uzp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  if (rs == NS_DD && et.size == 32)
    {
      /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
      inst.instruction = N_MNEM_vtrn;
      do_neon_trn ();
      return;
    }
  neon_two_same (neon_quad (rs), 1, et.size);
}
16564
/* Encode VQABS/VQNEG (saturating absolute value / negate); only
   signed element types are valid.  */

static void
do_neon_sat_abs_neg (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
16573
16574 static void
16575 do_neon_pair_long (void)
16576 {
16577 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16578 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
16579 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
16580 inst.instruction |= (et.type == NT_unsigned) << 7;
16581 neon_two_same (neon_quad (rs), 1, et.size);
16582 }
16583
16584 static void
16585 do_neon_recip_est (void)
16586 {
16587 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16588 struct neon_type_el et = neon_check_type (2, rs,
16589 N_EQK | N_FLT, N_F_16_32 | N_U32 | N_KEY);
16590 inst.instruction |= (et.type == NT_float) << 8;
16591 neon_two_same (neon_quad (rs), 1, et.size);
16592 }
16593
16594 static void
16595 do_neon_cls (void)
16596 {
16597 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16598 struct neon_type_el et = neon_check_type (2, rs,
16599 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16600 neon_two_same (neon_quad (rs), 1, et.size);
16601 }
16602
16603 static void
16604 do_neon_clz (void)
16605 {
16606 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16607 struct neon_type_el et = neon_check_type (2, rs,
16608 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
16609 neon_two_same (neon_quad (rs), 1, et.size);
16610 }
16611
16612 static void
16613 do_neon_cnt (void)
16614 {
16615 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16616 struct neon_type_el et = neon_check_type (2, rs,
16617 N_EQK | N_INT, N_8 | N_KEY);
16618 neon_two_same (neon_quad (rs), 1, et.size);
16619 }
16620
16621 static void
16622 do_neon_swp (void)
16623 {
16624 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16625 neon_two_same (neon_quad (rs), 1, -1);
16626 }
16627
/* Encode VTBL / VTBX: table lookup with a list of 1-4 D registers.
   The list length minus one goes in bits [9:8].  */

static void
do_neon_tbl_tbx (void)
{
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  listlenbits = inst.operands[1].imm - 1;
  /* Destination in Vd (bits 12-15, 22), table base in Vn (16-19, 7),
     index vector in Vm (0-3, 5).  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  neon_dp_fixup (&inst);
}
16651
/* Encode VLDM / VSTM (and the DB variants) for double-precision register
   lists; single-precision lists are delegated to the VFP handler.  */

static void
do_neon_ldm_stm (void)
{
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* Each D register occupies two words in the transfer list.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
16681
/* Encode VLDR / VSTR by delegating to the appropriate VFP load/store
   mnemonic, after diagnosing deprecated or unpredictable uses of PC.  */

static void
do_neon_ldr_str (void)
{
  int is_ldr = (inst.instruction & (1 << 20)) != 0;

  /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
     And is UNPREDICTABLE in thumb mode.  */
  if (!is_ldr
      && inst.operands[1].reg == REG_PC
      && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
    {
      if (thumb_mode)
	inst.error = _("Use of PC here is UNPREDICTABLE");
      else if (warn_on_deprecated)
	as_tsktsk (_("Use of PC here is deprecated"));
    }

  if (inst.operands[0].issingle)
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("flds");
      else
	do_vfp_nsyn_opcode ("fsts");

      /* ARMv8.2 vldr.16/vstr.16 instruction.  */
      if (inst.vectype.el[0].size == 16)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("fldd");
      else
	do_vfp_nsyn_opcode ("fstd");
    }
}
16718
/* "interleave" version also handles non-interleaving register VLD1/VST1
   instructions.  */

static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL,
					    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1.  */
       -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2.  */
       -1, -1, -1, -1, 0x4, 0x5, -1, -1,  /* VLD3 / VST3.  */
       -1, -1, -1, -1, -1, -1, 0x0, 0x1   /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* Map an explicit @<align> operand (stored as bits<<8 in the imm) to
     the 2-bit align field; 128/256-bit alignment is only legal for some
     list lengths.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 2;
	break;
      case 256:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 3;
	break;
      default:
      bad_alignment:
	first_error (_("bad alignment"));
	return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6].  We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask.  Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
	| (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));
  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
	      _("bad element type for instruction"));

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
16787
/* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
   *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
   otherwise.  The variable arguments are a list of pairs of legal (size, align)
   values, terminated with -1.  Returns SUCCESS or FAIL (reporting an error
   in the latter case).  */

static int
neon_alignment_bit (int size, int align, int *do_alignment, ...)
{
  va_list ap;
  int result = FAIL, thissize, thisalign;

  /* No explicit alignment operand: nothing to check, no align bit.  */
  if (!inst.operands[1].immisalign)
    {
      *do_alignment = 0;
      return SUCCESS;
    }

  va_start (ap, do_alignment);

  /* Scan the (size, align) pairs until a match or the -1 terminator.  */
  do
    {
      thissize = va_arg (ap, int);
      if (thissize == -1)
	break;
      thisalign = va_arg (ap, int);

      if (size == thissize && align == thisalign)
	result = SUCCESS;
    }
  while (result != SUCCESS);

  va_end (ap);

  if (result == SUCCESS)
    *do_alignment = 1;
  else
    first_error (_("unsupported alignment for instruction"));

  return result;
}
16828
/* Encode single-lane VLD<n> / VST<n> (load/store of one element to/from
   one lane of each list register).  */

static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;
  int logsize = neon_logbits (et.size);
  /* Alignment operand is stored as (alignment-in-bits << 8).  */
  int align = inst.operands[1].imm >> 8;
  /* <n> minus one, from bits [9:8] of the initial bitmask.  */
  int n = (inst.instruction >> 8) & 3;
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  /* Per-<n> alignment checking and encoding of the align field.  */
  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 16,
				       16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
16913
/* Encode single n-element structure to all lanes VLD<n> instructions.  */

static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;

  if (et.type == NT_invtype)
    return;

  /* Dispatch on <n> minus one, taken from bits [9:8] of the bitmask.  */
  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 8, 16, 16, 32, 32, 64,
				       -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* Size field 0x3 is the special encoding for 32-bit elements with
	   128-bit alignment.  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  inst.instruction |= do_alignment << 4;
}
16988
16989 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
16990 apart from bits [11:4]. */
16991
16992 static void
16993 do_neon_ldx_stx (void)
16994 {
16995 if (inst.operands[1].isreg)
16996 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
16997
16998 switch (NEON_LANE (inst.operands[0].imm))
16999 {
17000 case NEON_INTERLEAVE_LANES:
17001 NEON_ENCODE (INTERLV, inst);
17002 do_neon_ld_st_interleave ();
17003 break;
17004
17005 case NEON_ALL_LANES:
17006 NEON_ENCODE (DUP, inst);
17007 if (inst.instruction == N_INV)
17008 {
17009 first_error ("only loads support such operands");
17010 break;
17011 }
17012 do_neon_ld_dup ();
17013 break;
17014
17015 default:
17016 NEON_ENCODE (LANE, inst);
17017 do_neon_ld_st_lane ();
17018 }
17019
17020 /* L bit comes from bit mask. */
17021 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17022 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17023 inst.instruction |= inst.operands[1].reg << 16;
17024
17025 if (inst.operands[1].postind)
17026 {
17027 int postreg = inst.operands[1].imm & 0xf;
17028 constraint (!inst.operands[1].immisreg,
17029 _("post-index must be a register"));
17030 constraint (postreg == 0xd || postreg == 0xf,
17031 _("bad register for post-index"));
17032 inst.instruction |= postreg;
17033 }
17034 else
17035 {
17036 constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
17037 constraint (inst.reloc.exp.X_op != O_constant
17038 || inst.reloc.exp.X_add_number != 0,
17039 BAD_ADDR_MODE);
17040
17041 if (inst.operands[1].writeback)
17042 {
17043 inst.instruction |= 0xd;
17044 }
17045 else
17046 inst.instruction |= 0xf;
17047 }
17048
17049 if (thumb_mode)
17050 inst.instruction |= 0xf9000000;
17051 else
17052 inst.instruction |= 0xf4000000;
17053 }
17054
/* FP v8.  Shared encoder for the three-operand FPv8 VFP instructions
   (used by VSEL, VMAXNM/VMINNM via try_vfp_nsyn).  */
static void
do_vfp_nsyn_fpv8 (enum neon_shape rs)
{
  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  NEON_ENCODE (FPV8, inst);

  if (rs == NS_FFF || rs == NS_HHH)
    {
      do_vfp_sp_dyadic ();

      /* ARMv8.2 fp16 instruction.  */
      if (rs == NS_HHH)
	do_scalar_fp16_v82_encode ();
    }
  else
    do_vfp_dp_rd_rn_rm ();

  /* Bit 8 distinguishes the double-precision form.  */
  if (rs == NS_DDD)
    inst.instruction |= 0x100;

  inst.instruction |= 0xf0000000;
}
17083
static void
do_vsel (void)
{
  /* VSEL is unconditional (FPv8); it must not appear inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
    first_error (_("invalid instruction shape"));
}
17092
static void
do_vmaxnm (void)
{
  /* VMAXNM/VMINNM are unconditional; try the scalar VFP (FPv8) form
     first, then fall back to the Neon encoding.  */
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
    return;

  neon_dyadic_misc (NT_untyped, N_F_16_32, 0);
}
17106
/* Common encoder for the VRINT family.  MODE selects the rounding
   behaviour; the VFP (scalar) encoding is tried first, falling back to
   the Neon (vector) encoding if the type check fails.  */

static void
do_vrint_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et;

  if (rs == NS_NULL)
    return;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY
			| N_VFP);
  if (et.type != NT_invtype)
    {
      /* VFP encodings.  */
      /* The A/N/P/M modes are unconditional and may not sit in an IT
	 block; R/Z/X use the normal conditional encoding.  */
      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
	set_it_insn_type (OUTSIDE_IT_INSN);

      NEON_ENCODE (FPV8, inst);
      if (rs == NS_FF || rs == NS_HH)
	do_vfp_sp_monadic ();
      else
	do_vfp_dp_rd_rm ();

      switch (mode)
	{
	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
	default: abort ();
	}

      inst.instruction |= (rs == NS_DD) << 8;
      do_vfp_cond_or_thumb ();

      /* ARMv8.2 fp16 vrint instruction.  */
      if (rs == NS_HH)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      /* Neon encodings (or something broken...).  */
      inst.error = NULL;
      et = neon_check_type (2, rs, N_EQK, N_F_16_32 | N_KEY);

      if (et.type == NT_invtype)
	return;

      set_it_insn_type (OUTSIDE_IT_INSN);
      NEON_ENCODE (FLOAT, inst);

      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	return;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Mask off the original size bits and reencode them.  */
      inst.instruction = ((inst.instruction & 0xfff3ffff)
			  | neon_logbits (et.size) << 18);

      switch (mode)
	{
	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
	default: abort ();
	}

      if (thumb_mode)
	inst.instruction |= 0xfc000000;
      else
	inst.instruction |= 0xf0000000;
    }
}
17198
/* Per-rounding-mode entry points for VRINT; each forwards the matching
   neon_cvt_mode to do_vrint_1.  */

static void
do_vrintx (void)
{
  do_vrint_1 (neon_cvt_mode_x);
}

static void
do_vrintz (void)
{
  do_vrint_1 (neon_cvt_mode_z);
}

static void
do_vrintr (void)
{
  do_vrint_1 (neon_cvt_mode_r);
}

static void
do_vrinta (void)
{
  do_vrint_1 (neon_cvt_mode_a);
}

static void
do_vrintn (void)
{
  do_vrint_1 (neon_cvt_mode_n);
}

static void
do_vrintp (void)
{
  do_vrint_1 (neon_cvt_mode_p);
}

static void
do_vrintm (void)
{
  do_vrint_1 (neon_cvt_mode_m);
}
17240
/* Crypto v1 instructions.  */

/* Shared encoder for the two-operand crypto instructions (AES*, SHA1H,
   SHA1SU1, SHA256SU0).  ELTTYPE is the required element type; OP goes
   in bits [7:6], or is omitted when -1.  */
static void
do_crypto_2op_1 (unsigned elttype, int op)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
      == NT_invtype)
    return;

  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  if (op != -1)
    inst.instruction |= op << 6;

  if (thumb_mode)
    inst.instruction |= 0xfc000000;
  else
    inst.instruction |= 0xf0000000;
}
17266
/* Shared encoder for the three-operand crypto instructions (SHA1*,
   SHA256*).  U and OP select the particular instruction within the
   three-same encoding space.  */
static void
do_crypto_3op_1 (int u, int op)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
		       N_32 | N_UNT | N_KEY).type == NT_invtype)
    return;

  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  neon_three_same (1, u, 8 << op);
}
17281
/* AES instructions: op 0 = AESE, 1 = AESD, 2 = AESMC, 3 = AESIMC.  */

static void
do_aese (void)
{
  do_crypto_2op_1 (N_8, 0);
}

static void
do_aesd (void)
{
  do_crypto_2op_1 (N_8, 1);
}

static void
do_aesmc (void)
{
  do_crypto_2op_1 (N_8, 2);
}

static void
do_aesimc (void)
{
  do_crypto_2op_1 (N_8, 3);
}
17305
/* SHA instructions: the (u, op) / (elttype, op) pairs select the
   individual instruction in the shared crypto encoders above.  */

static void
do_sha1c (void)
{
  do_crypto_3op_1 (0, 0);
}

static void
do_sha1p (void)
{
  do_crypto_3op_1 (0, 1);
}

static void
do_sha1m (void)
{
  do_crypto_3op_1 (0, 2);
}

static void
do_sha1su0 (void)
{
  do_crypto_3op_1 (0, 3);
}

static void
do_sha256h (void)
{
  do_crypto_3op_1 (1, 0);
}

static void
do_sha256h2 (void)
{
  do_crypto_3op_1 (1, 1);
}

static void
do_sha256su1 (void)
{
  do_crypto_3op_1 (1, 2);
}

static void
do_sha1h (void)
{
  do_crypto_2op_1 (N_32, -1);
}

static void
do_sha1su1 (void)
{
  do_crypto_2op_1 (N_32, 0);
}

static void
do_sha256su0 (void)
{
  do_crypto_2op_1 (N_32, 1);
}
17365
/* Shared encoder for the CRC32 instructions.  POLY selects the
   polynomial variant (0 = CRC32, 1 = CRC32C), SZ the data size field.
   Field positions differ between the ARM and Thumb encodings.  */
static void
do_crc32_1 (unsigned int poly, unsigned int sz)
{
  unsigned int Rd = inst.operands[0].reg;
  unsigned int Rn = inst.operands[1].reg;
  unsigned int Rm = inst.operands[2].reg;

  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
  inst.instruction |= LOW4 (Rn) << 16;
  inst.instruction |= LOW4 (Rm);
  inst.instruction |= sz << (thumb_mode ? 4 : 21);
  inst.instruction |= poly << (thumb_mode ? 20 : 9);

  /* PC (and SP in Thumb) make the instruction UNPREDICTABLE; warn but
     still assemble.  */
  if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
    as_warn (UNPRED_REG ("r15"));
  if (thumb_mode && (Rd == REG_SP || Rn == REG_SP || Rm == REG_SP))
    as_warn (UNPRED_REG ("r13"));
}
17385
/* CRC32 entry points: poly 0 = CRC32, 1 = CRC32C; sz 0/1/2 selects the
   byte / halfword / word variant.  */

static void
do_crc32b (void)
{
  do_crc32_1 (0, 0);
}

static void
do_crc32h (void)
{
  do_crc32_1 (0, 1);
}

static void
do_crc32w (void)
{
  do_crc32_1 (0, 2);
}

static void
do_crc32cb (void)
{
  do_crc32_1 (1, 0);
}

static void
do_crc32ch (void)
{
  do_crc32_1 (1, 1);
}

static void
do_crc32cw (void)
{
  do_crc32_1 (1, 2);
}
17421
17422 \f
17423 /* Overall per-instruction processing. */
17424
17425 /* We need to be able to fix up arbitrary expressions in some statements.
17426 This is so that we can handle symbols that are an arbitrary distance from
17427 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
17428 which returns part of an address in a form which will be valid for
17429 a data instruction. We do this by pushing the expression into a symbol
17430 in the expr_section, and creating a fix for that. */
17431
/* Create a fixup of SIZE bytes at FRAG+WHERE for expression EXP with
   relocation RELOC.  Constant pc-relative expressions are first wrapped
   in an absolute-valued symbol so the object file has something to
   reference.  */
static void
fix_new_arm (fragS *	frag,
	     int	where,
	     short int	size,
	     expressionS * exp,
	     int	pc_rel,
	     int	reloc)
{
  fixS *	   new_fix;

  switch (exp->X_op)
    {
    case O_constant:
      if (pc_rel)
	{
	  /* Create an absolute valued symbol, so we have something to
	     refer to in the object file.  Unfortunately for us, gas's
	     generic expression parsing will already have folded out
	     any use of .set foo/.type foo %function that may have
	     been used to set type information of the target location,
	     that's being specified symbolically.  We have to presume
	     the user knows what they are doing.  */
	  char name[16 + 8];
	  symbolS *symbol;

	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);

	  symbol = symbol_find_or_make (name);
	  S_SET_SEGMENT (symbol, absolute_section);
	  symbol_set_frag (symbol, &zero_address_frag);
	  S_SET_VALUE (symbol, exp->X_add_number);
	  exp->X_op = O_symbol;
	  exp->X_add_symbol = symbol;
	  exp->X_add_number = 0;
	}
      /* FALLTHROUGH */
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
			     (enum bfd_reloc_code_real) reloc);
      break;

    default:
      /* Anything else: fold the expression into a symbol first.  */
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
				  pc_rel, (enum bfd_reloc_code_real) reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
17485
/* Create a frag for an instruction requiring relaxation.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  /* Split the relocation expression into a symbol and offset for
     frag_var.  */
  switch (inst.reloc.exp.X_op)
    {
    case O_symbol:
      sym = inst.reloc.exp.X_add_symbol;
      offset = inst.reloc.exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.reloc.exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.reloc.exp);
      offset = 0;
      break;
    }
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
17517
/* Write a 32-bit thumb instruction to buf.  The high halfword is
   emitted first, as required by the Thumb-2 instruction stream.  */
static void
put_thumb32_insn (char * buf, unsigned long insn)
{
  md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
}
17525
/* Emit the assembled instruction in INST to the output: report any
   pending error, handle relaxable instructions, write the encoding,
   and attach the fixup and debug info.  STR is the source line, used
   only for diagnostics.  */
static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  if (inst.relax)
    {
      output_relax_insn ();
      return;
    }
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* 8-byte case: the same 4-byte encoding is written twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    fix_new_arm (frag_now, to - frag_now->fr_literal,
		 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
		 inst.reloc.type);

  dwarf2_emit_insn (inst.size);
}
17572
/* Emit a Thumb IT instruction (base encoding 0xbf00) with condition
   COND and mask MASK.  If TO is NULL a new 2-byte frag is allocated;
   otherwise the instruction is written over *TO (used for back-patching).
   Returns the location written.  */
static char *
output_it_inst (int cond, int mask, char * to)
{
  unsigned long instruction = 0xbf00;

  mask &= 0xf;
  instruction |= mask;
  instruction |= cond << 4;

  if (to == NULL)
    {
      to = frag_more (2);
#ifdef OBJ_ELF
      dwarf2_emit_insn (2);
#endif
    }

  md_number_to_chars (to, instruction, 2);

  return to;
}
17594
/* Tag values used in struct asm_opcode's tag field.  They classify how
   a mnemonic accepts its condition affix (suffix, infix, or none).  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
17628
17629 /* Subroutine of md_assemble, responsible for looking up the primary
17630 opcode from the mnemonic the user wrote. STR points to the
17631 beginning of the mnemonic.
17632
17633 This is not simply a hash table lookup, because of conditional
17634 variants. Most instructions have conditional variants, which are
17635 expressed with a _conditional affix_ to the mnemonic. If we were
17636 to encode each conditional variant as a literal string in the opcode
17637 table, it would have approximately 20,000 entries.
17638
17639 Most mnemonics take this affix as a suffix, and in unified syntax,
17640 'most' is upgraded to 'all'. However, in the divided syntax, some
17641 instructions take the affix as an infix, notably the s-variants of
17642 the arithmetic instructions. Of those instructions, all but six
17643 have the infix appear after the third character of the mnemonic.
17644
17645 Accordingly, the algorithm for looking up primary opcodes given
17646 an identifier is:
17647
17648 1. Look up the identifier in the opcode table.
17649 If we find a match, go to step U.
17650
17651 2. Look up the last two characters of the identifier in the
17652 conditions table. If we find a match, look up the first N-2
17653 characters of the identifier in the opcode table. If we
17654 find a match, go to step CE.
17655
17656 3. Look up the fourth and fifth characters of the identifier in
17657 the conditions table. If we find a match, extract those
17658 characters from the identifier, and look up the remaining
17659 characters in the opcode table. If we find a match, go
17660 to step CM.
17661
17662 4. Fail.
17663
17664 U. Examine the tag field of the opcode structure, in case this is
17665 one of the six instructions with its conditional infix in an
17666 unusual place. If it is, the tag tells us where to find the
17667 infix; look it up in the conditions table and set inst.cond
17668 accordingly. Otherwise, this is an unconditional instruction.
17669 Again set inst.cond accordingly. Return the opcode structure.
17670
17671 CE. Examine the tag field to make sure this is an instruction that
17672 should receive a conditional suffix. If it is not, fail.
17673 Otherwise, set inst.cond from the suffix we already looked up,
17674 and return the opcode structure.
17675
17676 CM. Examine the tag field to make sure this is an instruction that
17677 should receive a conditional infix after the third character.
17678 If it is not, fail. Otherwise, undo the edits to the current
17679 line of input and proceed as for case CE. */
17680
17681 static const struct asm_opcode *
17682 opcode_lookup (char **str)
17683 {
17684 char *end, *base;
17685 char *affix;
17686 const struct asm_opcode *opcode;
17687 const struct asm_cond *cond;
17688 char save[2];
17689
17690 /* Scan up to the end of the mnemonic, which must end in white space,
17691 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
17692 for (base = end = *str; *end != '\0'; end++)
17693 if (*end == ' ' || *end == '.')
17694 break;
17695
17696 if (end == base)
17697 return NULL;
17698
17699 /* Handle a possible width suffix and/or Neon type suffix. */
17700 if (end[0] == '.')
17701 {
17702 int offset = 2;
17703
17704 /* The .w and .n suffixes are only valid if the unified syntax is in
17705 use. */
17706 if (unified_syntax && end[1] == 'w')
17707 inst.size_req = 4;
17708 else if (unified_syntax && end[1] == 'n')
17709 inst.size_req = 2;
17710 else
17711 offset = 0;
17712
17713 inst.vectype.elems = 0;
17714
17715 *str = end + offset;
17716
17717 if (end[offset] == '.')
17718 {
17719 /* See if we have a Neon type suffix (possible in either unified or
17720 non-unified ARM syntax mode). */
17721 if (parse_neon_type (&inst.vectype, str) == FAIL)
17722 return NULL;
17723 }
17724 else if (end[offset] != '\0' && end[offset] != ' ')
17725 return NULL;
17726 }
17727 else
17728 *str = end;
17729
17730 /* Look for unaffixed or special-case affixed mnemonic. */
17731 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17732 end - base);
17733 if (opcode)
17734 {
17735 /* step U */
17736 if (opcode->tag < OT_odd_infix_0)
17737 {
17738 inst.cond = COND_ALWAYS;
17739 return opcode;
17740 }
17741
17742 if (warn_on_deprecated && unified_syntax)
17743 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17744 affix = base + (opcode->tag - OT_odd_infix_0);
17745 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17746 gas_assert (cond);
17747
17748 inst.cond = cond->value;
17749 return opcode;
17750 }
17751
17752 /* Cannot have a conditional suffix on a mnemonic of less than two
17753 characters. */
17754 if (end - base < 3)
17755 return NULL;
17756
17757 /* Look for suffixed mnemonic. */
17758 affix = end - 2;
17759 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17760 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17761 affix - base);
17762 if (opcode && cond)
17763 {
17764 /* step CE */
17765 switch (opcode->tag)
17766 {
17767 case OT_cinfix3_legacy:
17768 /* Ignore conditional suffixes matched on infix only mnemonics. */
17769 break;
17770
17771 case OT_cinfix3:
17772 case OT_cinfix3_deprecated:
17773 case OT_odd_infix_unc:
17774 if (!unified_syntax)
17775 return 0;
17776 /* else fall through */
17777
17778 case OT_csuffix:
17779 case OT_csuffixF:
17780 case OT_csuf_or_in3:
17781 inst.cond = cond->value;
17782 return opcode;
17783
17784 case OT_unconditional:
17785 case OT_unconditionalF:
17786 if (thumb_mode)
17787 inst.cond = cond->value;
17788 else
17789 {
17790 /* Delayed diagnostic. */
17791 inst.error = BAD_COND;
17792 inst.cond = COND_ALWAYS;
17793 }
17794 return opcode;
17795
17796 default:
17797 return NULL;
17798 }
17799 }
17800
17801 /* Cannot have a usual-position infix on a mnemonic of less than
17802 six characters (five would be a suffix). */
17803 if (end - base < 6)
17804 return NULL;
17805
17806 /* Look for infixed mnemonic in the usual position. */
17807 affix = base + 3;
17808 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17809 if (!cond)
17810 return NULL;
17811
17812 memcpy (save, affix, 2);
17813 memmove (affix, affix + 2, (end - affix) - 2);
17814 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17815 (end - base) - 2);
17816 memmove (affix + 2, affix, (end - affix) - 2);
17817 memcpy (affix, save, 2);
17818
17819 if (opcode
17820 && (opcode->tag == OT_cinfix3
17821 || opcode->tag == OT_cinfix3_deprecated
17822 || opcode->tag == OT_csuf_or_in3
17823 || opcode->tag == OT_cinfix3_legacy))
17824 {
17825 /* Step CM. */
17826 if (warn_on_deprecated && unified_syntax
17827 && (opcode->tag == OT_cinfix3
17828 || opcode->tag == OT_cinfix3_deprecated))
17829 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17830
17831 inst.cond = cond->value;
17832 return opcode;
17833 }
17834
17835 return NULL;
17836 }
17837
17838 /* This function generates an initial IT instruction, leaving its block
17839 virtually open for the new instructions. Eventually,
17840 the mask will be updated by now_it_add_mask () each time
17841 a new instruction needs to be included in the IT block.
17842 Finally, the block is closed with close_automatic_it_block ().
17843 The block closure can be requested either from md_assemble (),
17844 a tencode (), or due to a label hook. */
17845
17846 static void
17847 new_automatic_it_block (int cond)
17848 {
17849 now_it.state = AUTOMATIC_IT_BLOCK;
17850 now_it.mask = 0x18;
17851 now_it.cc = cond;
17852 now_it.block_length = 1;
17853 mapping_state (MAP_THUMB);
17854 now_it.insn = output_it_inst (cond, now_it.mask, NULL);
17855 now_it.warn_deprecated = FALSE;
17856 now_it.insn_cond = TRUE;
17857 }
17858
17859 /* Close an automatic IT block.
17860 See comments in new_automatic_it_block (). */
17861
17862 static void
17863 close_automatic_it_block (void)
17864 {
17865 now_it.mask = 0x10;
17866 now_it.block_length = 0;
17867 }
17868
17869 /* Update the mask of the current automatically-generated IT
17870 instruction. See comments in new_automatic_it_block (). */
17871
17872 static void
17873 now_it_add_mask (int cond)
17874 {
17875 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
17876 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
17877 | ((bitvalue) << (nbit)))
17878 const int resulting_bit = (cond & 1);
17879
17880 now_it.mask &= 0xf;
17881 now_it.mask = SET_BIT_VALUE (now_it.mask,
17882 resulting_bit,
17883 (5 - now_it.block_length));
17884 now_it.mask = SET_BIT_VALUE (now_it.mask,
17885 1,
17886 ((5 - now_it.block_length) - 1) );
17887 output_it_inst (now_it.cc, now_it.mask, now_it.insn);
17888
17889 #undef CLEAR_BIT
17890 #undef SET_BIT_VALUE
17891 }
17892
17893 /* The IT blocks handling machinery is accessed through the these functions:
17894 it_fsm_pre_encode () from md_assemble ()
17895 set_it_insn_type () optional, from the tencode functions
17896 set_it_insn_type_last () ditto
17897 in_it_block () ditto
17898 it_fsm_post_encode () from md_assemble ()
     force_automatic_it_block_close () from label handling functions
17900
17901 Rationale:
17902 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
17903 initializing the IT insn type with a generic initial value depending
17904 on the inst.condition.
17905 2) During the tencode function, two things may happen:
17906 a) The tencode function overrides the IT insn type by
17907 calling either set_it_insn_type (type) or set_it_insn_type_last ().
17908 b) The tencode function queries the IT block state by
17909 calling in_it_block () (i.e. to determine narrow/not narrow mode).
17910
17911 Both set_it_insn_type and in_it_block run the internal FSM state
17912 handling function (handle_it_state), because: a) setting the IT insn
17913 type may incur in an invalid state (exiting the function),
17914 and b) querying the state requires the FSM to be updated.
17915 Specifically we want to avoid creating an IT block for conditional
17916 branches, so it_fsm_pre_encode is actually a guess and we can't
17917 determine whether an IT block is required until the tencode () routine
   has decided what type of instruction this actually is.
17919 Because of this, if set_it_insn_type and in_it_block have to be used,
17920 set_it_insn_type has to be called first.
17921
17922 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
17923 determines the insn IT type depending on the inst.cond code.
17924 When a tencode () routine encodes an instruction that can be
17925 either outside an IT block, or, in the case of being inside, has to be
17926 the last one, set_it_insn_type_last () will determine the proper
17927 IT instruction type based on the inst.cond code. Otherwise,
17928 set_it_insn_type can be called for overriding that logic or
17929 for covering other cases.
17930
17931 Calling handle_it_state () may not transition the IT block state to
   OUTSIDE_IT_BLOCK immediately, since the (current) state could be
17933 still queried. Instead, if the FSM determines that the state should
17934 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
17935 after the tencode () function: that's what it_fsm_post_encode () does.
17936
17937 Since in_it_block () calls the state handling function to get an
17938 updated state, an error may occur (due to invalid insns combination).
17939 In that case, inst.error is set.
17940 Therefore, inst.error has to be checked after the execution of
17941 the tencode () routine.
17942
17943 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
17944 any pending state change (if any) that didn't take place in
17945 handle_it_state () as explained above. */
17946
17947 static void
17948 it_fsm_pre_encode (void)
17949 {
17950 if (inst.cond != COND_ALWAYS)
17951 inst.it_insn_type = INSIDE_IT_INSN;
17952 else
17953 inst.it_insn_type = OUTSIDE_IT_INSN;
17954
17955 now_it.state_handled = 0;
17956 }
17957
/* IT state FSM handling function.  Runs at most once per instruction
   (guarded by now_it.state_handled) and advances the IT-block state
   machine based on the current state and inst.it_insn_type: it may
   open, extend, or close an automatic IT block, or validate the
   instruction against a manual one.  Returns SUCCESS, or FAIL with
   inst.error set.  */

static int
handle_it_state (void)
{
  now_it.state_handled = 1;
  now_it.insn_cond = FALSE;

  switch (now_it.state)
    {
    case OUTSIDE_IT_BLOCK:
      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (thumb_mode == 0)
	    {
	      if (unified_syntax
		  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		as_tsktsk (_("Warning: conditional outside an IT block"\
			     " for Thumb."));
	    }
	  else
	    {
	      if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		{
		  /* Automatically generate the IT instruction.  */
		  new_automatic_it_block (inst.cond);
		  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
		    close_automatic_it_block ();
		}
	      else
		{
		  /* Implicit IT generation is disabled or unavailable.  */
		  inst.error = BAD_OUT_IT;
		  return FAIL;
		}
	    }
	  break;

	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case IT_INSN:
	  now_it.state = MANUAL_IT_BLOCK;
	  now_it.block_length = 0;
	  break;
	}
      break;

    case AUTOMATIC_IT_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	 to incompatible conditions or
	 4 insns-length block reached).  */

      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_it_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_it.block_length++;

	  /* Close and possibly re-open the block if this would be the
	     fifth instruction, or if its condition cannot share the
	     current IT instruction.  */
	  if (now_it.block_length > 4
	      || !now_it_compatible (inst.cond))
	    {
	      force_automatic_it_block_close ();
	      if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    {
	      now_it.insn_cond = TRUE;
	      now_it_add_mask (inst.cond);
	    }

	  if (now_it.state == AUTOMATIC_IT_BLOCK
	      && (inst.it_insn_type == INSIDE_IT_LAST_INSN
		  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	case NEUTRAL_IT_INSN:
	  now_it.block_length++;
	  now_it.insn_cond = TRUE;

	  if (now_it.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    now_it_add_mask (now_it.cc & 1);
	  break;

	case IT_INSN:
	  close_automatic_it_block ();
	  now_it.state = MANUAL_IT_BLOCK;
	  break;
	}
      break;

    case MANUAL_IT_BLOCK:
      {
	/* Check conditional suffixes.  */
	const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
	int is_last;
	now_it.mask <<= 1;
	now_it.mask &= 0x1f;
	is_last = (now_it.mask == 0x10);
	now_it.insn_cond = TRUE;

	switch (inst.it_insn_type)
	  {
	  case OUTSIDE_IT_INSN:
	    inst.error = BAD_NOT_IT;
	    return FAIL;

	  case INSIDE_IT_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    break;

	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in an IT block.  */
	    break;

	  case IT_INSN:
	    inst.error = BAD_IT_IT;
	    return FAIL;
	  }
      }
      break;
    }

  return SUCCESS;
}
18121
/* A 16-bit Thumb instruction class deprecated inside an IT block on
   ARMv8: an entry matches when (insn & mask) == pattern.  */

struct depr_insn_mask
{
  unsigned long pattern;	/* Required bit values under MASK.  */
  unsigned long mask;		/* Which encoding bits to compare.  */
  const char* description;	/* Class name used in the diagnostic.  */
};
18128
/* List of 16-bit instruction patterns deprecated in an IT block in
   ARMv8.  Scanned by it_fsm_post_encode (); an entry matches when
   (inst.instruction & mask) == pattern.  */
static const struct depr_insn_mask depr_it_insns[] = {
  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
  { 0xa000, 0xb800, N_("ADR") },
  { 0x4800, 0xf800, N_("Literal loads") },
  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
     field in asm_opcode. 'tvalue' is used at the stage this check happen.  */
  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
  { 0, 0, NULL }	/* Terminator: mask == 0 ends the scan.  */
};
18143
18144 static void
18145 it_fsm_post_encode (void)
18146 {
18147 int is_last;
18148
18149 if (!now_it.state_handled)
18150 handle_it_state ();
18151
18152 if (now_it.insn_cond
18153 && !now_it.warn_deprecated
18154 && warn_on_deprecated
18155 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
18156 {
18157 if (inst.instruction >= 0x10000)
18158 {
18159 as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
18160 "deprecated in ARMv8"));
18161 now_it.warn_deprecated = TRUE;
18162 }
18163 else
18164 {
18165 const struct depr_insn_mask *p = depr_it_insns;
18166
18167 while (p->mask != 0)
18168 {
18169 if ((inst.instruction & p->mask) == p->pattern)
18170 {
18171 as_tsktsk (_("IT blocks containing 16-bit Thumb instructions "
18172 "of the following class are deprecated in ARMv8: "
18173 "%s"), p->description);
18174 now_it.warn_deprecated = TRUE;
18175 break;
18176 }
18177
18178 ++p;
18179 }
18180 }
18181
18182 if (now_it.block_length > 1)
18183 {
18184 as_tsktsk (_("IT blocks containing more than one conditional "
18185 "instruction are deprecated in ARMv8"));
18186 now_it.warn_deprecated = TRUE;
18187 }
18188 }
18189
18190 is_last = (now_it.mask == 0x10);
18191 if (is_last)
18192 {
18193 now_it.state = OUTSIDE_IT_BLOCK;
18194 now_it.mask = 0;
18195 }
18196 }
18197
18198 static void
18199 force_automatic_it_block_close (void)
18200 {
18201 if (now_it.state == AUTOMATIC_IT_BLOCK)
18202 {
18203 close_automatic_it_block ();
18204 now_it.state = OUTSIDE_IT_BLOCK;
18205 now_it.mask = 0;
18206 }
18207 }
18208
18209 static int
18210 in_it_block (void)
18211 {
18212 if (!now_it.state_handled)
18213 handle_it_state ();
18214
18215 return now_it.state != OUTSIDE_IT_BLOCK;
18216 }
18217
18218 /* Whether OPCODE only has T32 encoding. Since this function is only used by
18219 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
18220 here, hence the "known" in the function name. */
18221
18222 static bfd_boolean
18223 known_t32_only_insn (const struct asm_opcode *opcode)
18224 {
18225 /* Original Thumb-1 wide instruction. */
18226 if (opcode->tencode == do_t_blx
18227 || opcode->tencode == do_t_branch23
18228 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
18229 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
18230 return TRUE;
18231
18232 /* Wide-only instruction added to ARMv8-M Baseline. */
18233 if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m_m_only)
18234 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
18235 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
18236 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
18237 return TRUE;
18238
18239 return FALSE;
18240 }
18241
18242 /* Whether wide instruction variant can be used if available for a valid OPCODE
18243 in ARCH. */
18244
18245 static bfd_boolean
18246 t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
18247 {
18248 if (known_t32_only_insn (opcode))
18249 return TRUE;
18250
18251 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
18252 of variant T3 of B.W is checked in do_t_branch. */
18253 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
18254 && opcode->tencode == do_t_branch)
18255 return TRUE;
18256
18257 /* Wide instruction variants of all instructions with narrow *and* wide
18258 variants become available with ARMv6t2. Other opcodes are either
18259 narrow-only or wide-only and are thus available if OPCODE is valid. */
18260 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
18261 return TRUE;
18262
18263 /* OPCODE with narrow only instruction variant or wide variant not
18264 available. */
18265 return FALSE;
18266 }
18267
/* Assemble one instruction from source line STR: look up the opcode,
   check it is supported by the selected processor in the current
   instruction set (Thumb or ARM), parse its operands, encode it via
   the tencode/aencode hook, and emit it with output_inst.  Errors are
   reported through as_bad/inst.error.  */

void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  memset (&inst, '\0', sizeof (inst));
  inst.reloc.type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_tsktsk (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      /* Two things are addressed here:
	 1) Implicit require narrow instructions on Thumb-1.
	    This avoids relaxation accidentally introducing Thumb-2
	    instructions.
	 2) Reject wide instructions in non Thumb-2 cores.

	 Only instructions with narrow and wide variants need to be handled
	 but selecting all non wide-only instructions is easier.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
	  && !t32_insn_ok (variant, opcode))
	{
	  if (inst.size_req == 0)
	    inst.size_req = 2;
	  else if (inst.size_req == 4)
	    {
	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
		as_bad (_("selected processor does not support 32bit wide "
			  "variant of instruction `%s'"), str);
	      else
		as_bad (_("selected processor does not support `%s' in "
			  "Thumb-2 mode"), str);
	      return;
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
	{
	  /* Prepare the it_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  The impact
	 of relaxable instructions will be considered later after we finish all
	 relaxation.  */
      if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
	variant = arm_arch_none;
      else
	variant = cpu_variant;
      if (inst.size == 4 && !t32_insn_ok (variant, opcode))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xFU << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  output_inst (str);
}
18457
18458 static void
18459 check_it_blocks_finished (void)
18460 {
18461 #ifdef OBJ_ELF
18462 asection *sect;
18463
18464 for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
18465 if (seg_info (sect)->tc_segment_info_data.current_it.state
18466 == MANUAL_IT_BLOCK)
18467 {
18468 as_warn (_("section '%s' finished with an open IT block."),
18469 sect->name);
18470 }
18471 #else
18472 if (now_it.state == MANUAL_IT_BLOCK)
18473 as_warn (_("file finished with an open IT block."));
18474 #endif
18475 }
18476
18477 /* Various frobbings of labels and their addresses. */
18478
/* Hook run at the start of each input line: forget the label seen on
   the previous line so md_assemble only re-aligns labels defined
   immediately before the current instruction.  */

void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
18484
/* Hook run whenever a label SYM is defined: record it for
   md_assemble's alignment fix-up, tag it with the current Thumb and
   interworking state, close any open automatic IT block, mark Thumb
   function entry points, and emit DWARF line info for it.  */

void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:	.word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
18543
18544 bfd_boolean
18545 arm_data_in_code (void)
18546 {
18547 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
18548 {
18549 *input_line_pointer = '/';
18550 input_line_pointer += 5;
18551 *input_line_pointer = 0;
18552 return TRUE;
18553 }
18554
18555 return FALSE;
18556 }
18557
18558 char *
18559 arm_canonicalize_symbol_name (char * name)
18560 {
18561 int len;
18562
18563 if (thumb_mode && (len = strlen (name)) > 5
18564 && streq (name + len - 5, "/data"))
18565 *(name + len - 5) = 0;
18566
18567 return name;
18568 }
18569 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* One reg_entry: spelling S, internal number N, type REG_TYPE_T.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
/* Numbered register, e.g. REGNUM(r,3,RN) -> "r3".  */
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* Numbered register with doubled internal number (used for the Neon
   Q-register set below).  */
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
/* Registers 0-15 with prefix P.  */
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
/* Registers 16-31 with prefix P.  */
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
/* Registers 0-15 with prefix P and doubled numbering (see REGNUM2).  */
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
/* Banked LR/SP/SPSR entries for mode BANK at encoding base BASE, in
   both lower- and upper-case spellings.  */
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
18600
/* Master table of predefined register names.  Each entry is built by the
   REGDEF/REGSET/REGNUM/SPLRBANK macros above, which expand to
   (name, number, type) initializers; entries appear in both lower- and
   upper-case forms so either spelling is accepted.  */
static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c, CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* ARM banked registers.  The encoded number packs the bank selector
     into bits 16+ with flag bits in the low half — presumably matching
     the MRS/MSR banked-register encoding; see REGDEF/SPLRBANK above.  */
  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),

  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),

  /* SPLRBANK emits the SP/LR/SPSR trio for each of these banks.  */
  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.  */
  REGSET(s,VFS), REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.  */
  REGSET(d,VFD), REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.  */
  REGSET2(q,NQ), REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
  REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.  */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
  REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
  REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
/* Note: REGSETH/REGSET2/REGNUMS/SPLRBANK remain defined past this point.  */
#undef REGDEF
#undef REGNUM
#undef REGSET
18718
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.  Every permutation of the f/s/x/c field
   letters is listed explicitly, so the fields may be written in any
   order (e.g. "fsxc" and "cxsf" select the same mask).  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all", PSR_c | PSR_f},
  {"flg", PSR_f},
  {"ctl", PSR_c},

  /* Individual flags.  */
  {"f", PSR_f},
  {"c", PSR_c},
  {"x", PSR_x},
  {"s", PSR_s},

  /* Combinations of flags.  */
  {"fs", PSR_f | PSR_s},
  {"fx", PSR_f | PSR_x},
  {"fc", PSR_f | PSR_c},
  {"sf", PSR_s | PSR_f},
  {"sx", PSR_s | PSR_x},
  {"sc", PSR_s | PSR_c},
  {"xf", PSR_x | PSR_f},
  {"xs", PSR_x | PSR_s},
  {"xc", PSR_x | PSR_c},
  {"cf", PSR_c | PSR_f},
  {"cs", PSR_c | PSR_s},
  {"cx", PSR_c | PSR_x},
  {"fsx", PSR_f | PSR_s | PSR_x},
  {"fsc", PSR_f | PSR_s | PSR_c},
  {"fxs", PSR_f | PSR_x | PSR_s},
  {"fxc", PSR_f | PSR_x | PSR_c},
  {"fcs", PSR_f | PSR_c | PSR_s},
  {"fcx", PSR_f | PSR_c | PSR_x},
  {"sfx", PSR_s | PSR_f | PSR_x},
  {"sfc", PSR_s | PSR_f | PSR_c},
  {"sxf", PSR_s | PSR_x | PSR_f},
  {"sxc", PSR_s | PSR_x | PSR_c},
  {"scf", PSR_s | PSR_c | PSR_f},
  {"scx", PSR_s | PSR_c | PSR_x},
  {"xfs", PSR_x | PSR_f | PSR_s},
  {"xfc", PSR_x | PSR_f | PSR_c},
  {"xsf", PSR_x | PSR_s | PSR_f},
  {"xsc", PSR_x | PSR_s | PSR_c},
  {"xcf", PSR_x | PSR_c | PSR_f},
  {"xcs", PSR_x | PSR_c | PSR_s},
  {"cfs", PSR_c | PSR_f | PSR_s},
  {"cfx", PSR_c | PSR_f | PSR_x},
  {"csf", PSR_c | PSR_s | PSR_f},
  {"csx", PSR_c | PSR_s | PSR_x},
  {"cxf", PSR_c | PSR_x | PSR_f},
  {"cxs", PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
18797
18798 /* Table of V7M psr names. */
18799 static const struct asm_psr v7m_psrs[] =
18800 {
18801 {"apsr", 0x0 }, {"APSR", 0x0 },
18802 {"iapsr", 0x1 }, {"IAPSR", 0x1 },
18803 {"eapsr", 0x2 }, {"EAPSR", 0x2 },
18804 {"psr", 0x3 }, {"PSR", 0x3 },
18805 {"xpsr", 0x3 }, {"XPSR", 0x3 }, {"xPSR", 3 },
18806 {"ipsr", 0x5 }, {"IPSR", 0x5 },
18807 {"epsr", 0x6 }, {"EPSR", 0x6 },
18808 {"iepsr", 0x7 }, {"IEPSR", 0x7 },
18809 {"msp", 0x8 }, {"MSP", 0x8 },
18810 {"psp", 0x9 }, {"PSP", 0x9 },
18811 {"msplim", 0xa }, {"MSPLIM", 0xa },
18812 {"psplim", 0xb }, {"PSPLIM", 0xb },
18813 {"primask", 0x10}, {"PRIMASK", 0x10},
18814 {"basepri", 0x11}, {"BASEPRI", 0x11},
18815 {"basepri_max", 0x12}, {"BASEPRI_MAX", 0x12},
18816 {"faultmask", 0x13}, {"FAULTMASK", 0x13},
18817 {"control", 0x14}, {"CONTROL", 0x14},
18818 {"msp_ns", 0x88}, {"MSP_NS", 0x88},
18819 {"psp_ns", 0x89}, {"PSP_NS", 0x89},
18820 {"msplim_ns", 0x8a}, {"MSPLIM_NS", 0x8a},
18821 {"psplim_ns", 0x8b}, {"PSPLIM_NS", 0x8b},
18822 {"primask_ns", 0x90}, {"PRIMASK_NS", 0x90},
18823 {"basepri_ns", 0x91}, {"BASEPRI_NS", 0x91},
18824 {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
18825 {"control_ns", 0x94}, {"CONTROL_NS", 0x94},
18826 {"sp_ns", 0x98}, {"SP_NS", 0x98 }
18827 };
18828
/* Table of all shift-in-operand names.  "asl" is accepted as a
   synonym for LSL; both spellings map to the same SHIFT_LSL code.  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL }, { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL }, { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR }, { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR }, { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR }, { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX }, { "RRX", SHIFT_RRX }
};
18839
/* Table of all explicit relocation names (the "(...)" operand
   modifiers, e.g. ldr r0, =sym(GOT)).  Lower- and upper-case forms
   of each name map to the same BFD relocation.  ELF only.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
  { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
  { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
  { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
  { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32},
  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
  { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
  { "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
  { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ}
};
#endif
18864
/* Table of all conditional affixes.  0xF is not defined as a condition code.
   "hs" is a synonym for "cs", and "ul"/"lo" are synonyms for "cc".  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
18884
/* Emit a lower-case and an upper-case entry sharing the same barrier
   option CODE; FEAT gates which architectures accept the name.  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }

/* Table of DSB/DMB barrier option names and their 4-bit encodings.
   The "ld" variants are ARMv8-only; the rest need the barrier
   instructions extension.  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  UL_BARRIER ("sy", "SY", 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st", "ST", 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld", "LD", 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish", "ISH", 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh", "SH", 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst", "ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst", "SHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld", "ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un", "UN", 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh", "NSH", 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst", "UNST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst", "NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld", "NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh", "OSH", 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst", "OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld", "OSHLD", 0x1, ARM_EXT_V8)
};

#undef UL_BARRIER
18910
/* Table of ARM-format instructions.	*/

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
18935
/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  Each expands to one (or, for CM,
   nineteen) struct asm_opcode initializers of the form
   { name, operands, tag, arm opcode, thumb opcode, arm variant,
     thumb variant, arm encoder, thumb encoder }.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
/* The "w" flavour marks the conditional-infix spelling as deprecated.  */
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above.  Note that CE/CL take the
   mnemonic as a string literal, whereas C3 stringizes a bare token.  */
#define CE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Helper for CM below: m2 is the (possibly empty) condition infix
   spliced between the m1 and m3 parts of the mnemonic.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Expand to the bare mnemonic plus one entry per condition code
   (the infix sits at an odd position inside the mnemonic).  */
#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_ (m1,   , m2, op, nops, ops, ae),	\
  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
  xCM_ (m1, le, m2, op, nops, ops, ae),	\
  xCM_ (m1, al, m2, op, nops, ops, ae)

#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing. ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Placeholder encoder for entries with no encoding function.  */
#define do_0 0
19087
19088 static const struct asm_opcode insns[] =
19089 {
19090 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
19091 #define THUMB_VARIANT & arm_ext_v4t
19092 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
19093 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
19094 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
19095 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
19096 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
19097 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
19098 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
19099 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
19100 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
19101 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
19102 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
19103 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
19104 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
19105 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
19106 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
19107 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
19108
19109 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
19110 for setting PSR flag bits. They are obsolete in V6 and do not
19111 have Thumb equivalents. */
19112 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
19113 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
19114 CL("tstp", 110f000, 2, (RR, SH), cmp),
19115 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
19116 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
19117 CL("cmpp", 150f000, 2, (RR, SH), cmp),
19118 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
19119 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
19120 CL("cmnp", 170f000, 2, (RR, SH), cmp),
19121
19122 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
19123 tC3("movs", 1b00000, _movs, 2, (RR, SHG), mov, t_mov_cmp),
19124 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
19125 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
19126
19127 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
19128 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
19129 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
19130 OP_RRnpc),
19131 OP_ADDRGLDR),ldst, t_ldst),
19132 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
19133
19134 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19135 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19136 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19137 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19138 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19139 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19140
19141 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
19142 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
19143 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
19144 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
19145
19146 /* Pseudo ops. */
19147 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
19148 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
19149 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
19150 tCE("udf", 7f000f0, _udf, 1, (oIffffb), bkpt, t_udf),
19151
19152 /* Thumb-compatibility pseudo ops. */
19153 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
19154 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
19155 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
19156 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
19157 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
19158 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
19159 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
19160 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
19161 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
19162 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
19163 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
19164 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
19165
19166 /* These may simplify to neg. */
19167 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
19168 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
19169
19170 #undef THUMB_VARIANT
19171 #define THUMB_VARIANT & arm_ext_v6
19172
19173 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
19174
19175 /* V1 instructions with no Thumb analogue prior to V6T2. */
19176 #undef THUMB_VARIANT
19177 #define THUMB_VARIANT & arm_ext_v6t2
19178
19179 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
19180 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
19181 CL("teqp", 130f000, 2, (RR, SH), cmp),
19182
19183 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19184 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19185 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
19186 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19187
19188 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19189 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19190
19191 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19192 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19193
19194 /* V1 instructions with no Thumb analogue at all. */
19195 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
19196 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
19197
19198 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
19199 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
19200 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
19201 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
19202 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
19203 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
19204 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
19205 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
19206
19207 #undef ARM_VARIANT
19208 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
19209 #undef THUMB_VARIANT
19210 #define THUMB_VARIANT & arm_ext_v4t
19211
19212 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
19213 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
19214
19215 #undef THUMB_VARIANT
19216 #define THUMB_VARIANT & arm_ext_v6t2
19217
19218 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
19219 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
19220
19221 /* Generic coprocessor instructions. */
19222 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
19223 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19224 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19225 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19226 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19227 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19228 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
19229
19230 #undef ARM_VARIANT
19231 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
19232
19233 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
19234 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
19235
19236 #undef ARM_VARIANT
19237 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
19238 #undef THUMB_VARIANT
19239 #define THUMB_VARIANT & arm_ext_msr
19240
19241 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
19242 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
19243
19244 #undef ARM_VARIANT
19245 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
19246 #undef THUMB_VARIANT
19247 #define THUMB_VARIANT & arm_ext_v6t2
19248
19249 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19250 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19251 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19252 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19253 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19254 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19255 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19256 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19257
19258 #undef ARM_VARIANT
19259 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
19260 #undef THUMB_VARIANT
19261 #define THUMB_VARIANT & arm_ext_v4t
19262
19263 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19264 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19265 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19266 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19267 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19268 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19269
19270 #undef ARM_VARIANT
19271 #define ARM_VARIANT & arm_ext_v4t_5
19272
19273 /* ARM Architecture 4T. */
19274 /* Note: bx (and blx) are required on V5, even if the processor does
19275 not support Thumb. */
19276 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
19277
19278 #undef ARM_VARIANT
19279 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
19280 #undef THUMB_VARIANT
19281 #define THUMB_VARIANT & arm_ext_v5t
19282
19283 /* Note: blx has 2 variants; the .value coded here is for
19284 BLX(2). Only this variant has conditional execution. */
19285 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
19286 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
19287
19288 #undef THUMB_VARIANT
19289 #define THUMB_VARIANT & arm_ext_v6t2
19290
19291 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
19292 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19293 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19294 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19295 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19296 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
19297 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19298 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19299
19300 #undef ARM_VARIANT
19301 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
19302 #undef THUMB_VARIANT
19303 #define THUMB_VARIANT & arm_ext_v5exp
19304
19305 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19306 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19307 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19308 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19309
19310 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19311 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19312
19313 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19314 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19315 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19316 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19317
19318 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19319 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19320 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19321 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19322
19323 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19324 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19325
19326 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19327 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19328 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19329 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19330
19331 #undef ARM_VARIANT
19332 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
19333 #undef THUMB_VARIANT
19334 #define THUMB_VARIANT & arm_ext_v6t2
19335
19336 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
19337 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
19338 ldrd, t_ldstd),
19339 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
19340 ADDRGLDRS), ldrd, t_ldstd),
19341
19342 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19343 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19344
19345 #undef ARM_VARIANT
19346 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
19347
19348 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
19349
19350 #undef ARM_VARIANT
19351 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
19352 #undef THUMB_VARIANT
19353 #define THUMB_VARIANT & arm_ext_v6
19354
19355 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
19356 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
19357 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19358 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19359 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19360 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19361 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19362 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19363 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19364 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
19365
19366 #undef THUMB_VARIANT
19367 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19368
19369 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
19370 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19371 strex, t_strex),
19372 #undef THUMB_VARIANT
19373 #define THUMB_VARIANT & arm_ext_v6t2
19374
19375 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19376 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19377
19378 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
19379 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
19380
19381 /* ARM V6 not included in V7M. */
19382 #undef THUMB_VARIANT
19383 #define THUMB_VARIANT & arm_ext_v6_notm
19384 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19385 TUF("rfe", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19386 UF(rfeib, 9900a00, 1, (RRw), rfe),
19387 UF(rfeda, 8100a00, 1, (RRw), rfe),
19388 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
19389 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19390 UF(rfefa, 8100a00, 1, (RRw), rfe),
19391 TUF("rfeea", 9100a00, e810c000, 1, (RRw), rfe, rfe),
19392 UF(rfeed, 9900a00, 1, (RRw), rfe),
19393 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19394 TUF("srs", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19395 TUF("srsea", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19396 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
19397 UF(srsfa, 9c00500, 2, (oRRw, I31w), srs),
19398 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
19399 UF(srsed, 8400500, 2, (oRRw, I31w), srs),
19400 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
19401 TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
19402 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
19403
19404 /* ARM V6 not included in V7M (eg. integer SIMD). */
19405 #undef THUMB_VARIANT
19406 #define THUMB_VARIANT & arm_ext_v6_dsp
19407 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
19408 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
19409 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19410 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19411 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19412 /* Old name for QASX. */
19413 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19414 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19415 /* Old name for QSAX. */
19416 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19417 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19418 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19419 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19420 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19421 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19422 /* Old name for SASX. */
19423 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19424 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19425 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19426 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19427 /* Old name for SHASX. */
19428 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19429 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19430 /* Old name for SHSAX. */
19431 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19432 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19433 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19434 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19435 /* Old name for SSAX. */
19436 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19437 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19438 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19439 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19440 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19441 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19442 /* Old name for UASX. */
19443 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19444 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19445 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19446 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19447 /* Old name for UHASX. */
19448 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19449 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19450 /* Old name for UHSAX. */
19451 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19452 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19453 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19454 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19455 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19456 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19457 /* Old name for UQASX. */
19458 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19459 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19460 /* Old name for UQSAX. */
19461 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19462 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19463 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19464 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19465 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19466 /* Old name for USAX. */
19467 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19468 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19469 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19470 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19471 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19472 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19473 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19474 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19475 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19476 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19477 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19478 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19479 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19480 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19481 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19482 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19483 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19484 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19485 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19486 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19487 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19488 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19489 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19490 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19491 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19492 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19493 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19494 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19495 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19496 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
19497 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
19498 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19499 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19500 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
19501
19502 #undef ARM_VARIANT
19503 #define ARM_VARIANT & arm_ext_v6k
19504 #undef THUMB_VARIANT
19505 #define THUMB_VARIANT & arm_ext_v6k
19506
19507 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
19508 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
19509 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
19510 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
19511
19512 #undef THUMB_VARIANT
19513 #define THUMB_VARIANT & arm_ext_v6_notm
19514 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
19515 ldrexd, t_ldrexd),
19516 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
19517 RRnpcb), strexd, t_strexd),
19518
19519 #undef THUMB_VARIANT
19520 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19521 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
19522 rd_rn, rd_rn),
19523 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
19524 rd_rn, rd_rn),
19525 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19526 strex, t_strexbh),
19527 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19528 strex, t_strexbh),
19529 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
19530
19531 #undef ARM_VARIANT
19532 #define ARM_VARIANT & arm_ext_sec
19533 #undef THUMB_VARIANT
19534 #define THUMB_VARIANT & arm_ext_sec
19535
19536 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
19537
19538 #undef ARM_VARIANT
19539 #define ARM_VARIANT & arm_ext_virt
19540 #undef THUMB_VARIANT
19541 #define THUMB_VARIANT & arm_ext_virt
19542
19543 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
19544 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
19545
19546 #undef ARM_VARIANT
19547 #define ARM_VARIANT & arm_ext_pan
19548 #undef THUMB_VARIANT
19549 #define THUMB_VARIANT & arm_ext_pan
19550
19551 TUF("setpan", 1100000, b610, 1, (I7), setpan, t_setpan),
19552
19553 #undef ARM_VARIANT
19554 #define ARM_VARIANT & arm_ext_v6t2
19555 #undef THUMB_VARIANT
19556 #define THUMB_VARIANT & arm_ext_v6t2
19557
19558 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
19559 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
19560 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
19561 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
19562
19563 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
19564 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
19565
19566 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19567 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19568 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19569 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19570
19571 #undef THUMB_VARIANT
19572 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19573 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
19574 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
19575
19576 /* Thumb-only instructions. */
19577 #undef ARM_VARIANT
19578 #define ARM_VARIANT NULL
19579 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
19580 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
19581
19582 /* ARM does not really have an IT instruction, so always allow it.
19583 The opcode is copied from Thumb in order to allow warnings in
19584 -mimplicit-it=[never | arm] modes. */
19585 #undef ARM_VARIANT
19586 #define ARM_VARIANT & arm_ext_v1
19587 #undef THUMB_VARIANT
19588 #define THUMB_VARIANT & arm_ext_v6t2
19589
19590 TUE("it", bf08, bf08, 1, (COND), it, t_it),
19591 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
19592 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
19593 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
19594 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
19595 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
19596 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
19597 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
19598 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
19599 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
19600 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
19601 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
19602 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
19603 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
19604 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
19605 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
19606 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
19607 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
19608
19609 /* Thumb2 only instructions. */
19610 #undef ARM_VARIANT
19611 #define ARM_VARIANT NULL
19612
19613 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
19614 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
19615 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
19616 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
19617 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
19618 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
19619
19620 /* Hardware division instructions. */
19621 #undef ARM_VARIANT
19622 #define ARM_VARIANT & arm_ext_adiv
19623 #undef THUMB_VARIANT
19624 #define THUMB_VARIANT & arm_ext_div
19625
19626 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
19627 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
19628
19629 /* ARM V6M/V7 instructions. */
19630 #undef ARM_VARIANT
19631 #define ARM_VARIANT & arm_ext_barrier
19632 #undef THUMB_VARIANT
19633 #define THUMB_VARIANT & arm_ext_barrier
19634
19635 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
19636 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
19637 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
19638
19639 /* ARM V7 instructions. */
19640 #undef ARM_VARIANT
19641 #define ARM_VARIANT & arm_ext_v7
19642 #undef THUMB_VARIANT
19643 #define THUMB_VARIANT & arm_ext_v7
19644
19645 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
19646 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
19647
19648 #undef ARM_VARIANT
19649 #define ARM_VARIANT & arm_ext_mp
19650 #undef THUMB_VARIANT
19651 #define THUMB_VARIANT & arm_ext_mp
19652
19653 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
19654
19655 /* AArchv8 instructions. */
19656 #undef ARM_VARIANT
19657 #define ARM_VARIANT & arm_ext_v8
19658
19659 /* Instructions shared between armv8-a and armv8-m. */
19660 #undef THUMB_VARIANT
19661 #define THUMB_VARIANT & arm_ext_atomics
19662
19663 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19664 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19665 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19666 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19667 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19668 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19669 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19670 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
19671 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19672 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
19673 stlex, t_stlex),
19674 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
19675 stlex, t_stlex),
19676 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
19677 stlex, t_stlex),
19678 #undef THUMB_VARIANT
19679 #define THUMB_VARIANT & arm_ext_v8
19680
19681 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
19682 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
19683 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
19684 ldrexd, t_ldrexd),
19685 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
19686 strexd, t_strexd),
19687 /* ARMv8 T32 only. */
19688 #undef ARM_VARIANT
19689 #define ARM_VARIANT NULL
19690 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
19691 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
19692 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
19693
19694 /* FP for ARMv8. */
19695 #undef ARM_VARIANT
19696 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
19697 #undef THUMB_VARIANT
19698 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
19699
19700 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
19701 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
19702 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
19703 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
19704 nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
19705 nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
19706 nUF(vcvta, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvta),
19707 nUF(vcvtn, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtn),
19708 nUF(vcvtp, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtp),
19709 nUF(vcvtm, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtm),
19710 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
19711 nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ), vrintz),
19712 nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ), vrintx),
19713 nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ), vrinta),
19714 nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ), vrintn),
19715 nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ), vrintp),
19716 nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ), vrintm),
19717
19718 /* Crypto v1 extensions. */
19719 #undef ARM_VARIANT
19720 #define ARM_VARIANT & fpu_crypto_ext_armv8
19721 #undef THUMB_VARIANT
19722 #define THUMB_VARIANT & fpu_crypto_ext_armv8
19723
19724 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
19725 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
19726 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
19727 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
19728 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
19729 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
19730 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
19731 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
19732 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
19733 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
19734 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
19735 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
19736 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
19737 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
19738
19739 #undef ARM_VARIANT
19740 #define ARM_VARIANT & crc_ext_armv8
19741 #undef THUMB_VARIANT
19742 #define THUMB_VARIANT & crc_ext_armv8
19743 TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
19744 TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
19745 TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
19746 TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
19747 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
19748 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
19749
19750 /* ARMv8.2 RAS extension. */
19751 #undef ARM_VARIANT
19752 #define ARM_VARIANT & arm_ext_ras
19753 #undef THUMB_VARIANT
19754 #define THUMB_VARIANT & arm_ext_ras
19755 TUE ("esb", 320f010, f3af8010, 0, (), noargs, noargs),
19756
19757 #undef ARM_VARIANT
19758 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
19759 #undef THUMB_VARIANT
19760 #define THUMB_VARIANT NULL
19761
19762 cCE("wfs", e200110, 1, (RR), rd),
19763 cCE("rfs", e300110, 1, (RR), rd),
19764 cCE("wfc", e400110, 1, (RR), rd),
19765 cCE("rfc", e500110, 1, (RR), rd),
19766
19767 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
19768 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
19769 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
19770 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
19771
19772 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
19773 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
19774 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
19775 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
19776
19777 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
19778 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
19779 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
19780 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
19781 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
19782 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
19783 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
19784 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
19785 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
19786 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
19787 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
19788 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
19789
19790 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
19791 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
19792 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
19793 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
19794 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
19795 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
19796 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
19797 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
19798 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
19799 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
19800 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
19801 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
19802
19803 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
19804 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
19805 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
19806 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
19807 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
19808 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
19809 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
19810 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
19811 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
19812 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
19813 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
19814 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
19815
19816 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
19817 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
19818 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
19819 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
19820 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
19821 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
19822 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
19823 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
19824 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
19825 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
19826 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
19827 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
19828
19829 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
19830 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
19831 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
19832 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
19833 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
19834 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
19835 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
19836 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
19837 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
19838 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
19839 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
19840 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
19841
19842 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
19843 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
19844 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
19845 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
19846 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
19847 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
19848 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
19849 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
19850 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
19851 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
19852 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
19853 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
19854
19855 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
19856 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
19857 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
19858 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
19859 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
19860 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
19861 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
19862 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
19863 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
19864 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
19865 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
19866 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
19867
19868 cCL("exps", e708100, 2, (RF, RF_IF), rd_rm),
19869 cCL("expsp", e708120, 2, (RF, RF_IF), rd_rm),
19870 cCL("expsm", e708140, 2, (RF, RF_IF), rd_rm),
19871 cCL("expsz", e708160, 2, (RF, RF_IF), rd_rm),
19872 cCL("expd", e708180, 2, (RF, RF_IF), rd_rm),
19873 cCL("expdp", e7081a0, 2, (RF, RF_IF), rd_rm),
19874 cCL("expdm", e7081c0, 2, (RF, RF_IF), rd_rm),
19875 cCL("expdz", e7081e0, 2, (RF, RF_IF), rd_rm),
19876 cCL("expe", e788100, 2, (RF, RF_IF), rd_rm),
19877 cCL("expep", e788120, 2, (RF, RF_IF), rd_rm),
19878 cCL("expem", e788140, 2, (RF, RF_IF), rd_rm),
19879 cCL("expdz", e788160, 2, (RF, RF_IF), rd_rm),
19880
19881 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
19882 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
19883 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
19884 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
19885 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
19886 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
19887 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
19888 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
19889 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
19890 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
19891 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
19892 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
19893
19894 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
19895 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
19896 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
19897 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
19898 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
19899 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
19900 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
19901 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
19902 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
19903 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
19904 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
19905 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
19906
19907 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
19908 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
19909 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
19910 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
19911 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
19912 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
19913 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
19914 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
19915 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
19916 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
19917 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
19918 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
19919
19920 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
19921 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
19922 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
19923 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
19924 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
19925 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
19926 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
19927 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
19928 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
19929 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
19930 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
19931 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
19932
19933 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
19934 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
19935 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
19936 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
19937 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
19938 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
19939 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
19940 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
19941 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
19942 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
19943 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
19944 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
19945
19946 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
19947 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
19948 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
19949 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
19950 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
19951 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
19952 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
19953 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
19954 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
19955 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
19956 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
19957 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
19958
19959 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
19960 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
19961 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
19962 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
19963 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
19964 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
19965 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
19966 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
19967 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
19968 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
19969 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
19970 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
19971
19972 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
19973 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
19974 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
19975 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
19976 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
19977 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
19978 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
19979 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
19980 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
19981 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
19982 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
19983 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
19984
19985 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
19986 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
19987 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
19988 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
19989 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
19990 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19991 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19992 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19993 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
19994 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
19995 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
19996 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
19997
19998 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
19999 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
20000 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
20001 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
20002 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
20003 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20004 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20005 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20006 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
20007 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
20008 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
20009 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
20010
20011 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
20012 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
20013 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
20014 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
20015 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
20016 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20017 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20018 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20019 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
20020 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
20021 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
20022 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
20023
20024 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
20025 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
20026 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
20027 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
20028 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
20029 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20030 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20031 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20032 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
20033 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
20034 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
20035 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
20036
20037 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
20038 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
20039 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
20040 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
20041 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
20042 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20043 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20044 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20045 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
20046 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
20047 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
20048 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
20049
20050 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
20051 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
20052 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
20053 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
20054 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
20055 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20056 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20057 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20058 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
20059 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
20060 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
20061 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
20062
20063 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
20064 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
20065 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
20066 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
20067 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
20068 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20069 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20070 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20071 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
20072 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
20073 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
20074 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
20075
20076 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
20077 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
20078 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
20079 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
20080 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
20081 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20082 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20083 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20084 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
20085 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
20086 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
20087 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
20088
20089 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
20090 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
20091 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
20092 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
20093 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
20094 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20095 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20096 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20097 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
20098 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
20099 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
20100 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
20101
20102 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
20103 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
20104 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
20105 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
20106 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
20107 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20108 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20109 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20110 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
20111 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
20112 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
20113 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
20114
20115 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20116 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20117 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20118 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20119 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20120 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20121 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20122 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20123 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20124 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20125 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20126 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20127
20128 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20129 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20130 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20131 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20132 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20133 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20134 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20135 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20136 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20137 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20138 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20139 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20140
20141 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20142 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20143 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20144 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20145 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20146 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20147 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20148 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20149 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20150 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20151 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20152 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20153
20154 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
20155 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
20156 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
20157 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
20158
20159 cCL("flts", e000110, 2, (RF, RR), rn_rd),
20160 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
20161 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
20162 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
20163 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
20164 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
20165 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
20166 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
20167 cCL("flte", e080110, 2, (RF, RR), rn_rd),
20168 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
20169 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
20170 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
20171
20172 /* The implementation of the FIX instruction is broken on some
20173 assemblers, in that it accepts a precision specifier as well as a
20174 rounding specifier, despite the fact that this is meaningless.
20175 To be more compatible, we accept it as well, though of course it
20176 does not set any bits. */
20177 cCE("fix", e100110, 2, (RR, RF), rd_rm),
20178 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
20179 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
20180 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
20181 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
20182 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
20183 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
20184 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
20185 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
20186 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
20187 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
20188 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
20189 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
20190
20191 /* Instructions that were new with the real FPA, call them V2. */
20192 #undef ARM_VARIANT
20193 #define ARM_VARIANT & fpu_fpa_ext_v2
20194
20195 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20196 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20197 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20198 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20199 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20200 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20201
20202 #undef ARM_VARIANT
20203 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
20204
20205 /* Moves and type conversions. */
20206 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
20207 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
20208 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
20209 cCE("fmstat", ef1fa10, 0, (), noargs),
20210 cCE("vmrs", ef00a10, 2, (APSR_RR, RVC), vmrs),
20211 cCE("vmsr", ee00a10, 2, (RVC, RR), vmsr),
20212 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
20213 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
20214 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
20215 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
20216 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
20217 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
20218 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
20219 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
20220
20221 /* Memory operations. */
20222 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
20223 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
20224 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20225 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20226 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20227 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20228 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20229 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20230 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20231 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20232 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20233 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20234 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20235 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20236 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20237 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20238 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20239 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20240
20241 /* Monadic operations. */
20242 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
20243 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
20244 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
20245
20246 /* Dyadic operations. */
20247 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20248 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20249 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20250 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20251 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20252 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20253 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20254 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20255 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20256
20257 /* Comparisons. */
20258 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
20259 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
20260 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
20261 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
20262
20263 /* Double precision load/store are still present on single precision
20264 implementations. */
20265 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
20266 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
20267 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20268 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20269 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20270 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20271 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20272 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20273 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20274 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20275
20276 #undef ARM_VARIANT
20277 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
20278
20279 /* Moves and type conversions. */
20280 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20281 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
20282 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20283 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
20284 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
20285 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
20286 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
20287 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
20288 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
20289 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
20290 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20291 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
20292 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20293
20294 /* Monadic operations. */
20295 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20296 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20297 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20298
20299 /* Dyadic operations. */
20300 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20301 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20302 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20303 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20304 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20305 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20306 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20307 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20308 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20309
20310 /* Comparisons. */
20311 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20312 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
20313 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20314 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
20315
20316 #undef ARM_VARIANT
20317 #define ARM_VARIANT & fpu_vfp_ext_v2
20318
20319 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
20320 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
20321 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
20322 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
20323
20324 /* Instructions which may belong to either the Neon or VFP instruction sets.
20325 Individual encoder functions perform additional architecture checks. */
20326 #undef ARM_VARIANT
20327 #define ARM_VARIANT & fpu_vfp_ext_v1xd
20328 #undef THUMB_VARIANT
20329 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
20330
20331 /* These mnemonics are unique to VFP. */
20332 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
20333 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
20334 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20335 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20336 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20337 nCE(vcmp, _vcmp, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
20338 nCE(vcmpe, _vcmpe, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
20339 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
20340 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
20341 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
20342
20343 /* Mnemonics shared by Neon and VFP. */
20344 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
20345 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
20346 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
20347
20348 nCEF(vadd, _vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
20349 nCEF(vsub, _vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
20350
20351 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
20352 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
20353
20354 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20355 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20356 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20357 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20358 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20359 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20360 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
20361 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
20362
20363 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
20364 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
20365 NCEF(vcvtb, eb20a40, 2, (RVSD, RVSD), neon_cvtb),
20366 NCEF(vcvtt, eb20a40, 2, (RVSD, RVSD), neon_cvtt),
20367
20368
20369 /* NOTE: All VMOV encoding is special-cased! */
20370 NCE(vmov, 0, 1, (VMOV), neon_mov),
20371 NCE(vmovq, 0, 1, (VMOV), neon_mov),
20372
20373 #undef ARM_VARIANT
20374 #define ARM_VARIANT & arm_ext_fp16
20375 #undef THUMB_VARIANT
20376 #define THUMB_VARIANT & arm_ext_fp16
20377 /* New instructions added from v8.2, allowing the extraction and insertion of
20378 the upper 16 bits of a 32-bit vector register. */
20379 NCE (vmovx, eb00a40, 2, (RVS, RVS), neon_movhf),
20380 NCE (vins, eb00ac0, 2, (RVS, RVS), neon_movhf),
20381
20382 #undef THUMB_VARIANT
20383 #define THUMB_VARIANT & fpu_neon_ext_v1
20384 #undef ARM_VARIANT
20385 #define ARM_VARIANT & fpu_neon_ext_v1
20386
20387 /* Data processing with three registers of the same length. */
20388 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
20389 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
20390 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
20391 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20392 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20393 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20394 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20395 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20396 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20397 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
20398 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
20399 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
20400 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
20401 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
20402 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
20403 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
20404 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
20405 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
20406 /* If not immediate, fall back to neon_dyadic_i64_su.
20407 shl_imm should accept I8 I16 I32 I64,
20408 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
20409 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
20410 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
20411 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
20412 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
20413 /* Logic ops, types optional & ignored. */
20414 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20415 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20416 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20417 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20418 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20419 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20420 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20421 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20422 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
20423 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
20424 /* Bitfield ops, untyped. */
20425 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20426 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20427 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20428 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20429 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20430 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20431 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
20432 nUF(vabd, _vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20433 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20434 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20435 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20436 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20437 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20438 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
20439 back to neon_dyadic_if_su. */
20440 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20441 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
20442 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20443 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
20444 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20445 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
20446 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20447 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
20448 /* Comparison. Type I8 I16 I32 F32. */
20449 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
20450 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
20451 /* As above, D registers only. */
20452 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
20453 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
20454 /* Int and float variants, signedness unimportant. */
20455 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
20456 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
20457 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
20458 /* Add/sub take types I8 I16 I32 I64 F32. */
20459 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
20460 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
20461 /* vtst takes sizes 8, 16, 32. */
20462 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
20463 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
20464 /* VMUL takes I8 I16 I32 F32 P8. */
20465 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
20466 /* VQD{R}MULH takes S16 S32. */
20467 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20468 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20469 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20470 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20471 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20472 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
20473 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20474 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
20475 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20476 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
20477 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20478 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
20479 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
20480 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
20481 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
20482 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
20483 /* ARM v8.1 extension. */
20484 nUF (vqrdmlah, _vqrdmlah, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
20485 nUF (vqrdmlahq, _vqrdmlah, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
20486 nUF (vqrdmlsh, _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
20487 nUF (vqrdmlshq, _vqrdmlsh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
20488
20489 /* Two address, int/float. Types S8 S16 S32 F32. */
20490 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
20491 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
20492
20493 /* Data processing with two registers and a shift amount. */
20494 /* Right shifts, and variants with rounding.
20495 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
20496 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
20497 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
20498 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
20499 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
20500 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
20501 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
20502 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
20503 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
20504 /* Shift and insert. Sizes accepted 8 16 32 64. */
20505 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
20506 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
20507 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
20508 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
20509 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
20510 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
20511 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
20512 /* Right shift immediate, saturating & narrowing, with rounding variants.
20513 Types accepted S16 S32 S64 U16 U32 U64. */
20514 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
20515 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
20516 /* As above, unsigned. Types accepted S16 S32 S64. */
20517 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
20518 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
20519 /* Right shift narrowing. Types accepted I16 I32 I64. */
20520 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
20521 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
20522 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
20523 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
20524 /* CVT with optional immediate for fixed-point variant. */
20525 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
20526
20527 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
20528 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
20529
20530 /* Data processing, three registers of different lengths. */
20531 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
20532 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
20533 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
20534 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
20535 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
20536 /* If not scalar, fall back to neon_dyadic_long.
20537 Vector types as above, scalar types S16 S32 U16 U32. */
20538 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
20539 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
20540 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
20541 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
20542 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
20543 /* Dyadic, narrowing insns. Types I16 I32 I64. */
20544 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20545 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20546 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20547 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20548 /* Saturating doubling multiplies. Types S16 S32. */
20549 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20550 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20551 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20552 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
20553 S16 S32 U16 U32. */
20554 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
20555
20556 /* Extract. Size 8. */
20557 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
20558 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
20559
20560 /* Two registers, miscellaneous. */
20561 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
20562 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
20563 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
20564 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
20565 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
20566 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
20567 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
20568 /* Vector replicate. Sizes 8 16 32. */
20569 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
20570 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
20571 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
20572 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
20573 /* VMOVN. Types I16 I32 I64. */
20574 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
20575 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
20576 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
20577 /* VQMOVUN. Types S16 S32 S64. */
20578 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
20579 /* VZIP / VUZP. Sizes 8 16 32. */
20580 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
20581 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
20582 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
20583 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
20584 /* VQABS / VQNEG. Types S8 S16 S32. */
20585 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
20586 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
20587 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
20588 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
20589 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
20590 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
20591 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
20592 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
20593 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
20594 /* Reciprocal estimates. Types U32 F16 F32. */
20595 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
20596 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
20597 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
20598 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
20599 /* VCLS. Types S8 S16 S32. */
20600 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
20601 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
20602 /* VCLZ. Types I8 I16 I32. */
20603 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
20604 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
20605 /* VCNT. Size 8. */
20606 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
20607 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
20608 /* Two address, untyped. */
20609 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
20610 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
20611 /* VTRN. Sizes 8 16 32. */
20612 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
20613 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
20614
20615 /* Table lookup. Size 8. */
20616 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
20617 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
20618
20619 #undef THUMB_VARIANT
20620 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
20621 #undef ARM_VARIANT
20622 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
20623
20624 /* Neon element/structure load/store. */
20625 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
20626 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
20627 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
20628 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
20629 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
20630 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
20631 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
20632 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
20633
20634 #undef THUMB_VARIANT
20635 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
20636 #undef ARM_VARIANT
20637 #define ARM_VARIANT & fpu_vfp_ext_v3xd
20638 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
20639 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20640 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20641 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20642 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20643 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20644 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20645 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20646 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20647
20648 #undef THUMB_VARIANT
20649 #define THUMB_VARIANT & fpu_vfp_ext_v3
20650 #undef ARM_VARIANT
20651 #define ARM_VARIANT & fpu_vfp_ext_v3
20652
20653 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
20654 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20655 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20656 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20657 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20658 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20659 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20660 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20661 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20662
20663 #undef ARM_VARIANT
20664 #define ARM_VARIANT & fpu_vfp_ext_fma
20665 #undef THUMB_VARIANT
20666 #define THUMB_VARIANT & fpu_vfp_ext_fma
20667 /* Mnemonics shared by Neon and VFP. These are included in the
20668 VFP FMA variant; NEON and VFP FMA always includes the NEON
20669 FMA instructions. */
20670 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
20671 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
20672 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
20673 the v form should always be used. */
20674 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20675 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20676 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20677 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20678 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20679 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20680
20681 #undef THUMB_VARIANT
20682 #undef ARM_VARIANT
20683 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
20684
20685 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20686 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20687 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20688 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20689 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20690 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20691 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
20692 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
20693
20694 #undef ARM_VARIANT
20695 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
20696
20697 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
20698 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
20699 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
20700 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
20701 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
20702 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
20703 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
20704 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
20705 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
20706 cCE("textrmub",e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20707 cCE("textrmuh",e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20708 cCE("textrmuw",e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20709 cCE("textrmsb",e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20710 cCE("textrmsh",e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20711 cCE("textrmsw",e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20712 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20713 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20714 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20715 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
20716 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
20717 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20718 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20719 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20720 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20721 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20722 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20723 cCE("tmovmskb",e100030, 2, (RR, RIWR), rd_rn),
20724 cCE("tmovmskh",e500030, 2, (RR, RIWR), rd_rn),
20725 cCE("tmovmskw",e900030, 2, (RR, RIWR), rd_rn),
20726 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
20727 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
20728 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
20729 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
20730 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
20731 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
20732 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
20733 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
20734 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20735 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20736 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20737 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20738 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20739 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20740 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20741 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20742 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20743 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
20744 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20745 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20746 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20747 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20748 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20749 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20750 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20751 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20752 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20753 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20754 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20755 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20756 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20757 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20758 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20759 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20760 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20761 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20762 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20763 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20764 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20765 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
20766 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
20767 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20768 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20769 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20770 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20771 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20772 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20773 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20774 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20775 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20776 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20777 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20778 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20779 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20780 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20781 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20782 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20783 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20784 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20785 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
20786 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20787 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20788 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20789 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20790 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20791 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20792 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20793 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20794 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20795 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20796 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20797 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20798 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20799 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20800 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20801 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20802 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20803 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20804 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20805 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20806 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20807 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
20808 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20809 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20810 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20811 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20812 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20813 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20814 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20815 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20816 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20817 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20818 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20819 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20820 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20821 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20822 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20823 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20824 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20825 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20826 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20827 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20828 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
20829 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
20830 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20831 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20832 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20833 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20834 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20835 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20836 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20837 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20838 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20839 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
20840 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
20841 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
20842 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
20843 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
20844 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
20845 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20846 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20847 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20848 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
20849 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
20850 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
20851 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
20852 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
20853 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
20854 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20855 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20856 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20857 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20858 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
20859
20860 #undef ARM_VARIANT
20861 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
20862
20863 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
20864 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
20865 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
20866 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
20867 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
20868 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
20869 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20870 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20871 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20872 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20873 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20874 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20875 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20876 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20877 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20878 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20879 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20880 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20881 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20882 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20883 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
20884 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20885 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20886 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20887 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20888 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20889 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20890 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20891 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20892 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20893 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20894 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20895 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20896 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20897 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20898 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20899 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20900 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20901 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20902 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20903 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20904 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20905 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20906 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20907 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20908 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20909 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20910 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20911 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20912 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20913 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20914 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20915 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20916 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20917 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20918 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20919 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20920
20921 #undef ARM_VARIANT
20922 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
20923
20924 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
20925 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
20926 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
20927 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
20928 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
20929 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
20930 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
20931 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
20932 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
20933 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
20934 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
20935 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
20936 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
20937 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
20938 cCE("cfmv64lr",e000510, 2, (RMDX, RR), rn_rd),
20939 cCE("cfmvr64l",e100510, 2, (RR, RMDX), rd_rn),
20940 cCE("cfmv64hr",e000530, 2, (RMDX, RR), rn_rd),
20941 cCE("cfmvr64h",e100530, 2, (RR, RMDX), rd_rn),
20942 cCE("cfmval32",e200440, 2, (RMAX, RMFX), rd_rn),
20943 cCE("cfmv32al",e100440, 2, (RMFX, RMAX), rd_rn),
20944 cCE("cfmvam32",e200460, 2, (RMAX, RMFX), rd_rn),
20945 cCE("cfmv32am",e100460, 2, (RMFX, RMAX), rd_rn),
20946 cCE("cfmvah32",e200480, 2, (RMAX, RMFX), rd_rn),
20947 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX), rd_rn),
20948 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
20949 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
20950 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
20951 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
20952 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX), mav_dspsc),
20953 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS), rd),
20954 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
20955 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
20956 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
20957 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
20958 cCE("cfcvt32s",e000480, 2, (RMF, RMFX), rd_rn),
20959 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX), rd_rn),
20960 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX), rd_rn),
20961 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX), rd_rn),
20962 cCE("cfcvts32",e100580, 2, (RMFX, RMF), rd_rn),
20963 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD), rd_rn),
20964 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
20965 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
20966 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR), mav_triple),
20967 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR), mav_triple),
20968 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
20969 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
20970 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
20971 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
20972 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
20973 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
20974 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
20975 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
20976 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
20977 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
20978 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
20979 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
20980 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
20981 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
20982 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
20983 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
20984 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
20985 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
20986 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
20987 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
20988 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20989 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20990 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20991 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20992 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20993 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20994 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20995 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20996 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
20997 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
20998 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
20999 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
21000
21001 /* ARMv8-M instructions. */
21002 #undef ARM_VARIANT
21003 #define ARM_VARIANT NULL
21004 #undef THUMB_VARIANT
21005 #define THUMB_VARIANT & arm_ext_v8m
21006 TUE("sg", 0, e97fe97f, 0, (), 0, noargs),
21007 TUE("blxns", 0, 4784, 1, (RRnpc), 0, t_blx),
21008 TUE("bxns", 0, 4704, 1, (RRnpc), 0, t_bx),
21009 TUE("tt", 0, e840f000, 2, (RRnpc, RRnpc), 0, tt),
21010 TUE("ttt", 0, e840f040, 2, (RRnpc, RRnpc), 0, tt),
21011 TUE("tta", 0, e840f080, 2, (RRnpc, RRnpc), 0, tt),
21012 TUE("ttat", 0, e840f0c0, 2, (RRnpc, RRnpc), 0, tt),
21013
21014 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
21015 instructions behave as nop if no VFP is present. */
21016 #undef THUMB_VARIANT
21017 #define THUMB_VARIANT & arm_ext_v8m_main
21018 TUEc("vlldm", 0, ec300a00, 1, (RRnpc), rn),
21019 TUEc("vlstm", 0, ec200a00, 1, (RRnpc), rn),
21020 };
21021 #undef ARM_VARIANT
21022 #undef THUMB_VARIANT
21023 #undef TCE
21024 #undef TUE
21025 #undef TUF
21026 #undef TCC
21027 #undef cCE
21028 #undef cCL
21029 #undef C3E
21030 #undef CE
21031 #undef CM
21032 #undef UE
21033 #undef UF
21034 #undef UT
21035 #undef NUF
21036 #undef nUF
21037 #undef NCE
21038 #undef nCE
21039 #undef OPS0
21040 #undef OPS1
21041 #undef OPS2
21042 #undef OPS3
21043 #undef OPS4
21044 #undef OPS5
21045 #undef OPS6
21046 #undef do_0
21047 \f
21048 /* MD interface: bits in the object file. */
21049
21050 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
21051 for use in the a.out file, and stores them in the array pointed to by buf.
21052 This knows about the endian-ness of the target machine and does
21053 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
   2 (short) and 4 (long).  Floating numbers are put out as a series of
21055 LITTLENUMS (shorts, here at least). */
21056
21057 void
21058 md_number_to_chars (char * buf, valueT val, int n)
21059 {
21060 if (target_big_endian)
21061 number_to_chars_bigendian (buf, val, n);
21062 else
21063 number_to_chars_littleendian (buf, val, n);
21064 }
21065
21066 static valueT
21067 md_chars_to_number (char * buf, int n)
21068 {
21069 valueT result = 0;
21070 unsigned char * where = (unsigned char *) buf;
21071
21072 if (target_big_endian)
21073 {
21074 while (n--)
21075 {
21076 result <<= 8;
21077 result |= (*where++ & 255);
21078 }
21079 }
21080 else
21081 {
21082 while (n--)
21083 {
21084 result <<= 8;
21085 result |= (where[n] & 255);
21086 }
21087 }
21088
21089 return result;
21090 }
21091
21092 /* MD interface: Sections. */
21093
21094 /* Calculate the maximum variable size (i.e., excluding fr_fix)
21095 that an rs_machine_dependent frag may reach. */
21096
unsigned int
arm_frag_max_var (fragS *fragp)
{
  /* We only use rs_machine_dependent for variable-size Thumb instructions,
     which are either THUMB_SIZE (2) or INSN_SIZE (4).

     Note that we generate relaxable instructions even for cases that don't
     really need it, like an immediate that's a trivial constant.  So we're
     overestimating the instruction size for some of those cases.  Rather
     than putting more intelligence here, it would probably be better to
     avoid generating a relaxation frag in the first place when it can be
     determined up front that a short instruction will suffice.  */

  gas_assert (fragp->fr_type == rs_machine_dependent);
  /* Every relaxable Thumb frag can grow to at most a 32-bit encoding.  */
  return INSN_SIZE;
}
21113
21114 /* Estimate the size of a frag before relaxing. Assume everything fits in
21115 2 bytes. */
21116
21117 int
21118 md_estimate_size_before_relax (fragS * fragp,
21119 segT segtype ATTRIBUTE_UNUSED)
21120 {
21121 fragp->fr_var = 2;
21122 return 2;
21123 }
21124
21125 /* Convert a machine dependent frag. */
21126
void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* The variable part of the frag starts right after the fixed part.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  /* Fetch the original narrow (16-bit) encoding; its register fields are
     transplanted into the wide encoding in the cases below.  */
  old_op = bfd_get_16(abfd, buf);
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    /* Loads and stores: widen to the Thumb-2 12-bit-offset form when the
       relaxation pass decided 4 bytes are needed (fr_var == 4).  */
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Opcode groups 4 and 9 encode Rt in bits 8-10 of the narrow
	     form; others keep Rt in bits 0-2 and Rn in bits 3-5.  */
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the PC-relative literal load form is PC-relative.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;

    /* ADR: wide form is ADD Rd, PC, #imm12.  */
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* Compensate for the narrow encoding's implicit PC offset.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;

    /* Immediate data-processing ops widened to the modified-immediate
       Thumb-2 form.  */
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* MOV/MOVS place Rd in bits 8-10 of the wide insn; CMP/CMN
	     place the register in the Rn field, 8 bits higher.  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;

    /* Unconditional branch: 12-bit narrow vs 25-bit wide range.  */
    case T_MNEM_b:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;

    /* Conditional branch: the condition field moves from bits 8-11 of
       the narrow form to bits 22-25 of the wide form.  */
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;

    /* SP/PC-relative add and SP adjustment forms.  */
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    /* Three-operand immediate add/subtract.  */
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 (S) distinguishes the plain-binary immediate form
	     from the modified-immediate form.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    default:
      abort ();
    }
  /* Emit the fixup for the (now fixed-size) instruction and absorb the
     variable part into the fixed part of the frag.  */
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;

  /* Set whether we use thumb-2 ISA based on final relaxation results.  */
  if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
      && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
    ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
}
21298
21299 /* Return the size of a relaxable immediate operand instruction.
21300 SHIFT and SIZE specify the form of the allowable immediate. */
21301 static int
21302 relax_immediate (fragS *fragp, int size, int shift)
21303 {
21304 offsetT offset;
21305 offsetT mask;
21306 offsetT low;
21307
21308 /* ??? Should be able to do better than this. */
21309 if (fragp->fr_symbol)
21310 return 4;
21311
21312 low = (1 << shift) - 1;
21313 mask = (1 << (shift + size)) - (1 << shift);
21314 offset = fragp->fr_offset;
21315 /* Force misaligned offsets to 32-bit variant. */
21316 if (offset & low)
21317 return 4;
21318 if (offset & ~mask)
21319 return 4;
21320 return 2;
21321 }
21322
/* Get the address of a symbol during relaxation.  FRAGP is the frag
   being relaxed and STRETCH the net amount by which earlier frags have
   already grown or shrunk on this pass.  */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* An intervening alignment frag absorbs part of the
		 stretch; round it towards zero to the frag's
		 alignment boundary.  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      if (stretch == 0)
		break;
	    }
	}
      /* Only apply the stretch if the symbol's frag lies after FRAGP
	 (the loop found it before hitting the end of the chain).  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
21372
21373 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
21374 load. */
21375 static int
21376 relax_adr (fragS *fragp, asection *sec, long stretch)
21377 {
21378 addressT addr;
21379 offsetT val;
21380
21381 /* Assume worst case for symbols not known to be in the same section. */
21382 if (fragp->fr_symbol == NULL
21383 || !S_IS_DEFINED (fragp->fr_symbol)
21384 || sec != S_GET_SEGMENT (fragp->fr_symbol)
21385 || S_IS_WEAK (fragp->fr_symbol))
21386 return 4;
21387
21388 val = relaxed_symbol_addr (fragp, stretch);
21389 addr = fragp->fr_address + fragp->fr_fix;
21390 addr = (addr + 4) & ~3;
21391 /* Force misaligned targets to 32-bit variant. */
21392 if (val & 3)
21393 return 4;
21394 val -= addr;
21395 if (val < 0 || val > 1020)
21396 return 4;
21397 return 2;
21398 }
21399
21400 /* Return the size of a relaxable add/sub immediate instruction. */
21401 static int
21402 relax_addsub (fragS *fragp, asection *sec)
21403 {
21404 char *buf;
21405 int op;
21406
21407 buf = fragp->fr_literal + fragp->fr_fix;
21408 op = bfd_get_16(sec->owner, buf);
21409 if ((op & 0xf) == ((op >> 4) & 0xf))
21410 return relax_immediate (fragp, 8, 0);
21411 else
21412 return relax_immediate (fragp, 3, 0);
21413 }
21414
/* Return TRUE iff the definition of symbol S could be pre-empted
   (overridden) at link or load time.  Used during branch relaxation:
   a preemptible target must keep the wide branch encoding.  */
static bfd_boolean
symbol_preemptible (symbolS *s)
{
  /* Weak symbols can always be pre-empted.  */
  if (S_IS_WEAK (s))
    return TRUE;

  /* Non-global symbols cannot be pre-empted.  */
  if (! S_IS_EXTERNAL (s))
    return FALSE;

#ifdef OBJ_ELF
  /* In ELF, a global symbol can be marked protected, or private.  In that
     case it can't be pre-empted (other definitions in the same link unit
     would violate the ODR).  */
  if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
    return FALSE;
#endif

  /* Other global symbols might be pre-empted.  */
  return TRUE;
}
21439
/* Return the size of a relaxable branch instruction.  BITS is the
   size of the offset field in the narrow instruction.  Returns 2 when
   the narrow encoding can reach the target, 4 otherwise.  */

static int
relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
{
  addressT addr;
  offsetT val;
  offsetT limit;

  /* Assume worst case for symbols not known to be in the same section.  */
  if (!S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol)
      || S_IS_WEAK (fragp->fr_symbol))
    return 4;

#ifdef OBJ_ELF
  /* A branch to a function in ARM state will require interworking.  */
  if (S_IS_DEFINED (fragp->fr_symbol)
      && ARM_IS_FUNC (fragp->fr_symbol))
    return 4;
#endif

  /* A preemptible symbol's final target is not known at assembly
     time, so keep the wide form.  */
  if (symbol_preemptible (fragp->fr_symbol))
    return 4;

  val = relaxed_symbol_addr (fragp, stretch);
  /* Branch offsets are relative to PC, i.e. this instruction + 4.  */
  addr = fragp->fr_address + fragp->fr_fix + 4;
  val -= addr;

  /* Offset is a signed value *2 */
  limit = 1 << bits;
  if (val >= limit || val < -limit)
    return 4;
  return 2;
}
21476
21477
/* Relax a machine dependent frag.  This returns the amount by which
   the current size of the frag should change.  */

int
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;
  /* Dispatch on the mnemonic recorded in fr_subtype; each helper
     returns the new size (2 for narrow, 4 for wide).  */
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      newsize = relax_immediate (fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      newsize = relax_immediate (fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      newsize = relax_immediate (fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      newsize = relax_immediate (fragp, 8, 0);
      break;
    case T_MNEM_b:
      newsize = relax_branch (fragp, sec, 11, stretch);
      break;
    case T_MNEM_bcond:
      newsize = relax_branch (fragp, sec, 8, stretch);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      newsize = relax_immediate (fragp, 7, 2);
      break;
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      newsize = relax_addsub (fragp, sec);
      break;
    default:
      abort ();
    }

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
    {
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane (fragp);
    }

  return newsize - oldsize;
}
21556
/* Round up a section size to the appropriate boundary.  For most
   object formats the size is returned unchanged; only a.out needs
   explicit padding here.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED,
		  valueT size)
{
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
  if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
    {
      /* For a.out, force the section size to be aligned.  If we don't do
	 this, BFD will align it for us, but it will not write out the
	 final bytes of the section.  This may be a bug in BFD, but it is
	 easier to fix it here since that is how the other a.out targets
	 work.  */
      int align;

      align = bfd_get_section_alignment (stdoutput, segment);
      size = ((size + (1 << align) - 1) & (-((valueT) 1 << align)));
    }
#endif

  return size;
}
21580
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment: zero-pad up to instruction alignment,
   then fill the rest with no-op instructions appropriate for the
   recorded ARM/Thumb mode and selected architecture.  */

void
arm_handle_align (fragS * fragP)
{
  static unsigned char const arm_noop[2][2][4] =
    {
      {  /* ARMv1 */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      {  /* ARMv6k */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  static unsigned char const thumb_noop[2][2][2] =
    {
      {  /* Thumb-1 */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      {  /* Thumb-2 */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  static unsigned char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const unsigned char * noop;
  const unsigned char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  /* NOTE(review): the AND only acts as a modulus if
     MAX_MEM_FOR_RS_ALIGN_CODE + 1 is a power of two -- presumed true,
     confirm against tc-arm.h.  */
  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      /* Thumb code: use wide noops (with one narrow noop for odd
	 half-word counts) when Thumb-2 is available.  */
      if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
	{
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
					   ? selected_cpu : arm_arch_none,
					   arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  /* Zero-pad any leading bytes that would misalign the noops, and
     mark them as data for the mapping symbols.  */
  if (bytes & (noop_size - 1))
    {
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
21700
21701 /* Called from md_do_align. Used to create an alignment
21702 frag in a code section. */
21703
21704 void
21705 arm_frag_align_code (int n, int max)
21706 {
21707 char * p;
21708
21709 /* We assume that there will never be a requirement
21710 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
21711 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
21712 {
21713 char err_msg[128];
21714
21715 sprintf (err_msg,
21716 _("alignments greater than %d bytes not supported in .text sections."),
21717 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
21718 as_fatal ("%s", err_msg);
21719 }
21720
21721 p = frag_var (rs_align_code,
21722 MAX_MEM_FOR_RS_ALIGN_CODE,
21723 1,
21724 (relax_substateT) max,
21725 (symbolS *) NULL,
21726 (offsetT) n,
21727 (char *) NULL);
21728 *p = 0;
21729 }
21730
/* Perform target specific initialisation of a frag.
   Note - despite the name this initialisation is not done when the frag
   is created, but only when its type is assigned.  A frag can be created
   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */

#ifndef OBJ_ELF
void
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
{
  /* Record whether this frag is in an ARM or a THUMB area.  */
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
}
21744
#else /* OBJ_ELF is defined.  */
/* ELF variant: also emits mapping symbols ($a/$t/$d) so disassemblers
   can tell instructions from data.  */
void
arm_init_frag (fragS * fragP, int max_chars)
{
  int frag_thumb_mode;

  /* If the current ARM vs THUMB mode has not already
     been recorded into this frag then do so now.  */
  if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
    fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  /* Strip the MODE_RECORDED flag (known set here), leaving the
     recorded Thumb/ARM mode bit.  */
  frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED;

  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  switch (fragP->fr_type)
    {
    case rs_align:
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (frag_thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
      break;
    default:
      break;
    }
}
21774
21775 /* When we change sections we need to issue a new mapping symbol. */
21776
21777 void
21778 arm_elf_change_section (void)
21779 {
21780 /* Link an unlinked unwind index table section to the .text section. */
21781 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
21782 && elf_linked_to_section (now_seg) == NULL)
21783 elf_linked_to_section (now_seg) = text_section;
21784 }
21785
21786 int
21787 arm_elf_section_type (const char * str, size_t len)
21788 {
21789 if (len == 5 && strncmp (str, "exidx", 5) == 0)
21790 return SHT_ARM_EXIDX;
21791
21792 return -1;
21793 }
21794 \f
21795 /* Code to deal with unwinding tables. */
21796
21797 static void add_unwind_adjustsp (offsetT);
21798
21799 /* Generate any deferred unwind frame offset. */
21800
21801 static void
21802 flush_pending_unwind (void)
21803 {
21804 offsetT offset;
21805
21806 offset = unwind.pending_offset;
21807 unwind.pending_offset = 0;
21808 if (offset != 0)
21809 add_unwind_adjustsp (offset);
21810 }
21811
21812 /* Add an opcode to this list for this function. Two-byte opcodes should
21813 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
21814 order. */
21815
21816 static void
21817 add_unwind_opcode (valueT op, int length)
21818 {
21819 /* Add any deferred stack adjustment. */
21820 if (unwind.pending_offset)
21821 flush_pending_unwind ();
21822
21823 unwind.sp_restored = 0;
21824
21825 if (unwind.opcode_count + length > unwind.opcode_alloc)
21826 {
21827 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
21828 if (unwind.opcodes)
21829 unwind.opcodes = XRESIZEVEC (unsigned char, unwind.opcodes,
21830 unwind.opcode_alloc);
21831 else
21832 unwind.opcodes = XNEWVEC (unsigned char, unwind.opcode_alloc);
21833 }
21834 while (length > 0)
21835 {
21836 length--;
21837 unwind.opcodes[unwind.opcode_count] = op & 0xff;
21838 op >>= 8;
21839 unwind.opcode_count++;
21840 }
21841 }
21842
/* Add unwind opcodes to adjust the stack pointer by OFFSET bytes.
   Positive offsets use the vsp-increment opcodes, negative offsets the
   vsp-decrement ones.  Remember the opcode list is built in reverse.  */

static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      if (o == 0)
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  if (o)
	    bytes[n] |= 0x80;
	  n++;
	}
      /* Add the insn.  Because the list is reversed, emit the uleb128
	 bytes last-first, then the 0xb2 opcode itself.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Negative adjustment: emit 0x7f opcodes for each full 0x100,
	 then one opcode for the remainder.  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
21904
21905 /* Finish the list of unwind opcodes for this function. */
21906 static void
21907 finish_unwind_opcodes (void)
21908 {
21909 valueT op;
21910
21911 if (unwind.fp_used)
21912 {
21913 /* Adjust sp as necessary. */
21914 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
21915 flush_pending_unwind ();
21916
21917 /* After restoring sp from the frame pointer. */
21918 op = 0x90 | unwind.fp_reg;
21919 add_unwind_opcode (op, 1);
21920 }
21921 else
21922 flush_pending_unwind ();
21923 }
21924
21925
/* Start an exception table entry.  If idx is nonzero this is an index table
   entry, emitted into an SHT_ARM_EXIDX section; otherwise the entry goes
   into the unwind-info (extab) section.  The section name is derived from
   TEXT_SEG's name, and COMDAT grouping is propagated from it.  */

static void
start_unwind_section (const segT text_seg, int idx)
{
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  const char * group_name;
  char * sec_name;
  int type;
  int flags;
  int linkonce;

  if (idx)
    {
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
    }
  else
    {
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
    }

  text_name = segment_name (text_seg);
  /* Plain ".text" gets no suffix appended to the prefix.  */
  if (streq (text_name, ".text"))
    text_name = "";

  /* Old-style linkonce sections use the "once" prefix plus the
     original section's suffix.  */
  if (strncmp (text_name, ".gnu.linkonce.t.",
	       strlen (".gnu.linkonce.t.")) == 0)
    {
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
    }

  sec_name = concat (prefix, text_name, (char *) NULL);

  flags = SHF_ALLOC;
  linkonce = 0;
  group_name = 0;

  /* Handle COMDAT group.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
    {
      group_name = elf_group_name (text_seg);
      if (group_name == NULL)
	{
	  as_bad (_("Group section `%s' has no group signature"),
		  segment_name (text_seg));
	  ignore_rest_of_line ();
	  return;
	}
      flags |= SHF_GROUP;
      linkonce = 1;
    }

  obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);

  /* Set the section link for index tables.  */
  if (idx)
    elf_linked_to_section (now_seg) = text_seg;
}
21992
21993
/* Start an unwind table entry.  HAVE_DATA is nonzero if we have additional
   personality routine data.  Returns zero, or the index table value for
   an inline entry (including EXIDX_CANTUNWIND).  */

static valueT
create_unwind_entry (int have_data)
{
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.  */
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
	{
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table.  */
	      data = 0x80;
	      n = 3;
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.  */
	size = unwind.opcode_count - 2;
    }
  else
    {
      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
      if (unwind.personality_index != -1)
	{
	  as_bad (_("attempt to recreate an unwind entry"));
	  return 1;
	}

      /* An extra byte is required for the opcode count.  */
      size = unwind.opcode_count + 1;
    }

  /* Round the opcode bytes up to a whole number of words.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  /* Table entries are word aligned.  */
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  /* PR 13449: Zero the table entries in case some of them are not used.  */
  memset (ptr, 0, (size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.  */
      data = size > 0 ? size - 1 : 0;
      n = 3;
      break;

      /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.  */
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
22162
22163
/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_arm_frame_initial_instructions (void)
{
  /* On entry the CFA is the stack pointer with zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
22171 #endif /* OBJ_ELF */
22172
22173 /* Convert REGNAME to a DWARF-2 register number. */
22174
22175 int
22176 tc_arm_regname_to_dw2regnum (char *regname)
22177 {
22178 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
22179 if (reg != FAIL)
22180 return reg;
22181
22182 /* PR 16694: Allow VFP registers as well. */
22183 reg = arm_reg_parse (&regname, REG_TYPE_VFS);
22184 if (reg != FAIL)
22185 return 64 + reg;
22186
22187 reg = arm_reg_parse (&regname, REG_TYPE_VFD);
22188 if (reg != FAIL)
22189 return reg + 256;
22190
22191 return -1;
22192 }
22193
22194 #ifdef TE_PE
22195 void
22196 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
22197 {
22198 expressionS exp;
22199
22200 exp.X_op = O_secrel;
22201 exp.X_add_symbol = symbol;
22202 exp.X_add_number = 0;
22203 emit_expr (&exp, size);
22204 }
22205 #endif
22206
22207 /* MD interface: Symbol and relocation handling. */
22208
/* Return the address within the segment that a PC-relative fixup is
   relative to.  For ARM, PC-relative fixups applied to instructions
   are generally relative to the location of the fixup plus 8 bytes.
   Thumb branches are offset by 4, and Thumb loads relative to PC
   require special handling.  */

long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* For a locally resolved BL to an ARM function, restore the
	 real base so the fixup can be applied directly.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
22336
22337 static bfd_boolean flag_warn_syms = TRUE;
22338
22339 bfd_boolean
22340 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
22341 {
22342 /* PR 18347 - Warn if the user attempts to create a symbol with the same
22343 name as an ARM instruction. Whilst strictly speaking it is allowed, it
22344 does mean that the resulting code might be very confusing to the reader.
22345 Also this warning can be triggered if the user omits an operand before
22346 an immediate address, eg:
22347
22348 LDR =foo
22349
22350 GAS treats this as an assignment of the value of the symbol foo to a
22351 symbol LDR, and so (without this code) it will not issue any kind of
22352 warning or error message.
22353
22354 Note - ARM instructions are case-insensitive but the strings in the hash
22355 table are all stored in lower case, so we must first ensure that name is
22356 lower case too. */
22357 if (flag_warn_syms && arm_ops_hsh)
22358 {
22359 char * nbuf = strdup (name);
22360 char * p;
22361
22362 for (p = nbuf; *p; p++)
22363 *p = TOLOWER (*p);
22364 if (hash_find (arm_ops_hsh, nbuf) != NULL)
22365 {
22366 static struct hash_control * already_warned = NULL;
22367
22368 if (already_warned == NULL)
22369 already_warned = hash_new ();
22370 /* Only warn about the symbol once. To keep the code
22371 simple we let hash_insert do the lookup for us. */
22372 if (hash_insert (already_warned, name, NULL) == NULL)
22373 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
22374 }
22375 else
22376 free (nbuf);
22377 }
22378
22379 return FALSE;
22380 }
22381
22382 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
22383 Otherwise we have no need to default values of symbols. */
22384
22385 symbolS *
22386 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
22387 {
22388 #ifdef OBJ_ELF
22389 if (name[0] == '_' && name[1] == 'G'
22390 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
22391 {
22392 if (!GOT_symbol)
22393 {
22394 if (symbol_find (name))
22395 as_bad (_("GOT already in the symbol table"));
22396
22397 GOT_symbol = symbol_new (name, undefined_section,
22398 (valueT) 0, & zero_address_frag);
22399 }
22400
22401 return GOT_symbol;
22402 }
22403 #endif
22404
22405 return NULL;
22406 }
22407
22408 /* Subroutine of md_apply_fix. Check to see if an immediate can be
22409 computed as two separate immediate values, added together. We
22410 already know that this value cannot be computed by just one ARM
22411 instruction. */
22412
22413 static unsigned int
22414 validate_immediate_twopart (unsigned int val,
22415 unsigned int * highpart)
22416 {
22417 unsigned int a;
22418 unsigned int i;
22419
22420 for (i = 0; i < 32; i += 2)
22421 if (((a = rotate_left (val, i)) & 0xff) != 0)
22422 {
22423 if (a & 0xff00)
22424 {
22425 if (a & ~ 0xffff)
22426 continue;
22427 * highpart = (a >> 8) | ((i + 24) << 7);
22428 }
22429 else if (a & 0xff0000)
22430 {
22431 if (a & 0xff000000)
22432 continue;
22433 * highpart = (a >> 16) | ((i + 16) << 7);
22434 }
22435 else
22436 {
22437 gas_assert (a & 0xff000000);
22438 * highpart = (a >> 24) | ((i + 8) << 7);
22439 }
22440
22441 return (a & 0xff) | (i << 7);
22442 }
22443
22444 return FAIL;
22445 }
22446
22447 static int
22448 validate_offset_imm (unsigned int val, int hwse)
22449 {
22450 if ((hwse && val > 255) || val > 4095)
22451 return FAIL;
22452 return val;
22453 }
22454
22455 /* Subroutine of md_apply_fix. Do those data_ops which can take a
22456 negative immediate constant by altering the instruction. A bit of
22457 a hack really.
22458 MOV <-> MVN
22459 AND <-> BIC
22460 ADC <-> SBC
22461 by inverting the second operand, and
22462 ADD <-> SUB
22463 CMP <-> CMN
22464 by negating the second operand. */
22465
22466 static int
22467 negate_data_op (unsigned long * instruction,
22468 unsigned long value)
22469 {
22470 int op, new_inst;
22471 unsigned long negated, inverted;
22472
22473 negated = encode_arm_immediate (-value);
22474 inverted = encode_arm_immediate (~value);
22475
22476 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
22477 switch (op)
22478 {
22479 /* First negates. */
22480 case OPCODE_SUB: /* ADD <-> SUB */
22481 new_inst = OPCODE_ADD;
22482 value = negated;
22483 break;
22484
22485 case OPCODE_ADD:
22486 new_inst = OPCODE_SUB;
22487 value = negated;
22488 break;
22489
22490 case OPCODE_CMP: /* CMP <-> CMN */
22491 new_inst = OPCODE_CMN;
22492 value = negated;
22493 break;
22494
22495 case OPCODE_CMN:
22496 new_inst = OPCODE_CMP;
22497 value = negated;
22498 break;
22499
22500 /* Now Inverted ops. */
22501 case OPCODE_MOV: /* MOV <-> MVN */
22502 new_inst = OPCODE_MVN;
22503 value = inverted;
22504 break;
22505
22506 case OPCODE_MVN:
22507 new_inst = OPCODE_MOV;
22508 value = inverted;
22509 break;
22510
22511 case OPCODE_AND: /* AND <-> BIC */
22512 new_inst = OPCODE_BIC;
22513 value = inverted;
22514 break;
22515
22516 case OPCODE_BIC:
22517 new_inst = OPCODE_AND;
22518 value = inverted;
22519 break;
22520
22521 case OPCODE_ADC: /* ADC <-> SBC */
22522 new_inst = OPCODE_SBC;
22523 value = inverted;
22524 break;
22525
22526 case OPCODE_SBC:
22527 new_inst = OPCODE_ADC;
22528 value = inverted;
22529 break;
22530
22531 /* We cannot do anything. */
22532 default:
22533 return FAIL;
22534 }
22535
22536 if (value == (unsigned) FAIL)
22537 return FAIL;
22538
22539 *instruction &= OPCODE_MASK;
22540 *instruction |= new_inst << DATA_OP_SHIFT;
22541 return value;
22542 }
22543
22544 /* Like negate_data_op, but for Thumb-2. */
22545
22546 static unsigned int
22547 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
22548 {
22549 int op, new_inst;
22550 int rd;
22551 unsigned int negated, inverted;
22552
22553 negated = encode_thumb32_immediate (-value);
22554 inverted = encode_thumb32_immediate (~value);
22555
22556 rd = (*instruction >> 8) & 0xf;
22557 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
22558 switch (op)
22559 {
22560 /* ADD <-> SUB. Includes CMP <-> CMN. */
22561 case T2_OPCODE_SUB:
22562 new_inst = T2_OPCODE_ADD;
22563 value = negated;
22564 break;
22565
22566 case T2_OPCODE_ADD:
22567 new_inst = T2_OPCODE_SUB;
22568 value = negated;
22569 break;
22570
22571 /* ORR <-> ORN. Includes MOV <-> MVN. */
22572 case T2_OPCODE_ORR:
22573 new_inst = T2_OPCODE_ORN;
22574 value = inverted;
22575 break;
22576
22577 case T2_OPCODE_ORN:
22578 new_inst = T2_OPCODE_ORR;
22579 value = inverted;
22580 break;
22581
22582 /* AND <-> BIC. TST has no inverted equivalent. */
22583 case T2_OPCODE_AND:
22584 new_inst = T2_OPCODE_BIC;
22585 if (rd == 15)
22586 value = FAIL;
22587 else
22588 value = inverted;
22589 break;
22590
22591 case T2_OPCODE_BIC:
22592 new_inst = T2_OPCODE_AND;
22593 value = inverted;
22594 break;
22595
22596 /* ADC <-> SBC */
22597 case T2_OPCODE_ADC:
22598 new_inst = T2_OPCODE_SBC;
22599 value = inverted;
22600 break;
22601
22602 case T2_OPCODE_SBC:
22603 new_inst = T2_OPCODE_ADC;
22604 value = inverted;
22605 break;
22606
22607 /* We cannot do anything. */
22608 default:
22609 return FAIL;
22610 }
22611
22612 if (value == (unsigned int)FAIL)
22613 return FAIL;
22614
22615 *instruction &= T2_OPCODE_MASK;
22616 *instruction |= new_inst << T2_DATA_OP_SHIFT;
22617 return value;
22618 }
22619
22620 /* Read a 32-bit thumb instruction from buf. */
22621 static unsigned long
22622 get_thumb32_insn (char * buf)
22623 {
22624 unsigned long insn;
22625 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
22626 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22627
22628 return insn;
22629 }
22630
22631
22632 /* We usually want to set the low bit on the address of thumb function
22633 symbols. In particular .word foo - . should have the low bit set.
22634 Generic code tries to fold the difference of two symbols to
22635 a constant. Prevent this and force a relocation when the first symbols
22636 is a thumb function. */
22637
22638 bfd_boolean
22639 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
22640 {
22641 if (op == O_subtract
22642 && l->X_op == O_symbol
22643 && r->X_op == O_symbol
22644 && THUMB_IS_FUNC (l->X_add_symbol))
22645 {
22646 l->X_op = O_subtract;
22647 l->X_op_symbol = r->X_add_symbol;
22648 l->X_add_number -= r->X_add_number;
22649 return TRUE;
22650 }
22651
22652 /* Process as normal. */
22653 return FALSE;
22654 }
22655
22656 /* Encode Thumb2 unconditional branches and calls. The encoding
22657 for the 2 are identical for the immediate values. */
22658
22659 static void
22660 encode_thumb2_b_bl_offset (char * buf, offsetT value)
22661 {
22662 #define T2I1I2MASK ((1 << 13) | (1 << 11))
22663 offsetT newval;
22664 offsetT newval2;
22665 addressT S, I1, I2, lo, hi;
22666
22667 S = (value >> 24) & 0x01;
22668 I1 = (value >> 23) & 0x01;
22669 I2 = (value >> 22) & 0x01;
22670 hi = (value >> 12) & 0x3ff;
22671 lo = (value >> 1) & 0x7ff;
22672 newval = md_chars_to_number (buf, THUMB_SIZE);
22673 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22674 newval |= (S << 10) | hi;
22675 newval2 &= ~T2I1I2MASK;
22676 newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
22677 md_number_to_chars (buf, newval, THUMB_SIZE);
22678 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
22679 }
22680
22681 void
22682 md_apply_fix (fixS * fixP,
22683 valueT * valP,
22684 segT seg)
22685 {
22686 offsetT value = * valP;
22687 offsetT newval;
22688 unsigned int newimm;
22689 unsigned long temp;
22690 int sign;
22691 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
22692
22693 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
22694
22695 /* Note whether this will delete the relocation. */
22696
22697 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
22698 fixP->fx_done = 1;
22699
22700 /* On a 64-bit host, silently truncate 'value' to 32 bits for
22701 consistency with the behaviour on 32-bit hosts. Remember value
22702 for emit_reloc. */
22703 value &= 0xffffffff;
22704 value ^= 0x80000000;
22705 value -= 0x80000000;
22706
22707 *valP = value;
22708 fixP->fx_addnumber = value;
22709
22710 /* Same treatment for fixP->fx_offset. */
22711 fixP->fx_offset &= 0xffffffff;
22712 fixP->fx_offset ^= 0x80000000;
22713 fixP->fx_offset -= 0x80000000;
22714
22715 switch (fixP->fx_r_type)
22716 {
22717 case BFD_RELOC_NONE:
22718 /* This will need to go in the object file. */
22719 fixP->fx_done = 0;
22720 break;
22721
22722 case BFD_RELOC_ARM_IMMEDIATE:
22723 /* We claim that this fixup has been processed here,
22724 even if in fact we generate an error because we do
22725 not have a reloc for it, so tc_gen_reloc will reject it. */
22726 fixP->fx_done = 1;
22727
22728 if (fixP->fx_addsy)
22729 {
22730 const char *msg = 0;
22731
22732 if (! S_IS_DEFINED (fixP->fx_addsy))
22733 msg = _("undefined symbol %s used as an immediate value");
22734 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
22735 msg = _("symbol %s is in a different section");
22736 else if (S_IS_WEAK (fixP->fx_addsy))
22737 msg = _("symbol %s is weak and may be overridden later");
22738
22739 if (msg)
22740 {
22741 as_bad_where (fixP->fx_file, fixP->fx_line,
22742 msg, S_GET_NAME (fixP->fx_addsy));
22743 break;
22744 }
22745 }
22746
22747 temp = md_chars_to_number (buf, INSN_SIZE);
22748
22749 /* If the offset is negative, we should use encoding A2 for ADR. */
22750 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
22751 newimm = negate_data_op (&temp, value);
22752 else
22753 {
22754 newimm = encode_arm_immediate (value);
22755
22756 /* If the instruction will fail, see if we can fix things up by
22757 changing the opcode. */
22758 if (newimm == (unsigned int) FAIL)
22759 newimm = negate_data_op (&temp, value);
22760 }
22761
22762 if (newimm == (unsigned int) FAIL)
22763 {
22764 as_bad_where (fixP->fx_file, fixP->fx_line,
22765 _("invalid constant (%lx) after fixup"),
22766 (unsigned long) value);
22767 break;
22768 }
22769
22770 newimm |= (temp & 0xfffff000);
22771 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
22772 break;
22773
22774 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
22775 {
22776 unsigned int highpart = 0;
22777 unsigned int newinsn = 0xe1a00000; /* nop. */
22778
22779 if (fixP->fx_addsy)
22780 {
22781 const char *msg = 0;
22782
22783 if (! S_IS_DEFINED (fixP->fx_addsy))
22784 msg = _("undefined symbol %s used as an immediate value");
22785 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
22786 msg = _("symbol %s is in a different section");
22787 else if (S_IS_WEAK (fixP->fx_addsy))
22788 msg = _("symbol %s is weak and may be overridden later");
22789
22790 if (msg)
22791 {
22792 as_bad_where (fixP->fx_file, fixP->fx_line,
22793 msg, S_GET_NAME (fixP->fx_addsy));
22794 break;
22795 }
22796 }
22797
22798 newimm = encode_arm_immediate (value);
22799 temp = md_chars_to_number (buf, INSN_SIZE);
22800
22801 /* If the instruction will fail, see if we can fix things up by
22802 changing the opcode. */
22803 if (newimm == (unsigned int) FAIL
22804 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
22805 {
22806 /* No ? OK - try using two ADD instructions to generate
22807 the value. */
22808 newimm = validate_immediate_twopart (value, & highpart);
22809
22810 /* Yes - then make sure that the second instruction is
22811 also an add. */
22812 if (newimm != (unsigned int) FAIL)
22813 newinsn = temp;
22814 /* Still No ? Try using a negated value. */
22815 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
22816 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
22817 /* Otherwise - give up. */
22818 else
22819 {
22820 as_bad_where (fixP->fx_file, fixP->fx_line,
22821 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
22822 (long) value);
22823 break;
22824 }
22825
22826 /* Replace the first operand in the 2nd instruction (which
22827 is the PC) with the destination register. We have
22828 already added in the PC in the first instruction and we
22829 do not want to do it again. */
22830 newinsn &= ~ 0xf0000;
22831 newinsn |= ((newinsn & 0x0f000) << 4);
22832 }
22833
22834 newimm |= (temp & 0xfffff000);
22835 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
22836
22837 highpart |= (newinsn & 0xfffff000);
22838 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
22839 }
22840 break;
22841
22842 case BFD_RELOC_ARM_OFFSET_IMM:
22843 if (!fixP->fx_done && seg->use_rela_p)
22844 value = 0;
22845
22846 case BFD_RELOC_ARM_LITERAL:
22847 sign = value > 0;
22848
22849 if (value < 0)
22850 value = - value;
22851
22852 if (validate_offset_imm (value, 0) == FAIL)
22853 {
22854 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
22855 as_bad_where (fixP->fx_file, fixP->fx_line,
22856 _("invalid literal constant: pool needs to be closer"));
22857 else
22858 as_bad_where (fixP->fx_file, fixP->fx_line,
22859 _("bad immediate value for offset (%ld)"),
22860 (long) value);
22861 break;
22862 }
22863
22864 newval = md_chars_to_number (buf, INSN_SIZE);
22865 if (value == 0)
22866 newval &= 0xfffff000;
22867 else
22868 {
22869 newval &= 0xff7ff000;
22870 newval |= value | (sign ? INDEX_UP : 0);
22871 }
22872 md_number_to_chars (buf, newval, INSN_SIZE);
22873 break;
22874
22875 case BFD_RELOC_ARM_OFFSET_IMM8:
22876 case BFD_RELOC_ARM_HWLITERAL:
22877 sign = value > 0;
22878
22879 if (value < 0)
22880 value = - value;
22881
22882 if (validate_offset_imm (value, 1) == FAIL)
22883 {
22884 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
22885 as_bad_where (fixP->fx_file, fixP->fx_line,
22886 _("invalid literal constant: pool needs to be closer"));
22887 else
22888 as_bad_where (fixP->fx_file, fixP->fx_line,
22889 _("bad immediate value for 8-bit offset (%ld)"),
22890 (long) value);
22891 break;
22892 }
22893
22894 newval = md_chars_to_number (buf, INSN_SIZE);
22895 if (value == 0)
22896 newval &= 0xfffff0f0;
22897 else
22898 {
22899 newval &= 0xff7ff0f0;
22900 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
22901 }
22902 md_number_to_chars (buf, newval, INSN_SIZE);
22903 break;
22904
22905 case BFD_RELOC_ARM_T32_OFFSET_U8:
22906 if (value < 0 || value > 1020 || value % 4 != 0)
22907 as_bad_where (fixP->fx_file, fixP->fx_line,
22908 _("bad immediate value for offset (%ld)"), (long) value);
22909 value /= 4;
22910
22911 newval = md_chars_to_number (buf+2, THUMB_SIZE);
22912 newval |= value;
22913 md_number_to_chars (buf+2, newval, THUMB_SIZE);
22914 break;
22915
22916 case BFD_RELOC_ARM_T32_OFFSET_IMM:
22917 /* This is a complicated relocation used for all varieties of Thumb32
22918 load/store instruction with immediate offset:
22919
22920 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
22921 *4, optional writeback(W)
22922 (doubleword load/store)
22923
22924 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
22925 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
22926 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
22927 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
22928 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
22929
22930 Uppercase letters indicate bits that are already encoded at
22931 this point. Lowercase letters are our problem. For the
22932 second block of instructions, the secondary opcode nybble
22933 (bits 8..11) is present, and bit 23 is zero, even if this is
22934 a PC-relative operation. */
22935 newval = md_chars_to_number (buf, THUMB_SIZE);
22936 newval <<= 16;
22937 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
22938
22939 if ((newval & 0xf0000000) == 0xe0000000)
22940 {
22941 /* Doubleword load/store: 8-bit offset, scaled by 4. */
22942 if (value >= 0)
22943 newval |= (1 << 23);
22944 else
22945 value = -value;
22946 if (value % 4 != 0)
22947 {
22948 as_bad_where (fixP->fx_file, fixP->fx_line,
22949 _("offset not a multiple of 4"));
22950 break;
22951 }
22952 value /= 4;
22953 if (value > 0xff)
22954 {
22955 as_bad_where (fixP->fx_file, fixP->fx_line,
22956 _("offset out of range"));
22957 break;
22958 }
22959 newval &= ~0xff;
22960 }
22961 else if ((newval & 0x000f0000) == 0x000f0000)
22962 {
22963 /* PC-relative, 12-bit offset. */
22964 if (value >= 0)
22965 newval |= (1 << 23);
22966 else
22967 value = -value;
22968 if (value > 0xfff)
22969 {
22970 as_bad_where (fixP->fx_file, fixP->fx_line,
22971 _("offset out of range"));
22972 break;
22973 }
22974 newval &= ~0xfff;
22975 }
22976 else if ((newval & 0x00000100) == 0x00000100)
22977 {
22978 /* Writeback: 8-bit, +/- offset. */
22979 if (value >= 0)
22980 newval |= (1 << 9);
22981 else
22982 value = -value;
22983 if (value > 0xff)
22984 {
22985 as_bad_where (fixP->fx_file, fixP->fx_line,
22986 _("offset out of range"));
22987 break;
22988 }
22989 newval &= ~0xff;
22990 }
22991 else if ((newval & 0x00000f00) == 0x00000e00)
22992 {
22993 /* T-instruction: positive 8-bit offset. */
22994 if (value < 0 || value > 0xff)
22995 {
22996 as_bad_where (fixP->fx_file, fixP->fx_line,
22997 _("offset out of range"));
22998 break;
22999 }
23000 newval &= ~0xff;
23001 newval |= value;
23002 }
23003 else
23004 {
23005 /* Positive 12-bit or negative 8-bit offset. */
23006 int limit;
23007 if (value >= 0)
23008 {
23009 newval |= (1 << 23);
23010 limit = 0xfff;
23011 }
23012 else
23013 {
23014 value = -value;
23015 limit = 0xff;
23016 }
23017 if (value > limit)
23018 {
23019 as_bad_where (fixP->fx_file, fixP->fx_line,
23020 _("offset out of range"));
23021 break;
23022 }
23023 newval &= ~limit;
23024 }
23025
23026 newval |= value;
23027 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
23028 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
23029 break;
23030
23031 case BFD_RELOC_ARM_SHIFT_IMM:
23032 newval = md_chars_to_number (buf, INSN_SIZE);
23033 if (((unsigned long) value) > 32
23034 || (value == 32
23035 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
23036 {
23037 as_bad_where (fixP->fx_file, fixP->fx_line,
23038 _("shift expression is too large"));
23039 break;
23040 }
23041
23042 if (value == 0)
23043 /* Shifts of zero must be done as lsl. */
23044 newval &= ~0x60;
23045 else if (value == 32)
23046 value = 0;
23047 newval &= 0xfffff07f;
23048 newval |= (value & 0x1f) << 7;
23049 md_number_to_chars (buf, newval, INSN_SIZE);
23050 break;
23051
23052 case BFD_RELOC_ARM_T32_IMMEDIATE:
23053 case BFD_RELOC_ARM_T32_ADD_IMM:
23054 case BFD_RELOC_ARM_T32_IMM12:
23055 case BFD_RELOC_ARM_T32_ADD_PC12:
23056 /* We claim that this fixup has been processed here,
23057 even if in fact we generate an error because we do
23058 not have a reloc for it, so tc_gen_reloc will reject it. */
23059 fixP->fx_done = 1;
23060
23061 if (fixP->fx_addsy
23062 && ! S_IS_DEFINED (fixP->fx_addsy))
23063 {
23064 as_bad_where (fixP->fx_file, fixP->fx_line,
23065 _("undefined symbol %s used as an immediate value"),
23066 S_GET_NAME (fixP->fx_addsy));
23067 break;
23068 }
23069
23070 newval = md_chars_to_number (buf, THUMB_SIZE);
23071 newval <<= 16;
23072 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
23073
23074 newimm = FAIL;
23075 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
23076 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
23077 {
23078 newimm = encode_thumb32_immediate (value);
23079 if (newimm == (unsigned int) FAIL)
23080 newimm = thumb32_negate_data_op (&newval, value);
23081 }
23082 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
23083 && newimm == (unsigned int) FAIL)
23084 {
23085 /* Turn add/sum into addw/subw. */
23086 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
23087 newval = (newval & 0xfeffffff) | 0x02000000;
23088 /* No flat 12-bit imm encoding for addsw/subsw. */
23089 if ((newval & 0x00100000) == 0)
23090 {
23091 /* 12 bit immediate for addw/subw. */
23092 if (value < 0)
23093 {
23094 value = -value;
23095 newval ^= 0x00a00000;
23096 }
23097 if (value > 0xfff)
23098 newimm = (unsigned int) FAIL;
23099 else
23100 newimm = value;
23101 }
23102 }
23103
23104 if (newimm == (unsigned int)FAIL)
23105 {
23106 as_bad_where (fixP->fx_file, fixP->fx_line,
23107 _("invalid constant (%lx) after fixup"),
23108 (unsigned long) value);
23109 break;
23110 }
23111
23112 newval |= (newimm & 0x800) << 15;
23113 newval |= (newimm & 0x700) << 4;
23114 newval |= (newimm & 0x0ff);
23115
23116 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
23117 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
23118 break;
23119
23120 case BFD_RELOC_ARM_SMC:
23121 if (((unsigned long) value) > 0xffff)
23122 as_bad_where (fixP->fx_file, fixP->fx_line,
23123 _("invalid smc expression"));
23124 newval = md_chars_to_number (buf, INSN_SIZE);
23125 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
23126 md_number_to_chars (buf, newval, INSN_SIZE);
23127 break;
23128
23129 case BFD_RELOC_ARM_HVC:
23130 if (((unsigned long) value) > 0xffff)
23131 as_bad_where (fixP->fx_file, fixP->fx_line,
23132 _("invalid hvc expression"));
23133 newval = md_chars_to_number (buf, INSN_SIZE);
23134 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
23135 md_number_to_chars (buf, newval, INSN_SIZE);
23136 break;
23137
23138 case BFD_RELOC_ARM_SWI:
23139 if (fixP->tc_fix_data != 0)
23140 {
23141 if (((unsigned long) value) > 0xff)
23142 as_bad_where (fixP->fx_file, fixP->fx_line,
23143 _("invalid swi expression"));
23144 newval = md_chars_to_number (buf, THUMB_SIZE);
23145 newval |= value;
23146 md_number_to_chars (buf, newval, THUMB_SIZE);
23147 }
23148 else
23149 {
23150 if (((unsigned long) value) > 0x00ffffff)
23151 as_bad_where (fixP->fx_file, fixP->fx_line,
23152 _("invalid swi expression"));
23153 newval = md_chars_to_number (buf, INSN_SIZE);
23154 newval |= value;
23155 md_number_to_chars (buf, newval, INSN_SIZE);
23156 }
23157 break;
23158
23159 case BFD_RELOC_ARM_MULTI:
23160 if (((unsigned long) value) > 0xffff)
23161 as_bad_where (fixP->fx_file, fixP->fx_line,
23162 _("invalid expression in load/store multiple"));
23163 newval = value | md_chars_to_number (buf, INSN_SIZE);
23164 md_number_to_chars (buf, newval, INSN_SIZE);
23165 break;
23166
23167 #ifdef OBJ_ELF
23168 case BFD_RELOC_ARM_PCREL_CALL:
23169
23170 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23171 && fixP->fx_addsy
23172 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23173 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23174 && THUMB_IS_FUNC (fixP->fx_addsy))
23175 /* Flip the bl to blx. This is a simple flip
23176 bit here because we generate PCREL_CALL for
23177 unconditional bls. */
23178 {
23179 newval = md_chars_to_number (buf, INSN_SIZE);
23180 newval = newval | 0x10000000;
23181 md_number_to_chars (buf, newval, INSN_SIZE);
23182 temp = 1;
23183 fixP->fx_done = 1;
23184 }
23185 else
23186 temp = 3;
23187 goto arm_branch_common;
23188
23189 case BFD_RELOC_ARM_PCREL_JUMP:
23190 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23191 && fixP->fx_addsy
23192 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23193 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23194 && THUMB_IS_FUNC (fixP->fx_addsy))
23195 {
23196 /* This would map to a bl<cond>, b<cond>,
23197 b<always> to a Thumb function. We
23198 need to force a relocation for this particular
23199 case. */
23200 newval = md_chars_to_number (buf, INSN_SIZE);
23201 fixP->fx_done = 0;
23202 }
23203
23204 case BFD_RELOC_ARM_PLT32:
23205 #endif
23206 case BFD_RELOC_ARM_PCREL_BRANCH:
23207 temp = 3;
23208 goto arm_branch_common;
23209
23210 case BFD_RELOC_ARM_PCREL_BLX:
23211
23212 temp = 1;
23213 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23214 && fixP->fx_addsy
23215 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23216 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23217 && ARM_IS_FUNC (fixP->fx_addsy))
23218 {
23219 /* Flip the blx to a bl and warn. */
23220 const char *name = S_GET_NAME (fixP->fx_addsy);
23221 newval = 0xeb000000;
23222 as_warn_where (fixP->fx_file, fixP->fx_line,
23223 _("blx to '%s' an ARM ISA state function changed to bl"),
23224 name);
23225 md_number_to_chars (buf, newval, INSN_SIZE);
23226 temp = 3;
23227 fixP->fx_done = 1;
23228 }
23229
23230 #ifdef OBJ_ELF
23231 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
23232 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
23233 #endif
23234
23235 arm_branch_common:
23236 /* We are going to store value (shifted right by two) in the
23237 instruction, in a 24 bit, signed field. Bits 26 through 32 either
23238 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
23239 also be be clear. */
23240 if (value & temp)
23241 as_bad_where (fixP->fx_file, fixP->fx_line,
23242 _("misaligned branch destination"));
23243 if ((value & (offsetT)0xfe000000) != (offsetT)0
23244 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
23245 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23246
23247 if (fixP->fx_done || !seg->use_rela_p)
23248 {
23249 newval = md_chars_to_number (buf, INSN_SIZE);
23250 newval |= (value >> 2) & 0x00ffffff;
23251 /* Set the H bit on BLX instructions. */
23252 if (temp == 1)
23253 {
23254 if (value & 2)
23255 newval |= 0x01000000;
23256 else
23257 newval &= ~0x01000000;
23258 }
23259 md_number_to_chars (buf, newval, INSN_SIZE);
23260 }
23261 break;
23262
23263 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
23264 /* CBZ can only branch forward. */
23265
23266 /* Attempts to use CBZ to branch to the next instruction
23267 (which, strictly speaking, are prohibited) will be turned into
23268 no-ops.
23269
23270 FIXME: It may be better to remove the instruction completely and
23271 perform relaxation. */
23272 if (value == -2)
23273 {
23274 newval = md_chars_to_number (buf, THUMB_SIZE);
23275 newval = 0xbf00; /* NOP encoding T1 */
23276 md_number_to_chars (buf, newval, THUMB_SIZE);
23277 }
23278 else
23279 {
23280 if (value & ~0x7e)
23281 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23282
23283 if (fixP->fx_done || !seg->use_rela_p)
23284 {
23285 newval = md_chars_to_number (buf, THUMB_SIZE);
23286 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
23287 md_number_to_chars (buf, newval, THUMB_SIZE);
23288 }
23289 }
23290 break;
23291
23292 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
23293 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
23294 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23295
23296 if (fixP->fx_done || !seg->use_rela_p)
23297 {
23298 newval = md_chars_to_number (buf, THUMB_SIZE);
23299 newval |= (value & 0x1ff) >> 1;
23300 md_number_to_chars (buf, newval, THUMB_SIZE);
23301 }
23302 break;
23303
23304 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
23305 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
23306 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23307
23308 if (fixP->fx_done || !seg->use_rela_p)
23309 {
23310 newval = md_chars_to_number (buf, THUMB_SIZE);
23311 newval |= (value & 0xfff) >> 1;
23312 md_number_to_chars (buf, newval, THUMB_SIZE);
23313 }
23314 break;
23315
23316 case BFD_RELOC_THUMB_PCREL_BRANCH20:
23317 if (fixP->fx_addsy
23318 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23319 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23320 && ARM_IS_FUNC (fixP->fx_addsy)
23321 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
23322 {
23323 /* Force a relocation for a branch 20 bits wide. */
23324 fixP->fx_done = 0;
23325 }
23326 if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
23327 as_bad_where (fixP->fx_file, fixP->fx_line,
23328 _("conditional branch out of range"));
23329
23330 if (fixP->fx_done || !seg->use_rela_p)
23331 {
23332 offsetT newval2;
23333 addressT S, J1, J2, lo, hi;
23334
23335 S = (value & 0x00100000) >> 20;
23336 J2 = (value & 0x00080000) >> 19;
23337 J1 = (value & 0x00040000) >> 18;
23338 hi = (value & 0x0003f000) >> 12;
23339 lo = (value & 0x00000ffe) >> 1;
23340
23341 newval = md_chars_to_number (buf, THUMB_SIZE);
23342 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23343 newval |= (S << 10) | hi;
23344 newval2 |= (J1 << 13) | (J2 << 11) | lo;
23345 md_number_to_chars (buf, newval, THUMB_SIZE);
23346 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
23347 }
23348 break;
23349
23350 case BFD_RELOC_THUMB_PCREL_BLX:
23351 /* If there is a blx from a thumb state function to
23352 another thumb function flip this to a bl and warn
23353 about it. */
23354
23355 if (fixP->fx_addsy
23356 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23357 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23358 && THUMB_IS_FUNC (fixP->fx_addsy))
23359 {
23360 const char *name = S_GET_NAME (fixP->fx_addsy);
23361 as_warn_where (fixP->fx_file, fixP->fx_line,
23362 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
23363 name);
23364 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23365 newval = newval | 0x1000;
23366 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
23367 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
23368 fixP->fx_done = 1;
23369 }
23370
23371
23372 goto thumb_bl_common;
23373
23374 case BFD_RELOC_THUMB_PCREL_BRANCH23:
23375 /* A bl from Thumb state ISA to an internal ARM state function
23376 is converted to a blx. */
23377 if (fixP->fx_addsy
23378 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23379 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23380 && ARM_IS_FUNC (fixP->fx_addsy)
23381 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
23382 {
23383 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23384 newval = newval & ~0x1000;
23385 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
23386 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
23387 fixP->fx_done = 1;
23388 }
23389
23390 thumb_bl_common:
23391
23392 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
23393 /* For a BLX instruction, make sure that the relocation is rounded up
23394 to a word boundary. This follows the semantics of the instruction
23395 which specifies that bit 1 of the target address will come from bit
23396 1 of the base address. */
23397 value = (value + 3) & ~ 3;
23398
23399 #ifdef OBJ_ELF
23400 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
23401 && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
23402 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
23403 #endif
23404
23405 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
23406 {
23407 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)))
23408 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23409 else if ((value & ~0x1ffffff)
23410 && ((value & ~0x1ffffff) != ~0x1ffffff))
23411 as_bad_where (fixP->fx_file, fixP->fx_line,
23412 _("Thumb2 branch out of range"));
23413 }
23414
23415 if (fixP->fx_done || !seg->use_rela_p)
23416 encode_thumb2_b_bl_offset (buf, value);
23417
23418 break;
23419
23420 case BFD_RELOC_THUMB_PCREL_BRANCH25:
23421 if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
23422 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23423
23424 if (fixP->fx_done || !seg->use_rela_p)
23425 encode_thumb2_b_bl_offset (buf, value);
23426
23427 break;
23428
23429 case BFD_RELOC_8:
23430 if (fixP->fx_done || !seg->use_rela_p)
23431 *buf = value;
23432 break;
23433
23434 case BFD_RELOC_16:
23435 if (fixP->fx_done || !seg->use_rela_p)
23436 md_number_to_chars (buf, value, 2);
23437 break;
23438
23439 #ifdef OBJ_ELF
23440 case BFD_RELOC_ARM_TLS_CALL:
23441 case BFD_RELOC_ARM_THM_TLS_CALL:
23442 case BFD_RELOC_ARM_TLS_DESCSEQ:
23443 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
23444 case BFD_RELOC_ARM_TLS_GOTDESC:
23445 case BFD_RELOC_ARM_TLS_GD32:
23446 case BFD_RELOC_ARM_TLS_LE32:
23447 case BFD_RELOC_ARM_TLS_IE32:
23448 case BFD_RELOC_ARM_TLS_LDM32:
23449 case BFD_RELOC_ARM_TLS_LDO32:
23450 S_SET_THREAD_LOCAL (fixP->fx_addsy);
23451 break;
23452
23453 case BFD_RELOC_ARM_GOT32:
23454 case BFD_RELOC_ARM_GOTOFF:
23455 break;
23456
23457 case BFD_RELOC_ARM_GOT_PREL:
23458 if (fixP->fx_done || !seg->use_rela_p)
23459 md_number_to_chars (buf, value, 4);
23460 break;
23461
23462 case BFD_RELOC_ARM_TARGET2:
23463 /* TARGET2 is not partial-inplace, so we need to write the
23464 addend here for REL targets, because it won't be written out
23465 during reloc processing later. */
23466 if (fixP->fx_done || !seg->use_rela_p)
23467 md_number_to_chars (buf, fixP->fx_offset, 4);
23468 break;
23469 #endif
23470
23471 case BFD_RELOC_RVA:
23472 case BFD_RELOC_32:
23473 case BFD_RELOC_ARM_TARGET1:
23474 case BFD_RELOC_ARM_ROSEGREL32:
23475 case BFD_RELOC_ARM_SBREL32:
23476 case BFD_RELOC_32_PCREL:
23477 #ifdef TE_PE
23478 case BFD_RELOC_32_SECREL:
23479 #endif
23480 if (fixP->fx_done || !seg->use_rela_p)
23481 #ifdef TE_WINCE
23482 /* For WinCE we only do this for pcrel fixups. */
23483 if (fixP->fx_done || fixP->fx_pcrel)
23484 #endif
23485 md_number_to_chars (buf, value, 4);
23486 break;
23487
23488 #ifdef OBJ_ELF
23489 case BFD_RELOC_ARM_PREL31:
23490 if (fixP->fx_done || !seg->use_rela_p)
23491 {
23492 newval = md_chars_to_number (buf, 4) & 0x80000000;
23493 if ((value ^ (value >> 1)) & 0x40000000)
23494 {
23495 as_bad_where (fixP->fx_file, fixP->fx_line,
23496 _("rel31 relocation overflow"));
23497 }
23498 newval |= value & 0x7fffffff;
23499 md_number_to_chars (buf, newval, 4);
23500 }
23501 break;
23502 #endif
23503
23504 case BFD_RELOC_ARM_CP_OFF_IMM:
23505 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
23506 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM)
23507 newval = md_chars_to_number (buf, INSN_SIZE);
23508 else
23509 newval = get_thumb32_insn (buf);
23510 if ((newval & 0x0f200f00) == 0x0d000900)
23511 {
23512 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
23513 has permitted values that are multiples of 2, in the range 0
23514 to 510. */
23515 if (value < -510 || value > 510 || (value & 1))
23516 as_bad_where (fixP->fx_file, fixP->fx_line,
23517 _("co-processor offset out of range"));
23518 }
23519 else if (value < -1023 || value > 1023 || (value & 3))
23520 as_bad_where (fixP->fx_file, fixP->fx_line,
23521 _("co-processor offset out of range"));
23522 cp_off_common:
23523 sign = value > 0;
23524 if (value < 0)
23525 value = -value;
23526 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
23527 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
23528 newval = md_chars_to_number (buf, INSN_SIZE);
23529 else
23530 newval = get_thumb32_insn (buf);
23531 if (value == 0)
23532 newval &= 0xffffff00;
23533 else
23534 {
23535 newval &= 0xff7fff00;
23536 if ((newval & 0x0f200f00) == 0x0d000900)
23537 {
23538 /* This is a fp16 vstr/vldr.
23539
23540 It requires the immediate offset in the instruction is shifted
23541 left by 1 to be a half-word offset.
23542
23543 Here, left shift by 1 first, and later right shift by 2
23544 should get the right offset. */
23545 value <<= 1;
23546 }
23547 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
23548 }
23549 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
23550 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
23551 md_number_to_chars (buf, newval, INSN_SIZE);
23552 else
23553 put_thumb32_insn (buf, newval);
23554 break;
23555
23556 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
23557 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
23558 if (value < -255 || value > 255)
23559 as_bad_where (fixP->fx_file, fixP->fx_line,
23560 _("co-processor offset out of range"));
23561 value *= 4;
23562 goto cp_off_common;
23563
23564 case BFD_RELOC_ARM_THUMB_OFFSET:
23565 newval = md_chars_to_number (buf, THUMB_SIZE);
23566 /* Exactly what ranges, and where the offset is inserted depends
23567 on the type of instruction, we can establish this from the
23568 top 4 bits. */
23569 switch (newval >> 12)
23570 {
23571 case 4: /* PC load. */
23572 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
23573 forced to zero for these loads; md_pcrel_from has already
23574 compensated for this. */
23575 if (value & 3)
23576 as_bad_where (fixP->fx_file, fixP->fx_line,
23577 _("invalid offset, target not word aligned (0x%08lX)"),
23578 (((unsigned long) fixP->fx_frag->fr_address
23579 + (unsigned long) fixP->fx_where) & ~3)
23580 + (unsigned long) value);
23581
23582 if (value & ~0x3fc)
23583 as_bad_where (fixP->fx_file, fixP->fx_line,
23584 _("invalid offset, value too big (0x%08lX)"),
23585 (long) value);
23586
23587 newval |= value >> 2;
23588 break;
23589
23590 case 9: /* SP load/store. */
23591 if (value & ~0x3fc)
23592 as_bad_where (fixP->fx_file, fixP->fx_line,
23593 _("invalid offset, value too big (0x%08lX)"),
23594 (long) value);
23595 newval |= value >> 2;
23596 break;
23597
23598 case 6: /* Word load/store. */
23599 if (value & ~0x7c)
23600 as_bad_where (fixP->fx_file, fixP->fx_line,
23601 _("invalid offset, value too big (0x%08lX)"),
23602 (long) value);
23603 newval |= value << 4; /* 6 - 2. */
23604 break;
23605
23606 case 7: /* Byte load/store. */
23607 if (value & ~0x1f)
23608 as_bad_where (fixP->fx_file, fixP->fx_line,
23609 _("invalid offset, value too big (0x%08lX)"),
23610 (long) value);
23611 newval |= value << 6;
23612 break;
23613
23614 case 8: /* Halfword load/store. */
23615 if (value & ~0x3e)
23616 as_bad_where (fixP->fx_file, fixP->fx_line,
23617 _("invalid offset, value too big (0x%08lX)"),
23618 (long) value);
23619 newval |= value << 5; /* 6 - 1. */
23620 break;
23621
23622 default:
23623 as_bad_where (fixP->fx_file, fixP->fx_line,
23624 "Unable to process relocation for thumb opcode: %lx",
23625 (unsigned long) newval);
23626 break;
23627 }
23628 md_number_to_chars (buf, newval, THUMB_SIZE);
23629 break;
23630
23631 case BFD_RELOC_ARM_THUMB_ADD:
23632 /* This is a complicated relocation, since we use it for all of
23633 the following immediate relocations:
23634
23635 3bit ADD/SUB
23636 8bit ADD/SUB
23637 9bit ADD/SUB SP word-aligned
23638 10bit ADD PC/SP word-aligned
23639
23640 The type of instruction being processed is encoded in the
23641 instruction field:
23642
23643 0x8000 SUB
23644 0x00F0 Rd
23645 0x000F Rs
23646 */
23647 newval = md_chars_to_number (buf, THUMB_SIZE);
23648 {
23649 int rd = (newval >> 4) & 0xf;
23650 int rs = newval & 0xf;
23651 int subtract = !!(newval & 0x8000);
23652
23653 /* Check for HI regs, only very restricted cases allowed:
23654 Adjusting SP, and using PC or SP to get an address. */
23655 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
23656 || (rs > 7 && rs != REG_SP && rs != REG_PC))
23657 as_bad_where (fixP->fx_file, fixP->fx_line,
23658 _("invalid Hi register with immediate"));
23659
23660 /* If value is negative, choose the opposite instruction. */
23661 if (value < 0)
23662 {
23663 value = -value;
23664 subtract = !subtract;
23665 if (value < 0)
23666 as_bad_where (fixP->fx_file, fixP->fx_line,
23667 _("immediate value out of range"));
23668 }
23669
23670 if (rd == REG_SP)
23671 {
23672 if (value & ~0x1fc)
23673 as_bad_where (fixP->fx_file, fixP->fx_line,
23674 _("invalid immediate for stack address calculation"));
23675 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
23676 newval |= value >> 2;
23677 }
23678 else if (rs == REG_PC || rs == REG_SP)
23679 {
23680 /* PR gas/18541. If the addition is for a defined symbol
23681 within range of an ADR instruction then accept it. */
23682 if (subtract
23683 && value == 4
23684 && fixP->fx_addsy != NULL)
23685 {
23686 subtract = 0;
23687
23688 if (! S_IS_DEFINED (fixP->fx_addsy)
23689 || S_GET_SEGMENT (fixP->fx_addsy) != seg
23690 || S_IS_WEAK (fixP->fx_addsy))
23691 {
23692 as_bad_where (fixP->fx_file, fixP->fx_line,
23693 _("address calculation needs a strongly defined nearby symbol"));
23694 }
23695 else
23696 {
23697 offsetT v = fixP->fx_where + fixP->fx_frag->fr_address;
23698
23699 /* Round up to the next 4-byte boundary. */
23700 if (v & 3)
23701 v = (v + 3) & ~ 3;
23702 else
23703 v += 4;
23704 v = S_GET_VALUE (fixP->fx_addsy) - v;
23705
23706 if (v & ~0x3fc)
23707 {
23708 as_bad_where (fixP->fx_file, fixP->fx_line,
23709 _("symbol too far away"));
23710 }
23711 else
23712 {
23713 fixP->fx_done = 1;
23714 value = v;
23715 }
23716 }
23717 }
23718
23719 if (subtract || value & ~0x3fc)
23720 as_bad_where (fixP->fx_file, fixP->fx_line,
23721 _("invalid immediate for address calculation (value = 0x%08lX)"),
23722 (unsigned long) (subtract ? - value : value));
23723 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
23724 newval |= rd << 8;
23725 newval |= value >> 2;
23726 }
23727 else if (rs == rd)
23728 {
23729 if (value & ~0xff)
23730 as_bad_where (fixP->fx_file, fixP->fx_line,
23731 _("immediate value out of range"));
23732 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
23733 newval |= (rd << 8) | value;
23734 }
23735 else
23736 {
23737 if (value & ~0x7)
23738 as_bad_where (fixP->fx_file, fixP->fx_line,
23739 _("immediate value out of range"));
23740 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
23741 newval |= rd | (rs << 3) | (value << 6);
23742 }
23743 }
23744 md_number_to_chars (buf, newval, THUMB_SIZE);
23745 break;
23746
23747 case BFD_RELOC_ARM_THUMB_IMM:
23748 newval = md_chars_to_number (buf, THUMB_SIZE);
23749 if (value < 0 || value > 255)
23750 as_bad_where (fixP->fx_file, fixP->fx_line,
23751 _("invalid immediate: %ld is out of range"),
23752 (long) value);
23753 newval |= value;
23754 md_number_to_chars (buf, newval, THUMB_SIZE);
23755 break;
23756
23757 case BFD_RELOC_ARM_THUMB_SHIFT:
23758 /* 5bit shift value (0..32). LSL cannot take 32. */
23759 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
23760 temp = newval & 0xf800;
23761 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
23762 as_bad_where (fixP->fx_file, fixP->fx_line,
23763 _("invalid shift value: %ld"), (long) value);
23764 /* Shifts of zero must be encoded as LSL. */
23765 if (value == 0)
23766 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
23767 /* Shifts of 32 are encoded as zero. */
23768 else if (value == 32)
23769 value = 0;
23770 newval |= value << 6;
23771 md_number_to_chars (buf, newval, THUMB_SIZE);
23772 break;
23773
23774 case BFD_RELOC_VTABLE_INHERIT:
23775 case BFD_RELOC_VTABLE_ENTRY:
23776 fixP->fx_done = 0;
23777 return;
23778
23779 case BFD_RELOC_ARM_MOVW:
23780 case BFD_RELOC_ARM_MOVT:
23781 case BFD_RELOC_ARM_THUMB_MOVW:
23782 case BFD_RELOC_ARM_THUMB_MOVT:
23783 if (fixP->fx_done || !seg->use_rela_p)
23784 {
23785 /* REL format relocations are limited to a 16-bit addend. */
23786 if (!fixP->fx_done)
23787 {
23788 if (value < -0x8000 || value > 0x7fff)
23789 as_bad_where (fixP->fx_file, fixP->fx_line,
23790 _("offset out of range"));
23791 }
23792 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
23793 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
23794 {
23795 value >>= 16;
23796 }
23797
23798 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
23799 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
23800 {
23801 newval = get_thumb32_insn (buf);
23802 newval &= 0xfbf08f00;
23803 newval |= (value & 0xf000) << 4;
23804 newval |= (value & 0x0800) << 15;
23805 newval |= (value & 0x0700) << 4;
23806 newval |= (value & 0x00ff);
23807 put_thumb32_insn (buf, newval);
23808 }
23809 else
23810 {
23811 newval = md_chars_to_number (buf, 4);
23812 newval &= 0xfff0f000;
23813 newval |= value & 0x0fff;
23814 newval |= (value & 0xf000) << 4;
23815 md_number_to_chars (buf, newval, 4);
23816 }
23817 }
23818 return;
23819
23820 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
23821 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
23822 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
23823 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
23824 gas_assert (!fixP->fx_done);
23825 {
23826 bfd_vma insn;
23827 bfd_boolean is_mov;
23828 bfd_vma encoded_addend = value;
23829
23830 /* Check that addend can be encoded in instruction. */
23831 if (!seg->use_rela_p && (value < 0 || value > 255))
23832 as_bad_where (fixP->fx_file, fixP->fx_line,
23833 _("the offset 0x%08lX is not representable"),
23834 (unsigned long) encoded_addend);
23835
23836 /* Extract the instruction. */
23837 insn = md_chars_to_number (buf, THUMB_SIZE);
23838 is_mov = (insn & 0xf800) == 0x2000;
23839
23840 /* Encode insn. */
23841 if (is_mov)
23842 {
23843 if (!seg->use_rela_p)
23844 insn |= encoded_addend;
23845 }
23846 else
23847 {
23848 int rd, rs;
23849
23850 /* Extract the instruction. */
23851 /* Encoding is the following
23852 0x8000 SUB
23853 0x00F0 Rd
23854 0x000F Rs
23855 */
23856 /* The following conditions must be true :
23857 - ADD
23858 - Rd == Rs
23859 - Rd <= 7
23860 */
23861 rd = (insn >> 4) & 0xf;
23862 rs = insn & 0xf;
23863 if ((insn & 0x8000) || (rd != rs) || rd > 7)
23864 as_bad_where (fixP->fx_file, fixP->fx_line,
23865 _("Unable to process relocation for thumb opcode: %lx"),
23866 (unsigned long) insn);
23867
23868 /* Encode as ADD immediate8 thumb 1 code. */
23869 insn = 0x3000 | (rd << 8);
23870
23871 /* Place the encoded addend into the first 8 bits of the
23872 instruction. */
23873 if (!seg->use_rela_p)
23874 insn |= encoded_addend;
23875 }
23876
23877 /* Update the instruction. */
23878 md_number_to_chars (buf, insn, THUMB_SIZE);
23879 }
23880 break;
23881
23882 case BFD_RELOC_ARM_ALU_PC_G0_NC:
23883 case BFD_RELOC_ARM_ALU_PC_G0:
23884 case BFD_RELOC_ARM_ALU_PC_G1_NC:
23885 case BFD_RELOC_ARM_ALU_PC_G1:
23886 case BFD_RELOC_ARM_ALU_PC_G2:
23887 case BFD_RELOC_ARM_ALU_SB_G0_NC:
23888 case BFD_RELOC_ARM_ALU_SB_G0:
23889 case BFD_RELOC_ARM_ALU_SB_G1_NC:
23890 case BFD_RELOC_ARM_ALU_SB_G1:
23891 case BFD_RELOC_ARM_ALU_SB_G2:
23892 gas_assert (!fixP->fx_done);
23893 if (!seg->use_rela_p)
23894 {
23895 bfd_vma insn;
23896 bfd_vma encoded_addend;
23897 bfd_vma addend_abs = abs (value);
23898
23899 /* Check that the absolute value of the addend can be
23900 expressed as an 8-bit constant plus a rotation. */
23901 encoded_addend = encode_arm_immediate (addend_abs);
23902 if (encoded_addend == (unsigned int) FAIL)
23903 as_bad_where (fixP->fx_file, fixP->fx_line,
23904 _("the offset 0x%08lX is not representable"),
23905 (unsigned long) addend_abs);
23906
23907 /* Extract the instruction. */
23908 insn = md_chars_to_number (buf, INSN_SIZE);
23909
23910 /* If the addend is positive, use an ADD instruction.
23911 Otherwise use a SUB. Take care not to destroy the S bit. */
23912 insn &= 0xff1fffff;
23913 if (value < 0)
23914 insn |= 1 << 22;
23915 else
23916 insn |= 1 << 23;
23917
23918 /* Place the encoded addend into the first 12 bits of the
23919 instruction. */
23920 insn &= 0xfffff000;
23921 insn |= encoded_addend;
23922
23923 /* Update the instruction. */
23924 md_number_to_chars (buf, insn, INSN_SIZE);
23925 }
23926 break;
23927
23928 case BFD_RELOC_ARM_LDR_PC_G0:
23929 case BFD_RELOC_ARM_LDR_PC_G1:
23930 case BFD_RELOC_ARM_LDR_PC_G2:
23931 case BFD_RELOC_ARM_LDR_SB_G0:
23932 case BFD_RELOC_ARM_LDR_SB_G1:
23933 case BFD_RELOC_ARM_LDR_SB_G2:
23934 gas_assert (!fixP->fx_done);
23935 if (!seg->use_rela_p)
23936 {
23937 bfd_vma insn;
23938 bfd_vma addend_abs = abs (value);
23939
23940 /* Check that the absolute value of the addend can be
23941 encoded in 12 bits. */
23942 if (addend_abs >= 0x1000)
23943 as_bad_where (fixP->fx_file, fixP->fx_line,
23944 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
23945 (unsigned long) addend_abs);
23946
23947 /* Extract the instruction. */
23948 insn = md_chars_to_number (buf, INSN_SIZE);
23949
23950 /* If the addend is negative, clear bit 23 of the instruction.
23951 Otherwise set it. */
23952 if (value < 0)
23953 insn &= ~(1 << 23);
23954 else
23955 insn |= 1 << 23;
23956
23957 /* Place the absolute value of the addend into the first 12 bits
23958 of the instruction. */
23959 insn &= 0xfffff000;
23960 insn |= addend_abs;
23961
23962 /* Update the instruction. */
23963 md_number_to_chars (buf, insn, INSN_SIZE);
23964 }
23965 break;
23966
23967 case BFD_RELOC_ARM_LDRS_PC_G0:
23968 case BFD_RELOC_ARM_LDRS_PC_G1:
23969 case BFD_RELOC_ARM_LDRS_PC_G2:
23970 case BFD_RELOC_ARM_LDRS_SB_G0:
23971 case BFD_RELOC_ARM_LDRS_SB_G1:
23972 case BFD_RELOC_ARM_LDRS_SB_G2:
23973 gas_assert (!fixP->fx_done);
23974 if (!seg->use_rela_p)
23975 {
23976 bfd_vma insn;
23977 bfd_vma addend_abs = abs (value);
23978
23979 /* Check that the absolute value of the addend can be
23980 encoded in 8 bits. */
23981 if (addend_abs >= 0x100)
23982 as_bad_where (fixP->fx_file, fixP->fx_line,
23983 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
23984 (unsigned long) addend_abs);
23985
23986 /* Extract the instruction. */
23987 insn = md_chars_to_number (buf, INSN_SIZE);
23988
23989 /* If the addend is negative, clear bit 23 of the instruction.
23990 Otherwise set it. */
23991 if (value < 0)
23992 insn &= ~(1 << 23);
23993 else
23994 insn |= 1 << 23;
23995
23996 /* Place the first four bits of the absolute value of the addend
23997 into the first 4 bits of the instruction, and the remaining
23998 four into bits 8 .. 11. */
23999 insn &= 0xfffff0f0;
24000 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
24001
24002 /* Update the instruction. */
24003 md_number_to_chars (buf, insn, INSN_SIZE);
24004 }
24005 break;
24006
24007 case BFD_RELOC_ARM_LDC_PC_G0:
24008 case BFD_RELOC_ARM_LDC_PC_G1:
24009 case BFD_RELOC_ARM_LDC_PC_G2:
24010 case BFD_RELOC_ARM_LDC_SB_G0:
24011 case BFD_RELOC_ARM_LDC_SB_G1:
24012 case BFD_RELOC_ARM_LDC_SB_G2:
24013 gas_assert (!fixP->fx_done);
24014 if (!seg->use_rela_p)
24015 {
24016 bfd_vma insn;
24017 bfd_vma addend_abs = abs (value);
24018
24019 /* Check that the absolute value of the addend is a multiple of
24020 four and, when divided by four, fits in 8 bits. */
24021 if (addend_abs & 0x3)
24022 as_bad_where (fixP->fx_file, fixP->fx_line,
24023 _("bad offset 0x%08lX (must be word-aligned)"),
24024 (unsigned long) addend_abs);
24025
24026 if ((addend_abs >> 2) > 0xff)
24027 as_bad_where (fixP->fx_file, fixP->fx_line,
24028 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
24029 (unsigned long) addend_abs);
24030
24031 /* Extract the instruction. */
24032 insn = md_chars_to_number (buf, INSN_SIZE);
24033
24034 /* If the addend is negative, clear bit 23 of the instruction.
24035 Otherwise set it. */
24036 if (value < 0)
24037 insn &= ~(1 << 23);
24038 else
24039 insn |= 1 << 23;
24040
24041 /* Place the addend (divided by four) into the first eight
24042 bits of the instruction. */
24043 insn &= 0xfffffff0;
24044 insn |= addend_abs >> 2;
24045
24046 /* Update the instruction. */
24047 md_number_to_chars (buf, insn, INSN_SIZE);
24048 }
24049 break;
24050
24051 case BFD_RELOC_ARM_V4BX:
24052 /* This will need to go in the object file. */
24053 fixP->fx_done = 0;
24054 break;
24055
24056 case BFD_RELOC_UNUSED:
24057 default:
24058 as_bad_where (fixP->fx_file, fixP->fx_line,
24059 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
24060 }
24061 }
24062
24063 /* Translate internal representation of relocation info to BFD target
24064 format. */
24065
arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  reloc = XNEW (arelent);

  reloc->sym_ptr_ptr = XNEW (asymbol *);
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (fixp->fx_pcrel)
    {
      /* For RELA targets the addend must be adjusted by the pc-relative
	 base; for REL targets the reloc address itself is recorded.  */
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      /* These map directly onto a BFD reloc of the same name.  */
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_THUMB_PCREL_BLX:
#ifdef OBJ_ELF
      /* EABI >= 4 represents a Thumb BLX as BRANCH23; the linker decides
	 the final instruction form.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	code = BFD_RELOC_THUMB_PCREL_BLX;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_GOT_PREL:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
    case BFD_RELOC_ARM_V4BX:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
      /* ELF-only relocations that also map directly.  */
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

    case BFD_RELOC_ARM_IMMEDIATE:
      /* Internal-only reloc; should have been resolved in md_apply_fix.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	/* Anything else cannot be expressed in this object format;
	   produce a readable name for the diagnostic.  */
	const char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
	  default:			   type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* A 32-bit reference to _GLOBAL_OFFSET_TABLE_ becomes a GOTPC reloc.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
24325
24326 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
24327
24328 void
24329 cons_fix_new_arm (fragS * frag,
24330 int where,
24331 int size,
24332 expressionS * exp,
24333 bfd_reloc_code_real_type reloc)
24334 {
24335 int pcrel = 0;
24336
24337 /* Pick a reloc.
24338 FIXME: @@ Should look at CPU word size. */
24339 switch (size)
24340 {
24341 case 1:
24342 reloc = BFD_RELOC_8;
24343 break;
24344 case 2:
24345 reloc = BFD_RELOC_16;
24346 break;
24347 case 4:
24348 default:
24349 reloc = BFD_RELOC_32;
24350 break;
24351 case 8:
24352 reloc = BFD_RELOC_64;
24353 break;
24354 }
24355
24356 #ifdef TE_PE
24357 if (exp->X_op == O_secrel)
24358 {
24359 exp->X_op = O_symbol;
24360 reloc = BFD_RELOC_32_SECREL;
24361 }
24362 #endif
24363
24364 fix_new_exp (frag, where, size, exp, pcrel, reloc);
24365 }
24366
24367 #if defined (OBJ_COFF)
24368 void
24369 arm_validate_fix (fixS * fixP)
24370 {
24371 /* If the destination of the branch is a defined symbol which does not have
24372 the THUMB_FUNC attribute, then we must be calling a function which has
24373 the (interfacearm) attribute. We look for the Thumb entry point to that
24374 function and change the branch to refer to that function instead. */
24375 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
24376 && fixP->fx_addsy != NULL
24377 && S_IS_DEFINED (fixP->fx_addsy)
24378 && ! THUMB_IS_FUNC (fixP->fx_addsy))
24379 {
24380 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
24381 }
24382 }
24383 #endif
24384
24385
24386 int
24387 arm_force_relocation (struct fix * fixp)
24388 {
24389 #if defined (OBJ_COFF) && defined (TE_PE)
24390 if (fixp->fx_r_type == BFD_RELOC_RVA)
24391 return 1;
24392 #endif
24393
24394 /* In case we have a call or a branch to a function in ARM ISA mode from
24395 a thumb function or vice-versa force the relocation. These relocations
24396 are cleared off for some cores that might have blx and simple transformations
24397 are possible. */
24398
24399 #ifdef OBJ_ELF
24400 switch (fixp->fx_r_type)
24401 {
24402 case BFD_RELOC_ARM_PCREL_JUMP:
24403 case BFD_RELOC_ARM_PCREL_CALL:
24404 case BFD_RELOC_THUMB_PCREL_BLX:
24405 if (THUMB_IS_FUNC (fixp->fx_addsy))
24406 return 1;
24407 break;
24408
24409 case BFD_RELOC_ARM_PCREL_BLX:
24410 case BFD_RELOC_THUMB_PCREL_BRANCH25:
24411 case BFD_RELOC_THUMB_PCREL_BRANCH20:
24412 case BFD_RELOC_THUMB_PCREL_BRANCH23:
24413 if (ARM_IS_FUNC (fixp->fx_addsy))
24414 return 1;
24415 break;
24416
24417 default:
24418 break;
24419 }
24420 #endif
24421
24422 /* Resolve these relocations even if the symbol is extern or weak.
24423 Technically this is probably wrong due to symbol preemption.
24424 In practice these relocations do not have enough range to be useful
24425 at dynamic link time, and some code (e.g. in the Linux kernel)
24426 expects these references to be resolved. */
24427 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
24428 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
24429 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
24430 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
24431 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
24432 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
24433 || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
24434 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
24435 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
24436 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
24437 || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
24438 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
24439 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
24440 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
24441 return 0;
24442
24443 /* Always leave these relocations for the linker. */
24444 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
24445 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
24446 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
24447 return 1;
24448
24449 /* Always generate relocations against function symbols. */
24450 if (fixp->fx_r_type == BFD_RELOC_32
24451 && fixp->fx_addsy
24452 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
24453 return 1;
24454
24455 return generic_force_reloc (fixp);
24456 }
24457
24458 #if defined (OBJ_ELF) || defined (OBJ_COFF)
24459 /* Relocations against function names must be left unadjusted,
24460 so that the linker can use this information to generate interworking
24461 stubs. The MIPS version of this function
24462 also prevents relocations that are mips-16 specific, but I do not
24463 know why it does this.
24464
24465 FIXME:
24466 There is one other problem that ought to be addressed here, but
24467 which currently is not: Taking the address of a label (rather
24468 than a function) and then later jumping to that address. Such
24469 addresses also ought to have their bottom bit set (assuming that
24470 they reside in Thumb code), but at the moment they will not. */
24471
24472 bfd_boolean
24473 arm_fix_adjustable (fixS * fixP)
24474 {
24475 if (fixP->fx_addsy == NULL)
24476 return 1;
24477
24478 /* Preserve relocations against symbols with function type. */
24479 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
24480 return FALSE;
24481
24482 if (THUMB_IS_FUNC (fixP->fx_addsy)
24483 && fixP->fx_subsy == NULL)
24484 return FALSE;
24485
24486 /* We need the symbol name for the VTABLE entries. */
24487 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
24488 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
24489 return FALSE;
24490
24491 /* Don't allow symbols to be discarded on GOT related relocs. */
24492 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
24493 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
24494 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
24495 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
24496 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
24497 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
24498 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
24499 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
24500 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
24501 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
24502 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
24503 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
24504 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
24505 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
24506 return FALSE;
24507
24508 /* Similarly for group relocations. */
24509 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
24510 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
24511 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
24512 return FALSE;
24513
24514 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
24515 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
24516 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
24517 || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
24518 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
24519 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
24520 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
24521 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
24522 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
24523 return FALSE;
24524
24525 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
24526 offsets, so keep these symbols. */
24527 if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
24528 && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
24529 return FALSE;
24530
24531 return TRUE;
24532 }
24533 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
24534
24535 #ifdef OBJ_ELF
24536 const char *
24537 elf32_arm_target_format (void)
24538 {
24539 #ifdef TE_SYMBIAN
24540 return (target_big_endian
24541 ? "elf32-bigarm-symbian"
24542 : "elf32-littlearm-symbian");
24543 #elif defined (TE_VXWORKS)
24544 return (target_big_endian
24545 ? "elf32-bigarm-vxworks"
24546 : "elf32-littlearm-vxworks");
24547 #elif defined (TE_NACL)
24548 return (target_big_endian
24549 ? "elf32-bigarm-nacl"
24550 : "elf32-littlearm-nacl");
24551 #else
24552 if (target_big_endian)
24553 return "elf32-bigarm";
24554 else
24555 return "elf32-littlearm";
24556 #endif
24557 }
24558
/* gas per-symbol hook for ARM ELF.  Delegate to the generic ELF
   symbol frobbing; PUNTP is an out-flag managed by the callee.  */
void
armelf_frob_symbol (symbolS * symp,
		    int *     puntp)
{
  elf_frob_symbol (symp, puntp);
}
24565 #endif
24566
24567 /* MD interface: Finalization. */
24568
24569 void
24570 arm_cleanup (void)
24571 {
24572 literal_pool * pool;
24573
24574 /* Ensure that all the IT blocks are properly closed. */
24575 check_it_blocks_finished ();
24576
24577 for (pool = list_of_pools; pool; pool = pool->next)
24578 {
24579 /* Put it at the end of the relevant section. */
24580 subseg_set (pool->section, pool->sub_section);
24581 #ifdef OBJ_ELF
24582 arm_elf_change_section ();
24583 #endif
24584 s_ltorg (0);
24585 }
24586 }
24587
24588 #ifdef OBJ_ELF
24589 /* Remove any excess mapping symbols generated for alignment frags in
24590 SEC. We may have created a mapping symbol before a zero byte
24591 alignment; remove it if there's a mapping symbol after the
24592 alignment. */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Sections with no frag chain have nothing to clean up.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      /* Only the last mapping symbol of a frag can be made redundant
	 by a symbol in a following frag.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on the boundary with the following frag(s);
	 scan forward to decide whether it is redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
24653 #endif
24654
24655 /* Adjust the symbol table. This marks Thumb symbols as distinct from
24656 ARM ones. */
24657
void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  /* COFF: rewrite storage classes so Thumb symbols get the dedicated
     C_THUMB* classes.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!  */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      /* Interworking symbols get all n_flags bits set.  */
      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  /* ELF: tag Thumb symbols in st_target_internal / st_info, except for
     the $a/$t/$d style mapping symbols.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal,
					 ST_BRANCH_TO_THUMB);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
24736
24737 /* MD interface: Initialization. */
24738
24739 static void
24740 set_constant_flonums (void)
24741 {
24742 int i;
24743
24744 for (i = 0; i < NUM_FLOAT_VALS; i++)
24745 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
24746 abort ();
24747 }
24748
24749 /* Auto-select Thumb mode if it's the only available instruction set for the
24750 given architecture. */
24751
24752 static void
24753 autoselect_thumb_from_cpu_variant (void)
24754 {
24755 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
24756 opcode_select (16);
24757 }
24758
/* MD interface hook: one-time target initialisation, run after the
   command line has been parsed.  Builds the lookup hash tables,
   resolves the CPU/FPU selection, and records the object file
   flags and machine type.  */

void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create the hash tables used to look up mnemonics, condition
     codes, shift names, PSR fields, register names, relocation
     names and barrier options.  */
  if (	 (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  /* Populate the tables from the static arrays.  */
  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
		 (void *) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
		 (void *) (barrier_opt_names + i));
#ifdef OBJ_ELF
  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
    {
      struct reloc_entry * entry = reloc_names + i;

      if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
	/* This makes encode_branch() use the EABI versions of this relocation.  */
	entry->reloc = BFD_RELOC_UNUSED;

      hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
    }
#endif

  set_constant_flonums ();

  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options with new options are faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      mcpu_cpu_opt = legacy_cpu;
    }
  else if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      mfpu_opt = legacy_fpu;
    }
  else if (!mfpu_opt)
    {
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	mfpu_opt = mcpu_fpu_opt;
      else
	mfpu_opt = march_fpu_opt;
#else
      mfpu_opt = &fpu_default;
#endif
    }

  /* Last-resort FPU selection when nothing above produced one.  */
  if (!mfpu_opt)
    {
      if (mcpu_cpu_opt != NULL)
	mfpu_opt = &fpu_default;
      else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
	mfpu_opt = &fpu_arch_vfp_v2;
      else
	mfpu_opt = &fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (!mcpu_cpu_opt)
    {
      mcpu_cpu_opt = &cpu_default;
      selected_cpu = cpu_default;
    }
  else if (no_cpu_selected ())
    selected_cpu = cpu_default;
#else
  if (mcpu_cpu_opt)
    selected_cpu = *mcpu_cpu_opt;
  else
    mcpu_cpu_opt = &arm_arch_any;
#endif

  ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;

#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).  */
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	    flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.	*/
	break;

      default:
	abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out of flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.	*/
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags
	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
	    bfd_set_section_size (stdoutput, sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  Extension features are checked
     first, then base architectures from newest to oldest.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
24985
24986 /* Command line processing. */
24987
24988 /* md_parse_option
24989 Invocation line includes a switch not recognized by the base assembler.
24990 See if it's a processor-specific option.
24991
24992 This routine is somewhat complicated by the need for backwards
24993 compatibility (since older releases of gcc can't be changed).
24994 The new options try to make the interface as compatible as
24995 possible with GCC.
24996
24997 New options (supported) are:
24998
24999 -mcpu=<cpu name> Assemble for selected processor
25000 -march=<architecture name> Assemble for selected architecture
25001 -mfpu=<fpu architecture> Assemble for selected FPU.
25002 -EB/-mbig-endian Big-endian
25003 -EL/-mlittle-endian Little-endian
25004 -k Generate PIC code
25005 -mthumb Start in Thumb mode
25006 -mthumb-interwork Code supports ARM/Thumb interworking
25007
25008 -m[no-]warn-deprecated Warn about deprecated features
25009 -m[no-]warn-syms Warn when symbols match instructions
25010
25011 For now we will also provide support for:
25012
25013 -mapcs-32 32-bit Program counter
25014 -mapcs-26 26-bit Program counter
   -mapcs-float		   Floats passed in FP registers
25016 -mapcs-reentrant Reentrant code
25017 -matpcs
25018 (sometime these will probably be replaced with -mapcs=<list of options>
25019 and -matpcs=<list of options>)
25020
   The remaining options are only supported for backwards compatibility.
25022 Cpu variants, the arm part is optional:
25023 -m[arm]1 Currently not supported.
25024 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
25025 -m[arm]3 Arm 3 processor
25026 -m[arm]6[xx], Arm 6 processors
25027 -m[arm]7[xx][t][[d]m] Arm 7 processors
25028 -m[arm]8[10] Arm 8 processors
25029 -m[arm]9[20][tdmi] Arm 9 processors
25030 -mstrongarm[110[0]] StrongARM processors
25031 -mxscale XScale processors
25032 -m[arm]v[2345[t[e]]] Arm architectures
25033 -mall All (except the ARM1)
25034 FP variants:
25035 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
25036 -mfpe-old (No float load/store multiples)
25037 -mvfpxd VFP Single precision
25038 -mvfp All VFP
25039 -mno-fpu Disable all floating point instructions
25040
25041 The following CPU names are recognized:
25042 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
25043 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
25044 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
25045 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
25046 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
25047 arm10t arm10e, arm1020t, arm1020e, arm10200e,
25048 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
25049
25050 */
25051
/* Short options: "m:" because -m takes a sub-option string; -k is a flag.  */
const char * md_shortopts = "m:k";
25053
25054 #ifdef ARM_BI_ENDIAN
25055 #define OPTION_EB (OPTION_MD_BASE + 0)
25056 #define OPTION_EL (OPTION_MD_BASE + 1)
25057 #else
25058 #if TARGET_BYTES_BIG_ENDIAN
25059 #define OPTION_EB (OPTION_MD_BASE + 0)
25060 #else
25061 #define OPTION_EL (OPTION_MD_BASE + 1)
25062 #endif
25063 #endif
25064 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
25065
/* Long-form options handled by md_parse_option.  The -EB/-EL switches
   are only present when the corresponding byte order is available for
   this target (see the OPTION_EB/OPTION_EL definitions above).  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
  {NULL, no_argument, NULL, 0}
};


size_t md_longopts_size = sizeof (md_longopts);
25080
/* A simple command line option: seeing OPTION stores VALUE into *VAR
   (VAR may be NULL for options that are accepted but ignored).  */
struct arm_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.	*/
  int value;			/* What to change it to.  */
  const char *deprecated;	/* If non-null, print this message.  */
};
25089
25090 struct arm_option_table arm_opts[] =
25091 {
25092 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
25093 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
25094 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
25095 &support_interwork, 1, NULL},
25096 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
25097 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
25098 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
25099 1, NULL},
25100 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
25101 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
25102 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
25103 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
25104 NULL},
25105
25106 /* These are recognized by the assembler, but have no affect on code. */
25107 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
25108 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
25109
25110 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
25111 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
25112 &warn_on_deprecated, 0, NULL},
25113 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL},
25114 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL},
25115 {NULL, NULL, NULL, 0, NULL}
25116 };
25117
/* A deprecated command line option that selects a feature set;
   DEPRECATED names the modern -mcpu=/-march=/-mfpu= replacement.  */
struct arm_legacy_option_table
{
  const char *option;		/* Option name to match.  */
  const arm_feature_set **var;	/* Variable to change.	*/
  const arm_feature_set value;	/* What to change it to.  */
  const char *deprecated;	/* If non-null, print this message.  */
};
25125
/* Mapping from legacy -m<cpu>/-m<arch>/-m<fpu> options to feature
   sets; each entry's deprecation message points at the modern
   replacement option.  */
const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
  {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
  {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
  {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
  {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
  {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
  {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
  {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
  {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
  {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
  {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
  {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
  {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
  {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
  {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
  {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
  {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
  {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
  {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
  {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
  {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
  {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
  {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
  {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
  {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
  {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
  {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
  {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
  {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
  {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
  {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
  {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
  {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
  {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
  {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
  {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
  {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
  {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
  {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
  {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
  {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
  {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
  {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
  {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
  {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
  {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
  {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
  {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.  */
  {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
25238
/* One -mcpu= choice: the feature set it selects and the FPU assumed
   when the user gives no explicit -mfpu=.  */
struct arm_cpu_option_table
{
  const char *name;		/* CPU name accepted by -mcpu=.  */
  size_t name_len;		/* strlen (name), precomputed by ARM_CPU_OPT.  */
  const arm_feature_set	value;	/* Features this CPU provides.  */
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...	*/
  const arm_feature_set	default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
25251
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
/* Each entry maps a -mcpu= name to its architecture feature set, the FPU
   assumed by default for that core, and (optionally) a canonical display
   name used for the Tag_CPU_name build attribute.  */
#define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
static const struct arm_cpu_option_table arm_cpus[] =
{
  ARM_CPU_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm1",		ARM_ARCH_V1,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm2",		ARM_ARCH_V2,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm250",	ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm3",		ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm6",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm60",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm600",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm610",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm620",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7m",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7d",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7dm",	ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7di",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7dmi",	ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm70",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm700",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm700i",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm710",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm710t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm720",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm720t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm740t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm710c",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7100",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7500",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7500fe",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7tdmi",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7tdmi-s",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm8",		ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm810",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm1",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm1100",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm1110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm9",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm920",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    "ARM920T"),
  ARM_CPU_OPT ("arm920t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm922t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm940t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm9tdmi",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL),
  ARM_CPU_OPT ("fa526",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL),
  ARM_CPU_OPT ("fa626",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL),
  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.	 */
  ARM_CPU_OPT ("arm9e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm9e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm926ej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"),
  ARM_CPU_OPT ("arm926ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"),
  ARM_CPU_OPT ("arm926ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm946e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm946e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM946E-S"),
  ARM_CPU_OPT ("arm946e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm966e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm966e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM966E-S"),
  ARM_CPU_OPT ("arm966e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm968e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm10t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm10tdmi",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm10e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1020",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM1020E"),
  ARM_CPU_OPT ("arm1020t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm1020e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1022e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1026ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2,
								 "ARM1026EJ-S"),
  ARM_CPU_OPT ("arm1026ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa606te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa616te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa626te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fmp626",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa726te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1136js",	ARM_ARCH_V6,	 FPU_NONE,	  "ARM1136J-S"),
  ARM_CPU_OPT ("arm1136j-s",	ARM_ARCH_V6,	 FPU_NONE,	  NULL),
  ARM_CPU_OPT ("arm1136jfs",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2,
								 "ARM1136JF-S"),
  ARM_CPU_OPT ("arm1136jf-s",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("mpcore",	ARM_ARCH_V6K,	 FPU_ARCH_VFP_V2, "MPCore"),
  ARM_CPU_OPT ("mpcorenovfp",	ARM_ARCH_V6K,	 FPU_NONE,	  "MPCore"),
  ARM_CPU_OPT ("arm1156t2-s",	ARM_ARCH_V6T2,	 FPU_NONE,	  NULL),
  ARM_CPU_OPT ("arm1156t2f-s",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1176jz-s",	ARM_ARCH_V6KZ,	 FPU_NONE,	  NULL),
  ARM_CPU_OPT ("arm1176jzf-s",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("cortex-a5",	ARM_ARCH_V7A_MP_SEC,
						 FPU_NONE,	  "Cortex-A5"),
  ARM_CPU_OPT ("cortex-a7",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
								  "Cortex-A7"),
  ARM_CPU_OPT ("cortex-a8",	ARM_ARCH_V7A_SEC,
						 ARM_FEATURE_COPROC (FPU_VFP_V3
							| FPU_NEON_EXT_V1),
								  "Cortex-A8"),
  ARM_CPU_OPT ("cortex-a9",	ARM_ARCH_V7A_MP_SEC,
						 ARM_FEATURE_COPROC (FPU_VFP_V3
							| FPU_NEON_EXT_V1),
								  "Cortex-A9"),
  ARM_CPU_OPT ("cortex-a12",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
								  "Cortex-A12"),
  ARM_CPU_OPT ("cortex-a15",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
								  "Cortex-A15"),
  ARM_CPU_OPT ("cortex-a17",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
								  "Cortex-A17"),
  ARM_CPU_OPT ("cortex-a32",    ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A32"),
  ARM_CPU_OPT ("cortex-a35",    ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A35"),
  ARM_CPU_OPT ("cortex-a53",    ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A53"),
  ARM_CPU_OPT ("cortex-a57",    ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A57"),
  ARM_CPU_OPT ("cortex-a72",    ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A72"),
  ARM_CPU_OPT ("cortex-a73",    ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A73"),
  ARM_CPU_OPT ("cortex-r4",	ARM_ARCH_V7R,	 FPU_NONE,	  "Cortex-R4"),
  ARM_CPU_OPT ("cortex-r4f",	ARM_ARCH_V7R,	 FPU_ARCH_VFP_V3D16,
								  "Cortex-R4F"),
  ARM_CPU_OPT ("cortex-r5",	ARM_ARCH_V7R_IDIV,
						 FPU_NONE,	  "Cortex-R5"),
  ARM_CPU_OPT ("cortex-r7",	ARM_ARCH_V7R_IDIV,
						 FPU_ARCH_VFP_V3D16,
								  "Cortex-R7"),
  ARM_CPU_OPT ("cortex-r8",	ARM_ARCH_V7R_IDIV,
						 FPU_ARCH_VFP_V3D16,
								  "Cortex-R8"),
  ARM_CPU_OPT ("cortex-m7",	ARM_ARCH_V7EM,	 FPU_NONE,	  "Cortex-M7"),
  ARM_CPU_OPT ("cortex-m4",	ARM_ARCH_V7EM,	 FPU_NONE,	  "Cortex-M4"),
  ARM_CPU_OPT ("cortex-m3",	ARM_ARCH_V7M,	 FPU_NONE,	  "Cortex-M3"),
  ARM_CPU_OPT ("cortex-m1",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M1"),
  ARM_CPU_OPT ("cortex-m0",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M0"),
  ARM_CPU_OPT ("cortex-m0plus",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M0+"),
  ARM_CPU_OPT ("exynos-m1",	ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Samsung " \
								  "Exynos M1"),
  ARM_CPU_OPT ("qdf24xx",	ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Qualcomm "
								  "QDF24XX"),

  /* ??? XSCALE is really an architecture.  */
  ARM_CPU_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
  /* ??? iwmmxt is not a processor.  */
  ARM_CPU_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("i80200",	ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
  /* Maverick */
  ARM_CPU_OPT ("ep9312",	ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
						 FPU_ARCH_MAVERICK, "ARM920T"),
  /* Marvell processors.  */
  ARM_CPU_OPT ("marvell-pj4",   ARM_FEATURE_CORE (ARM_AEXT_V7A | ARM_EXT_MP
						  | ARM_EXT_SEC,
						  ARM_EXT2_V6T2_V8M),
						FPU_ARCH_VFP_V3D16, NULL),
  ARM_CPU_OPT ("marvell-whitney", ARM_FEATURE_CORE (ARM_AEXT_V7A | ARM_EXT_MP
						    | ARM_EXT_SEC,
						    ARM_EXT2_V6T2_V8M),
					       FPU_ARCH_NEON_VFP_V4, NULL),
  /* APM X-Gene family.  */
  ARM_CPU_OPT ("xgene1",        ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	                                                          "APM X-Gene 1"),
  ARM_CPU_OPT ("xgene2",        ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	                                                          "APM X-Gene 2"),

  /* Sentinel: a NULL name terminates the search in arm_parse_cpu.  */
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
};
#undef ARM_CPU_OPT
25424
struct arm_arch_option_table
{
  const char *name;		/* -march= name as typed by the user.  */
  size_t name_len;		/* Precomputed strlen (name).  */
  const arm_feature_set value;	/* Feature bits implied by the arch.  */
  const arm_feature_set default_fpu;	/* FPU assumed if none given.  */
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
static const struct arm_arch_option_table arm_archs[] =
{
  ARM_ARCH_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv1",	ARM_ARCH_V1,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2",	ARM_ARCH_V2,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2a",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2s",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3",	ARM_ARCH_V3,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3m",	ARM_ARCH_V3M,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4",	ARM_ARCH_V4,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4xm",	ARM_ARCH_V4xM,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4txm",	ARM_ARCH_V4TxM,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv5",	ARM_ARCH_V5,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5txm",	ARM_ARCH_V5TxM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5texp",	ARM_ARCH_V5TExP, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5tej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6j",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6k",	ARM_ARCH_V6K,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6z",	ARM_ARCH_V6Z,	 FPU_ARCH_VFP),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT ("armv6kz",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zk",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6t2",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6kt2",	ARM_ARCH_V6KT2,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zt2",	ARM_ARCH_V6ZT2,	 FPU_ARCH_VFP),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT ("armv6kzt2",	ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zkt2",	ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6-m",	ARM_ARCH_V6M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6s-m",	ARM_ARCH_V6SM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7",	ARM_ARCH_V7,	 FPU_ARCH_VFP),
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.  */
  ARM_ARCH_OPT ("armv7a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7ve",	ARM_ARCH_V7VE,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7e-m",	ARM_ARCH_V7EM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-m.base",	ARM_ARCH_V8M_BASE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-m.main",	ARM_ARCH_V8M_MAIN, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-a",	ARM_ARCH_V8A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.1-a",	ARM_ARCH_V8_1A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.2-a",	ARM_ARCH_V8_2A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
  /* Sentinel: a NULL name terminates the search in arm_parse_arch.  */
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
#undef ARM_ARCH_OPT
25494
/* ISA extensions in the co-processor and main instruction set space.  */
struct arm_option_extension_value_table
{
  const char *name;		/* Extension name ("+NAME" / "+noNAME").  */
  size_t name_len;		/* Precomputed strlen (name).  */
  const arm_feature_set merge_value;	/* Bits OR'ed in for "+NAME".  */
  const arm_feature_set clear_value;	/* Bits cleared for "+noNAME".  */
  /* List of architectures for which an extension is available.  ARM_ARCH_NONE
     indicates that an extension is available for all architectures while
     ARM_ANY marks an empty entry.  */
  const arm_feature_set allowed_archs[2];
};

/* The following table must be in alphabetical order with a NULL last entry.
   arm_parse_extension relies on this ordering to report out-of-order
   extension lists.  */
#define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
#define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }
static const struct arm_option_extension_value_table arm_extensions[] =
{
  ARM_EXT_OPT ("crc",  ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
			 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
			 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
				   ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("dsp",	ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
			ARM_FEATURE_CORE (ARM_EXT_V7M, ARM_EXT2_V8M)),
  ARM_EXT_OPT ("fp",     FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
				   ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("fp16",  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
			ARM_ARCH_V8_2A),
  ARM_EXT_OPT2 ("idiv",	ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
  ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ARCH_NONE),
  ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ARCH_NONE),
  ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
			ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ARCH_NONE),
  ARM_EXT_OPT2 ("mp",	ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
  ARM_EXT_OPT ("os",	ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
			ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
				   ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
  ARM_EXT_OPT ("pan",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("ras",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS),
			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_RAS, 0),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("rdma",  FPU_ARCH_NEON_VFP_ARMV8_1,
			ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT2 ("sec",	ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
			ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("simd",  FPU_ARCH_NEON_VFP_ARMV8,
			ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("virt",	ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
				     | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
				   ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
			ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ARCH_NONE),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, { ARM_ARCH_NONE, ARM_ARCH_NONE } }
};
#undef ARM_EXT_OPT
25569
/* ISA floating-point and Advanced SIMD extensions.  */
struct arm_option_fpu_value_table
{
  const char *name;		/* -mfpu= name as typed by the user.  */
  const arm_feature_set value;	/* Coprocessor feature bits it selects.  */
};

/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* For backwards compatibility.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2},
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {"vfpv4",		FPU_ARCH_VFP_V4},
  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
  {"fpv5-d16",		FPU_ARCH_VFP_V5D16},
  {"fpv5-sp-d16",	FPU_ARCH_VFP_V5_SP_D16},
  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
  {"crypto-neon-fp-armv8",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
  {"neon-fp-armv8.1",	FPU_ARCH_NEON_VFP_ARMV8_1},
  {"crypto-neon-fp-armv8.1",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
  /* Sentinel: a NULL name terminates the search in arm_parse_fpu.  */
  {NULL,		ARM_ARCH_NONE}
};
25626
/* Generic name -> integer value mapping, used for simple option tables.  */
struct arm_option_value_table
{
  const char *name;
  long value;
};

/* Accepted -mfloat-abi= values.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}
};

#ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}
};
#endif
25651
/* A command-line option that takes a textual sub-argument, e.g. -mcpu=NAME;
   OPTION is matched as a prefix and FUNC parses what follows.  */
struct arm_long_option_table
{
  const char * option;		/* Substring to match.  */
  const char * help;		/* Help information.  */
  int (* func) (const char * subopt);	/* Function to decode sub-option.  */
  const char * deprecated;	/* If non-null, print this message.  */
};
25659
25660 static bfd_boolean
25661 arm_parse_extension (const char *str, const arm_feature_set **opt_p)
25662 {
25663 arm_feature_set *ext_set = XNEW (arm_feature_set);
25664
25665 /* We insist on extensions being specified in alphabetical order, and with
25666 extensions being added before being removed. We achieve this by having
25667 the global ARM_EXTENSIONS table in alphabetical order, and using the
25668 ADDING_VALUE variable to indicate whether we are adding an extension (1)
25669 or removing it (0) and only allowing it to change in the order
25670 -1 -> 1 -> 0. */
25671 const struct arm_option_extension_value_table * opt = NULL;
25672 const arm_feature_set arm_any = ARM_ANY;
25673 int adding_value = -1;
25674
25675 /* Copy the feature set, so that we can modify it. */
25676 *ext_set = **opt_p;
25677 *opt_p = ext_set;
25678
25679 while (str != NULL && *str != 0)
25680 {
25681 const char *ext;
25682 size_t len;
25683
25684 if (*str != '+')
25685 {
25686 as_bad (_("invalid architectural extension"));
25687 return FALSE;
25688 }
25689
25690 str++;
25691 ext = strchr (str, '+');
25692
25693 if (ext != NULL)
25694 len = ext - str;
25695 else
25696 len = strlen (str);
25697
25698 if (len >= 2 && strncmp (str, "no", 2) == 0)
25699 {
25700 if (adding_value != 0)
25701 {
25702 adding_value = 0;
25703 opt = arm_extensions;
25704 }
25705
25706 len -= 2;
25707 str += 2;
25708 }
25709 else if (len > 0)
25710 {
25711 if (adding_value == -1)
25712 {
25713 adding_value = 1;
25714 opt = arm_extensions;
25715 }
25716 else if (adding_value != 1)
25717 {
25718 as_bad (_("must specify extensions to add before specifying "
25719 "those to remove"));
25720 return FALSE;
25721 }
25722 }
25723
25724 if (len == 0)
25725 {
25726 as_bad (_("missing architectural extension"));
25727 return FALSE;
25728 }
25729
25730 gas_assert (adding_value != -1);
25731 gas_assert (opt != NULL);
25732
25733 /* Scan over the options table trying to find an exact match. */
25734 for (; opt->name != NULL; opt++)
25735 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25736 {
25737 int i, nb_allowed_archs =
25738 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
25739 /* Check we can apply the extension to this architecture. */
25740 for (i = 0; i < nb_allowed_archs; i++)
25741 {
25742 /* Empty entry. */
25743 if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
25744 continue;
25745 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *ext_set))
25746 break;
25747 }
25748 if (i == nb_allowed_archs)
25749 {
25750 as_bad (_("extension does not apply to the base architecture"));
25751 return FALSE;
25752 }
25753
25754 /* Add or remove the extension. */
25755 if (adding_value)
25756 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
25757 else
25758 ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);
25759
25760 break;
25761 }
25762
25763 if (opt->name == NULL)
25764 {
25765 /* Did we fail to find an extension because it wasn't specified in
25766 alphabetical order, or because it does not exist? */
25767
25768 for (opt = arm_extensions; opt->name != NULL; opt++)
25769 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25770 break;
25771
25772 if (opt->name == NULL)
25773 as_bad (_("unknown architectural extension `%s'"), str);
25774 else
25775 as_bad (_("architectural extensions must be specified in "
25776 "alphabetical order"));
25777
25778 return FALSE;
25779 }
25780 else
25781 {
25782 /* We should skip the extension we've just matched the next time
25783 round. */
25784 opt++;
25785 }
25786
25787 str = ext;
25788 };
25789
25790 return TRUE;
25791 }
25792
25793 static bfd_boolean
25794 arm_parse_cpu (const char *str)
25795 {
25796 const struct arm_cpu_option_table *opt;
25797 const char *ext = strchr (str, '+');
25798 size_t len;
25799
25800 if (ext != NULL)
25801 len = ext - str;
25802 else
25803 len = strlen (str);
25804
25805 if (len == 0)
25806 {
25807 as_bad (_("missing cpu name `%s'"), str);
25808 return FALSE;
25809 }
25810
25811 for (opt = arm_cpus; opt->name != NULL; opt++)
25812 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25813 {
25814 mcpu_cpu_opt = &opt->value;
25815 mcpu_fpu_opt = &opt->default_fpu;
25816 if (opt->canonical_name)
25817 {
25818 gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name));
25819 strcpy (selected_cpu_name, opt->canonical_name);
25820 }
25821 else
25822 {
25823 size_t i;
25824
25825 if (len >= sizeof selected_cpu_name)
25826 len = (sizeof selected_cpu_name) - 1;
25827
25828 for (i = 0; i < len; i++)
25829 selected_cpu_name[i] = TOUPPER (opt->name[i]);
25830 selected_cpu_name[i] = 0;
25831 }
25832
25833 if (ext != NULL)
25834 return arm_parse_extension (ext, &mcpu_cpu_opt);
25835
25836 return TRUE;
25837 }
25838
25839 as_bad (_("unknown cpu `%s'"), str);
25840 return FALSE;
25841 }
25842
25843 static bfd_boolean
25844 arm_parse_arch (const char *str)
25845 {
25846 const struct arm_arch_option_table *opt;
25847 const char *ext = strchr (str, '+');
25848 size_t len;
25849
25850 if (ext != NULL)
25851 len = ext - str;
25852 else
25853 len = strlen (str);
25854
25855 if (len == 0)
25856 {
25857 as_bad (_("missing architecture name `%s'"), str);
25858 return FALSE;
25859 }
25860
25861 for (opt = arm_archs; opt->name != NULL; opt++)
25862 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25863 {
25864 march_cpu_opt = &opt->value;
25865 march_fpu_opt = &opt->default_fpu;
25866 strcpy (selected_cpu_name, opt->name);
25867
25868 if (ext != NULL)
25869 return arm_parse_extension (ext, &march_cpu_opt);
25870
25871 return TRUE;
25872 }
25873
25874 as_bad (_("unknown architecture `%s'\n"), str);
25875 return FALSE;
25876 }
25877
25878 static bfd_boolean
25879 arm_parse_fpu (const char * str)
25880 {
25881 const struct arm_option_fpu_value_table * opt;
25882
25883 for (opt = arm_fpus; opt->name != NULL; opt++)
25884 if (streq (opt->name, str))
25885 {
25886 mfpu_opt = &opt->value;
25887 return TRUE;
25888 }
25889
25890 as_bad (_("unknown floating point format `%s'\n"), str);
25891 return FALSE;
25892 }
25893
25894 static bfd_boolean
25895 arm_parse_float_abi (const char * str)
25896 {
25897 const struct arm_option_value_table * opt;
25898
25899 for (opt = arm_float_abis; opt->name != NULL; opt++)
25900 if (streq (opt->name, str))
25901 {
25902 mfloat_abi_opt = opt->value;
25903 return TRUE;
25904 }
25905
25906 as_bad (_("unknown floating point abi `%s'\n"), str);
25907 return FALSE;
25908 }
25909
#ifdef OBJ_ELF
/* Handle -meabi=STR: look STR up in arm_eabis and record the EABI version
   flags in meabi_flags.  Returns TRUE on a match, FALSE (after as_bad)
   otherwise.  */
static bfd_boolean
arm_parse_eabi (const char * str)
{
  const struct arm_option_value_table *entry = arm_eabis;

  for (; entry->name != NULL; entry++)
    if (streq (entry->name, str))
      break;

  if (entry->name == NULL)
    {
      as_bad (_("unknown EABI `%s'\n"), str);
      return FALSE;
    }

  meabi_flags = entry->value;
  return TRUE;
}
#endif
25926
25927 static bfd_boolean
25928 arm_parse_it_mode (const char * str)
25929 {
25930 bfd_boolean ret = TRUE;
25931
25932 if (streq ("arm", str))
25933 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
25934 else if (streq ("thumb", str))
25935 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
25936 else if (streq ("always", str))
25937 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
25938 else if (streq ("never", str))
25939 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
25940 else
25941 {
25942 as_bad (_("unknown implicit IT mode `%s', should be "\
25943 "arm, thumb, always, or never."), str);
25944 ret = FALSE;
25945 }
25946
25947 return ret;
25948 }
25949
25950 static bfd_boolean
25951 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED)
25952 {
25953 codecomposer_syntax = TRUE;
25954 arm_comment_chars[0] = ';';
25955 arm_line_separator_chars[0] = 0;
25956 return TRUE;
25957 }
25958
/* Long options taking a sub-argument; dispatched by the prefix match in
   md_parse_option.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {"mccs", N_("\t\t\t  TI CodeComposer Studio syntax compatibility mode"),
   arm_ccs_mode, NULL},
  {NULL, NULL, 0, NULL}
};
25979
/* GAS target hook: handle command-line option C (with argument ARG, which
   may be NULL).  Fixed options are handled inline; everything else is
   tried against, in order, the arm_opts table, the arm_legacy_opts table
   and the arm_long_opts prefix table.  Returns 1 if the option was
   consumed, 0 if it is unknown.  */
int
md_parse_option (int c, const char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.	*/
      return 0;

    default:
      /* Simple flag options: exact match on the option text sets *var.  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Legacy options: like the above but *var receives a pointer to the
	 table entry's feature value rather than a copy.  */
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  The "- 1" compensates for
		 the option's leading character already consumed as C.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
26070
26071 void
26072 md_show_usage (FILE * fp)
26073 {
26074 struct arm_option_table *opt;
26075 struct arm_long_option_table *lopt;
26076
26077 fprintf (fp, _(" ARM-specific assembler options:\n"));
26078
26079 for (opt = arm_opts; opt->option != NULL; opt++)
26080 if (opt->help != NULL)
26081 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
26082
26083 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
26084 if (lopt->help != NULL)
26085 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
26086
26087 #ifdef OPTION_EB
26088 fprintf (fp, _("\
26089 -EB assemble code for a big-endian cpu\n"));
26090 #endif
26091
26092 #ifdef OPTION_EL
26093 fprintf (fp, _("\
26094 -EL assemble code for a little-endian cpu\n"));
26095 #endif
26096
26097 fprintf (fp, _("\
26098 --fix-v4bx Allow BX in ARMv4 code\n"));
26099 }
26100
26101
#ifdef OBJ_ELF
/* One entry per EABI Tag_CPU_arch value (VAL) and the feature set that
   implies it (FLAGS).  */
typedef struct
{
  int val;
  arm_feature_set flags;
} cpu_arch_ver_table;

/* Mapping from CPU features to EABI CPU arch values.  As a general rule, table
   must be sorted least features first but some reordering is needed, eg. for
   Thumb-2 instructions to be detected as coming from ARMv6T2.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {1, ARM_ARCH_V4},
    {2, ARM_ARCH_V4T},
    {3, ARM_ARCH_V5},
    {3, ARM_ARCH_V5T},
    {4, ARM_ARCH_V5TE},
    {5, ARM_ARCH_V5TEJ},
    {6, ARM_ARCH_V6},
    {9, ARM_ARCH_V6K},
    {7, ARM_ARCH_V6Z},
    {11, ARM_ARCH_V6M},
    {12, ARM_ARCH_V6SM},
    {8, ARM_ARCH_V6T2},
    {10, ARM_ARCH_V7VE},
    {10, ARM_ARCH_V7R},
    {10, ARM_ARCH_V7M},
    {14, ARM_ARCH_V8A},
    {16, ARM_ARCH_V8M_BASE},
    {17, ARM_ARCH_V8M_MAIN},
    /* Sentinel: val 0 terminates the scan in aeabi_set_public_attributes.  */
    {0, ARM_ARCH_NONE}
};
26134
26135 /* Set an attribute if it has not already been set by the user. */
26136 static void
26137 aeabi_set_attribute_int (int tag, int value)
26138 {
26139 if (tag < 1
26140 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
26141 || !attributes_set_explicitly[tag])
26142 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
26143 }
26144
26145 static void
26146 aeabi_set_attribute_string (int tag, const char *value)
26147 {
26148 if (tag < 1
26149 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
26150 || !attributes_set_explicitly[tag])
26151 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
26152 }
26153
/* Set the public EABI object attributes.  Derives the architecture and
   feature build attributes (Tag_CPU_arch, Tag_CPU_arch_profile, FP/SIMD
   arch tags, etc.) from the instructions actually assembled merged with
   any -mcpu/-march/-mfpu selection, then records each attribute unless
   the user set it explicitly (see aeabi_set_attribute_*).  The order of
   the checks below is significant: later fix-ups override the result of
   the generic table scan.  */
void
aeabi_set_public_attributes (void)
{
  int arch;
  char profile;
  int virt_sec = 0;
  int fp16_optional = 0;
  arm_feature_set arm_arch = ARM_ARCH_NONE;
  arm_feature_set flags;
  arm_feature_set tmp;
  arm_feature_set arm_arch_v8m_base = ARM_ARCH_V8M_BASE;
  const cpu_arch_ver_table *p;

  /* Choose the architecture based on the capabilities of the requested cpu
     (if any) and/or the instructions actually used.  */
  ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
  ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);

  /* If any ARM-state instructions were used the base ARM ISA (v1) is
     implied; likewise any Thumb-state use implies at least v4T.  */
  if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
    ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);

  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
    ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);

  /* FLAGS is now the canonical feature set; remember it for later
     consumers (e.g. the ARMv8-A vs ARMv8-M disambiguation below).  */
  selected_cpu = flags;

  /* Allow the user to override the reported architecture.  */
  if (object_arch)
    {
      ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
      ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
    }

  /* We need to make sure that the attributes do not identify us as v6S-M
     when the only v6S-M feature in use is the Operating System Extensions.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_os))
    if (!ARM_CPU_HAS_FEATURE (flags, arm_arch_v6m_only))
      ARM_CLEAR_FEATURE (flags, flags, arm_ext_os);

  /* Scan the version table, clearing features as each entry consumes
     them; the last entry that contributes a new feature determines the
     reported arch.  */
  tmp = flags;
  arch = 0;
  for (p = cpu_arch_ver; p->val; p++)
    {
      if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
	{
	  arch = p->val;
	  arm_arch = p->flags;
	  ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
	}
    }

  /* The table lookup above finds the last architecture to contribute
     a new feature.  Unfortunately, Tag13 is a subset of the union of
     v6T2 and v7-M, so it is never seen as contributing a new feature.
     We can not search for the last entry which is entirely used,
     because if no CPU is specified we build up only those flags
     actually used.  Perhaps we should separate out the specified
     and implicit cases.  Avoid taking this path for -march=all by
     checking for contradictory v7-A / v7-M features.  */
  if (arch == TAG_CPU_ARCH_V7
      && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp))
    {
      arch = TAG_CPU_ARCH_V7E_M;
      arm_arch = (arm_feature_set) ARM_ARCH_V7EM;
    }

  /* Anything beyond the v8-M baseline feature set means v8-M mainline.  */
  ARM_CLEAR_FEATURE (tmp, flags, arm_arch_v8m_base);
  if (arch == TAG_CPU_ARCH_V8M_BASE && ARM_CPU_HAS_FEATURE (tmp, arm_arch_any))
    {
      arch = TAG_CPU_ARCH_V8M_MAIN;
      arm_arch = (arm_feature_set) ARM_ARCH_V8M_MAIN;
    }

  /* In cpu_arch_ver ARMv8-A is before ARMv8-M for atomics to be detected as
     coming from ARMv8-A.  However, since ARMv8-A has more instructions than
     ARMv8-M, -march=all must be detected as ARMv8-A.  */
  if (arch == TAG_CPU_ARCH_V8M_MAIN
      && ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    {
      arch = TAG_CPU_ARCH_V8;
      arm_arch = (arm_feature_set) ARM_ARCH_V8A;
    }

  /* Tag_CPU_name.  An "armv..." name is upper-cased in place before being
     recorded.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      if (strncmp (q, "armv", 4) == 0)
	{
	  int i;

	  q += 4;
	  for (i = 0; q[i]; i++)
	    q[i] = TOUPPER (q[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  Atomics without the M-only marker imply the
     A profile (v8-A); otherwise fall through R, then M, else unset.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || (ARM_CPU_HAS_FEATURE (flags, arm_ext_atomics)
	  && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only)))
    profile = 'A';
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
    profile = 'R';
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
    profile = 'M';
  else
    profile = '\0';

  if (profile != '\0')
    aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);

  /* Tag_DSP_extension.  Only set when DSP instructions were used that the
     reported base architecture does not itself provide.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_dsp))
    {
      arm_feature_set ext;

      /* DSP instructions not in architecture.  */
      ARM_CLEAR_FEATURE (ext, flags, arm_arch);
      if (ARM_CPU_HAS_FEATURE (ext, arm_ext_dsp))
	aeabi_set_attribute_int (Tag_DSP_extension, 1);
    }

  /* Tag_ARM_ISA_use.  The arch == 0 case covers input where no
     architecture could be determined at all.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || arch == 0)
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use.  3 = M-profile-only Thumb, 2 = Thumb-2,
     1 = original 16-bit Thumb.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || arch == 0)
    {
      int thumb_isa_use;

      if (!ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
	  && ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only))
	thumb_isa_use = 3;
      else if (ARM_CPU_HAS_FEATURE (flags, arm_arch_t2))
	thumb_isa_use = 2;
      else
	thumb_isa_use = 1;
      aeabi_set_attribute_int (Tag_THUMB_ISA_use, thumb_isa_use);
    }

  /* Tag_VFP_arch.  Checked from newest FP extension down; the D32
     (32 double registers) variant of each level gets the smaller tag
     value per the ABI's encoding.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 7 : 8);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    {
      fp16_optional = 1;
      aeabi_set_attribute_int (Tag_VFP_arch, 3);
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    {
      aeabi_set_attribute_int (Tag_VFP_arch, 4);
      fp16_optional = 1;
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_ABI_HardFP_use.  Single-precision-only VFP (v1xd without v1).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v8_1))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 4);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    {
      if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
	}
      else
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
	  fp16_optional = 1;
	}
    }

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  Only emitted for
     FP/SIMD levels where half-precision is an optional extension.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);

  /* Tag_DIV_use.

     We set Tag_DIV_use to two when integer divide instructions have been used
     in ARM state, or when Thumb integer divide instructions have been used,
     but we have no architecture profile set, nor have we any ARM instructions.

     For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
     by the base architecture.

     For new architectures we will have to check these tests.  */
  gas_assert (arch <= TAG_CPU_ARCH_V8
	      || (arch >= TAG_CPU_ARCH_V8M_BASE
		  && arch <= TAG_CPU_ARCH_V8M_MAIN));
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
    aeabi_set_attribute_int (Tag_DIV_use, 0);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
	   || (profile == '\0'
	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
    aeabi_set_attribute_int (Tag_DIV_use, 2);

  /* Tag_MP_extension_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
    aeabi_set_attribute_int (Tag_MPextension_use, 1);

  /* Tag Virtualization_use.  Bit 0 = Security Extensions, bit 1 =
     Virtualization Extensions.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
    virt_sec |= 1;
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
    virt_sec |= 2;
  if (virt_sec != 0)
    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
}
26401
26402 /* Add the default contents for the .ARM.attributes section. */
26403 void
26404 arm_md_end (void)
26405 {
26406 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
26407 return;
26408
26409 aeabi_set_public_attributes ();
26410 }
26411 #endif /* OBJ_ELF */
26412
26413
26414 /* Parse a .cpu directive. */
26415
26416 static void
26417 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
26418 {
26419 const struct arm_cpu_option_table *opt;
26420 char *name;
26421 char saved_char;
26422
26423 name = input_line_pointer;
26424 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26425 input_line_pointer++;
26426 saved_char = *input_line_pointer;
26427 *input_line_pointer = 0;
26428
26429 /* Skip the first "all" entry. */
26430 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
26431 if (streq (opt->name, name))
26432 {
26433 mcpu_cpu_opt = &opt->value;
26434 selected_cpu = opt->value;
26435 if (opt->canonical_name)
26436 strcpy (selected_cpu_name, opt->canonical_name);
26437 else
26438 {
26439 int i;
26440 for (i = 0; opt->name[i]; i++)
26441 selected_cpu_name[i] = TOUPPER (opt->name[i]);
26442
26443 selected_cpu_name[i] = 0;
26444 }
26445 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26446 *input_line_pointer = saved_char;
26447 demand_empty_rest_of_line ();
26448 return;
26449 }
26450 as_bad (_("unknown cpu `%s'"), name);
26451 *input_line_pointer = saved_char;
26452 ignore_rest_of_line ();
26453 }
26454
26455
26456 /* Parse a .arch directive. */
26457
26458 static void
26459 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
26460 {
26461 const struct arm_arch_option_table *opt;
26462 char saved_char;
26463 char *name;
26464
26465 name = input_line_pointer;
26466 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26467 input_line_pointer++;
26468 saved_char = *input_line_pointer;
26469 *input_line_pointer = 0;
26470
26471 /* Skip the first "all" entry. */
26472 for (opt = arm_archs + 1; opt->name != NULL; opt++)
26473 if (streq (opt->name, name))
26474 {
26475 mcpu_cpu_opt = &opt->value;
26476 selected_cpu = opt->value;
26477 strcpy (selected_cpu_name, opt->name);
26478 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26479 *input_line_pointer = saved_char;
26480 demand_empty_rest_of_line ();
26481 return;
26482 }
26483
26484 as_bad (_("unknown architecture `%s'\n"), name);
26485 *input_line_pointer = saved_char;
26486 ignore_rest_of_line ();
26487 }
26488
26489
26490 /* Parse a .object_arch directive. */
26491
26492 static void
26493 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
26494 {
26495 const struct arm_arch_option_table *opt;
26496 char saved_char;
26497 char *name;
26498
26499 name = input_line_pointer;
26500 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26501 input_line_pointer++;
26502 saved_char = *input_line_pointer;
26503 *input_line_pointer = 0;
26504
26505 /* Skip the first "all" entry. */
26506 for (opt = arm_archs + 1; opt->name != NULL; opt++)
26507 if (streq (opt->name, name))
26508 {
26509 object_arch = &opt->value;
26510 *input_line_pointer = saved_char;
26511 demand_empty_rest_of_line ();
26512 return;
26513 }
26514
26515 as_bad (_("unknown architecture `%s'\n"), name);
26516 *input_line_pointer = saved_char;
26517 ignore_rest_of_line ();
26518 }
26519
26520 /* Parse a .arch_extension directive. */
26521
26522 static void
26523 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
26524 {
26525 const struct arm_option_extension_value_table *opt;
26526 const arm_feature_set arm_any = ARM_ANY;
26527 char saved_char;
26528 char *name;
26529 int adding_value = 1;
26530
26531 name = input_line_pointer;
26532 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26533 input_line_pointer++;
26534 saved_char = *input_line_pointer;
26535 *input_line_pointer = 0;
26536
26537 if (strlen (name) >= 2
26538 && strncmp (name, "no", 2) == 0)
26539 {
26540 adding_value = 0;
26541 name += 2;
26542 }
26543
26544 for (opt = arm_extensions; opt->name != NULL; opt++)
26545 if (streq (opt->name, name))
26546 {
26547 int i, nb_allowed_archs =
26548 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[i]);
26549 for (i = 0; i < nb_allowed_archs; i++)
26550 {
26551 /* Empty entry. */
26552 if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
26553 continue;
26554 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *mcpu_cpu_opt))
26555 break;
26556 }
26557
26558 if (i == nb_allowed_archs)
26559 {
26560 as_bad (_("architectural extension `%s' is not allowed for the "
26561 "current base architecture"), name);
26562 break;
26563 }
26564
26565 if (adding_value)
26566 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_cpu,
26567 opt->merge_value);
26568 else
26569 ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, opt->clear_value);
26570
26571 mcpu_cpu_opt = &selected_cpu;
26572 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26573 *input_line_pointer = saved_char;
26574 demand_empty_rest_of_line ();
26575 return;
26576 }
26577
26578 if (opt->name == NULL)
26579 as_bad (_("unknown architecture extension `%s'\n"), name);
26580
26581 *input_line_pointer = saved_char;
26582 ignore_rest_of_line ();
26583 }
26584
26585 /* Parse a .fpu directive. */
26586
26587 static void
26588 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
26589 {
26590 const struct arm_option_fpu_value_table *opt;
26591 char saved_char;
26592 char *name;
26593
26594 name = input_line_pointer;
26595 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26596 input_line_pointer++;
26597 saved_char = *input_line_pointer;
26598 *input_line_pointer = 0;
26599
26600 for (opt = arm_fpus; opt->name != NULL; opt++)
26601 if (streq (opt->name, name))
26602 {
26603 mfpu_opt = &opt->value;
26604 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26605 *input_line_pointer = saved_char;
26606 demand_empty_rest_of_line ();
26607 return;
26608 }
26609
26610 as_bad (_("unknown floating point format `%s'\n"), name);
26611 *input_line_pointer = saved_char;
26612 ignore_rest_of_line ();
26613 }
26614
/* Copy symbol information.  Propagates the ARM-specific per-symbol flag
   word from SRC to DEST (ARM_GET_FLAG is defined elsewhere in this file;
   presumably it carries ARM/Thumb state — confirm at its definition).
   All generic symbol attributes are copied by common gas code.  */

void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
26622
26623 #ifdef OBJ_ELF
/* Given a symbolic attribute NAME, return the proper integer value.
   Returns -1 if the attribute is not known (or if NAME is NULL).  Used
   so that .eabi_attribute directives can name tags symbolically instead
   of by number.  */

int
arm_convert_symbolic_attribute (const char *name)
{
  /* Table mapping each supported tag name (as a string) to its numeric
     Tag_* value from elf/arm.h.  */
  static const struct
  {
    const char * name;
    const int tag;
  }
  attribute_table[] =
  {
    /* When you modify this table you should
       also modify the list in doc/c-arm.texi.  */
#define T(tag) {#tag, tag}
    T (Tag_CPU_raw_name),
    T (Tag_CPU_name),
    T (Tag_CPU_arch),
    T (Tag_CPU_arch_profile),
    T (Tag_ARM_ISA_use),
    T (Tag_THUMB_ISA_use),
    T (Tag_FP_arch),
    T (Tag_VFP_arch),
    T (Tag_WMMX_arch),
    T (Tag_Advanced_SIMD_arch),
    T (Tag_PCS_config),
    T (Tag_ABI_PCS_R9_use),
    T (Tag_ABI_PCS_RW_data),
    T (Tag_ABI_PCS_RO_data),
    T (Tag_ABI_PCS_GOT_use),
    T (Tag_ABI_PCS_wchar_t),
    T (Tag_ABI_FP_rounding),
    T (Tag_ABI_FP_denormal),
    T (Tag_ABI_FP_exceptions),
    T (Tag_ABI_FP_user_exceptions),
    T (Tag_ABI_FP_number_model),
    T (Tag_ABI_align_needed),
    T (Tag_ABI_align8_needed),
    T (Tag_ABI_align_preserved),
    T (Tag_ABI_align8_preserved),
    T (Tag_ABI_enum_size),
    T (Tag_ABI_HardFP_use),
    T (Tag_ABI_VFP_args),
    T (Tag_ABI_WMMX_args),
    T (Tag_ABI_optimization_goals),
    T (Tag_ABI_FP_optimization_goals),
    T (Tag_compatibility),
    T (Tag_CPU_unaligned_access),
    T (Tag_FP_HP_extension),
    T (Tag_VFP_HP_extension),
    T (Tag_ABI_FP_16bit_format),
    T (Tag_MPextension_use),
    T (Tag_DIV_use),
    T (Tag_nodefaults),
    T (Tag_also_compatible_with),
    T (Tag_conformance),
    T (Tag_T2EE_use),
    T (Tag_Virtualization_use),
    T (Tag_DSP_extension),
    /* We deliberately do not include Tag_MPextension_use_legacy.  */
#undef T
  };
  unsigned int i;

  if (name == NULL)
    return -1;

  /* Linear search is fine: this only runs once per directive.  */
  for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
    if (streq (name, attribute_table[i].name))
      return attribute_table[i].tag;

  return -1;
}
26698
26699
26700 /* Apply sym value for relocations only in the case that they are for
26701 local symbols in the same segment as the fixup and you have the
26702 respective architectural feature for blx and simple switches. */
26703 int
26704 arm_apply_sym_value (struct fix * fixP, segT this_seg)
26705 {
26706 if (fixP->fx_addsy
26707 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
26708 /* PR 17444: If the local symbol is in a different section then a reloc
26709 will always be generated for it, so applying the symbol value now
26710 will result in a double offset being stored in the relocation. */
26711 && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
26712 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
26713 {
26714 switch (fixP->fx_r_type)
26715 {
26716 case BFD_RELOC_ARM_PCREL_BLX:
26717 case BFD_RELOC_THUMB_PCREL_BRANCH23:
26718 if (ARM_IS_FUNC (fixP->fx_addsy))
26719 return 1;
26720 break;
26721
26722 case BFD_RELOC_ARM_PCREL_CALL:
26723 case BFD_RELOC_THUMB_PCREL_BLX:
26724 if (THUMB_IS_FUNC (fixP->fx_addsy))
26725 return 1;
26726 break;
26727
26728 default:
26729 break;
26730 }
26731
26732 }
26733 return 0;
26734 }
26735 #endif /* OBJ_ELF */