[arm] Automatically enable CRC instructions on supported ARMv8-A CPUs.
[binutils-gdb.git] / gas / config / tc-arm.c
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2016 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8
9 This file is part of GAS, the GNU Assembler.
10
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
14 any later version.
15
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24 02110-1301, USA. */
25
26 #include "as.h"
27 #include <limits.h>
28 #include <stdarg.h>
29 #define NO_RELOC 0
30 #include "safe-ctype.h"
31 #include "subsegs.h"
32 #include "obstack.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
35
36 #ifdef OBJ_ELF
37 #include "elf/arm.h"
38 #include "dw2gencfi.h"
39 #endif
40
41 #include "dwarf2dbg.h"
42
43 #ifdef OBJ_ELF
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
46
47 /* This structure holds the unwinding state. */
48
49 static struct
50 {
51 symbolS * proc_start;
52 symbolS * table_entry;
53 symbolS * personality_routine;
54 int personality_index;
55 /* The segment containing the function. */
56 segT saved_seg;
57 subsegT saved_subseg;
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes;
60 int opcode_count;
61 int opcode_alloc;
62 /* The number of bytes pushed to the stack. */
63 offsetT frame_size;
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
70 offsetT fp_offset;
71 int fp_reg;
72 /* Nonzero if an unwind_setfp directive has been seen. */
73 unsigned fp_used:1;
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored:1;
76 } unwind;
77
78 #endif /* OBJ_ELF */
79
80 /* Results from operand parsing worker functions. */
81
82 typedef enum
83 {
84 PARSE_OPERAND_SUCCESS,
85 PARSE_OPERAND_FAIL,
86 PARSE_OPERAND_FAIL_NO_BACKTRACK
87 } parse_operand_result;
88
89 enum arm_float_abi
90 {
91 ARM_FLOAT_ABI_HARD,
92 ARM_FLOAT_ABI_SOFTFP,
93 ARM_FLOAT_ABI_SOFT
94 };
95
96 /* Types of processor to assemble for. */
97 #ifndef CPU_DEFAULT
98 /* The code that was here used to select a default CPU depending on compiler
99 pre-defines which were only present when doing native builds, thus
100 changing gas' default behaviour depending upon the build host.
101
102 If you have a target that requires a default CPU option then the you
103 should define CPU_DEFAULT here. */
104 #endif
105
106 #ifndef FPU_DEFAULT
107 # ifdef TE_LINUX
108 # define FPU_DEFAULT FPU_ARCH_FPA
109 # elif defined (TE_NetBSD)
110 # ifdef OBJ_ELF
111 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
112 # else
113 /* Legacy a.out format. */
114 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
115 # endif
116 # elif defined (TE_VXWORKS)
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
118 # else
119 /* For backwards compatibility, default to FPA. */
120 # define FPU_DEFAULT FPU_ARCH_FPA
121 # endif
122 #endif /* ifndef FPU_DEFAULT */
123
124 #define streq(a, b) (strcmp (a, b) == 0)
125
126 static arm_feature_set cpu_variant;
127 static arm_feature_set arm_arch_used;
128 static arm_feature_set thumb_arch_used;
129
130 /* Flags stored in private area of BFD structure. */
131 static int uses_apcs_26 = FALSE;
132 static int atpcs = FALSE;
133 static int support_interwork = FALSE;
134 static int uses_apcs_float = FALSE;
135 static int pic_code = FALSE;
136 static int fix_v4bx = FALSE;
137 /* Warn on using deprecated features. */
138 static int warn_on_deprecated = TRUE;
139
140 /* Understand CodeComposer Studio assembly syntax. */
141 bfd_boolean codecomposer_syntax = FALSE;
142
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
145 assembly flags. */
146 static const arm_feature_set *legacy_cpu = NULL;
147 static const arm_feature_set *legacy_fpu = NULL;
148
149 static const arm_feature_set *mcpu_cpu_opt = NULL;
150 static const arm_feature_set *mcpu_fpu_opt = NULL;
151 static const arm_feature_set *march_cpu_opt = NULL;
152 static const arm_feature_set *march_fpu_opt = NULL;
153 static const arm_feature_set *mfpu_opt = NULL;
154 static const arm_feature_set *object_arch = NULL;
155
156 /* Constants for known architecture features. */
157 static const arm_feature_set fpu_default = FPU_DEFAULT;
158 static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V1;
159 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
160 static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V3;
161 static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED = FPU_ARCH_NEON_V1;
162 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
163 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
164 #ifdef OBJ_ELF
165 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
166 #endif
167 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
168
169 #ifdef CPU_DEFAULT
170 static const arm_feature_set cpu_default = CPU_DEFAULT;
171 #endif
172
173 static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
174 static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
175 static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
176 static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
177 static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
178 static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
179 static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
180 static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
181 static const arm_feature_set arm_ext_v4t_5 =
182 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
183 static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
184 static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
185 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
186 static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
187 static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
188 static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
189 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
190 static const arm_feature_set arm_ext_v6m = ARM_FEATURE_CORE_LOW (ARM_EXT_V6M);
191 static const arm_feature_set arm_ext_v6_notm =
192 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
193 static const arm_feature_set arm_ext_v6_dsp =
194 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
195 static const arm_feature_set arm_ext_barrier =
196 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
197 static const arm_feature_set arm_ext_msr =
198 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
199 static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
200 static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
201 static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
202 static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
203 #ifdef OBJ_ELF
204 static const arm_feature_set arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
205 #endif
206 static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
207 static const arm_feature_set arm_ext_m =
208 ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_OS | ARM_EXT_V7M,
209 ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
210 static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
211 static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
212 static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
213 static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
214 static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
215 static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
216 static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
217 static const arm_feature_set arm_ext_v8m_main =
218 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN);
219 /* Instructions in ARMv8-M only found in M profile architectures. */
220 static const arm_feature_set arm_ext_v8m_m_only =
221 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
222 static const arm_feature_set arm_ext_v6t2_v8m =
223 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
224 /* Instructions shared between ARMv8-A and ARMv8-M. */
225 static const arm_feature_set arm_ext_atomics =
226 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
227 #ifdef OBJ_ELF
228 /* DSP instructions Tag_DSP_extension refers to. */
229 static const arm_feature_set arm_ext_dsp =
230 ARM_FEATURE_CORE_LOW (ARM_EXT_V5E | ARM_EXT_V5ExP | ARM_EXT_V6_DSP);
231 #endif
232 static const arm_feature_set arm_ext_ras =
233 ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS);
234 /* FP16 instructions. */
235 static const arm_feature_set arm_ext_fp16 =
236 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
237
238 static const arm_feature_set arm_arch_any = ARM_ANY;
239 static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED = ARM_FEATURE (-1, -1, -1);
240 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
241 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
242 #ifdef OBJ_ELF
243 static const arm_feature_set arm_arch_v6m_only = ARM_ARCH_V6M_ONLY;
244 #endif
245
246 static const arm_feature_set arm_cext_iwmmxt2 =
247 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
248 static const arm_feature_set arm_cext_iwmmxt =
249 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
250 static const arm_feature_set arm_cext_xscale =
251 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
252 static const arm_feature_set arm_cext_maverick =
253 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
254 static const arm_feature_set fpu_fpa_ext_v1 =
255 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
256 static const arm_feature_set fpu_fpa_ext_v2 =
257 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
258 static const arm_feature_set fpu_vfp_ext_v1xd =
259 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
260 static const arm_feature_set fpu_vfp_ext_v1 =
261 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
262 static const arm_feature_set fpu_vfp_ext_v2 =
263 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
264 static const arm_feature_set fpu_vfp_ext_v3xd =
265 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
266 static const arm_feature_set fpu_vfp_ext_v3 =
267 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
268 static const arm_feature_set fpu_vfp_ext_d32 =
269 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
270 static const arm_feature_set fpu_neon_ext_v1 =
271 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
272 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
273 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
274 #ifdef OBJ_ELF
275 static const arm_feature_set fpu_vfp_fp16 =
276 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
277 static const arm_feature_set fpu_neon_ext_fma =
278 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
279 #endif
280 static const arm_feature_set fpu_vfp_ext_fma =
281 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
282 static const arm_feature_set fpu_vfp_ext_armv8 =
283 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
284 static const arm_feature_set fpu_vfp_ext_armv8xd =
285 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
286 static const arm_feature_set fpu_neon_ext_armv8 =
287 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
288 static const arm_feature_set fpu_crypto_ext_armv8 =
289 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
290 static const arm_feature_set crc_ext_armv8 =
291 ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
292 static const arm_feature_set fpu_neon_ext_v8_1 =
293 ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA);
294
295 static int mfloat_abi_opt = -1;
296 /* Record user cpu selection for object attributes. */
297 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
298 /* Must be long enough to hold any of the names in arm_cpus. */
299 static char selected_cpu_name[20];
300
301 extern FLONUM_TYPE generic_floating_point_number;
302
303 /* Return if no cpu was selected on command-line. */
304 static bfd_boolean
305 no_cpu_selected (void)
306 {
307 return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
308 }
309
310 #ifdef OBJ_ELF
311 # ifdef EABI_DEFAULT
312 static int meabi_flags = EABI_DEFAULT;
313 # else
314 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
315 # endif
316
317 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
318
319 bfd_boolean
320 arm_is_eabi (void)
321 {
322 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
323 }
324 #endif
325
326 #ifdef OBJ_ELF
327 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
328 symbolS * GOT_symbol;
329 #endif
330
331 /* 0: assemble for ARM,
332 1: assemble for Thumb,
333 2: assemble for Thumb even though target CPU does not support thumb
334 instructions. */
335 static int thumb_mode = 0;
336 /* A value distinct from the possible values for thumb_mode that we
337 can use to record whether thumb_mode has been copied into the
338 tc_frag_data field of a frag. */
339 #define MODE_RECORDED (1 << 4)
340
341 /* Specifies the intrinsic IT insn behavior mode. */
342 enum implicit_it_mode
343 {
344 IMPLICIT_IT_MODE_NEVER = 0x00,
345 IMPLICIT_IT_MODE_ARM = 0x01,
346 IMPLICIT_IT_MODE_THUMB = 0x02,
347 IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
348 };
349 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
350
351 /* If unified_syntax is true, we are processing the new unified
352 ARM/Thumb syntax. Important differences from the old ARM mode:
353
354 - Immediate operands do not require a # prefix.
355 - Conditional affixes always appear at the end of the
356 instruction. (For backward compatibility, those instructions
357 that formerly had them in the middle, continue to accept them
358 there.)
359 - The IT instruction may appear, and if it does is validated
360 against subsequent conditional affixes. It does not generate
361 machine code.
362
363 Important differences from the old Thumb mode:
364
365 - Immediate operands do not require a # prefix.
366 - Most of the V6T2 instructions are only available in unified mode.
367 - The .N and .W suffixes are recognized and honored (it is an error
368 if they cannot be honored).
369 - All instructions set the flags if and only if they have an 's' affix.
370 - Conditional affixes may be used. They are validated against
371 preceding IT instructions. Unlike ARM mode, you cannot use a
372 conditional affix except in the scope of an IT instruction. */
373
374 static bfd_boolean unified_syntax = FALSE;
375
376 /* An immediate operand can start with #, and ld*, st*, pld operands
377 can contain [ and ]. We need to tell APP not to elide whitespace
378 before a [, which can appear as the first operand for pld.
379 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
380 const char arm_symbol_chars[] = "#[]{}";
381
382 enum neon_el_type
383 {
384 NT_invtype,
385 NT_untyped,
386 NT_integer,
387 NT_float,
388 NT_poly,
389 NT_signed,
390 NT_unsigned
391 };
392
393 struct neon_type_el
394 {
395 enum neon_el_type type;
396 unsigned size;
397 };
398
399 #define NEON_MAX_TYPE_ELS 4
400
401 struct neon_type
402 {
403 struct neon_type_el el[NEON_MAX_TYPE_ELS];
404 unsigned elems;
405 };
406
407 enum it_instruction_type
408 {
409 OUTSIDE_IT_INSN,
410 INSIDE_IT_INSN,
411 INSIDE_IT_LAST_INSN,
412 IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
413 if inside, should be the last one. */
414 NEUTRAL_IT_INSN, /* This could be either inside or outside,
415 i.e. BKPT and NOP. */
416 IT_INSN /* The IT insn has been parsed. */
417 };
418
419 /* The maximum number of operands we need. */
420 #define ARM_IT_MAX_OPERANDS 6
421
422 struct arm_it
423 {
424 const char * error;
425 unsigned long instruction;
426 int size;
427 int size_req;
428 int cond;
429 /* "uncond_value" is set to the value in place of the conditional field in
430 unconditional versions of the instruction, or -1 if nothing is
431 appropriate. */
432 int uncond_value;
433 struct neon_type vectype;
434 /* This does not indicate an actual NEON instruction, only that
435 the mnemonic accepts neon-style type suffixes. */
436 int is_neon;
437 /* Set to the opcode if the instruction needs relaxation.
438 Zero if the instruction is not relaxed. */
439 unsigned long relax;
440 struct
441 {
442 bfd_reloc_code_real_type type;
443 expressionS exp;
444 int pc_rel;
445 } reloc;
446
447 enum it_instruction_type it_insn_type;
448
449 struct
450 {
451 unsigned reg;
452 signed int imm;
453 struct neon_type_el vectype;
454 unsigned present : 1; /* Operand present. */
455 unsigned isreg : 1; /* Operand was a register. */
456 unsigned immisreg : 1; /* .imm field is a second register. */
457 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
458 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
459 unsigned immisfloat : 1; /* Immediate was parsed as a float. */
460 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
461 instructions. This allows us to disambiguate ARM <-> vector insns. */
462 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
463 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
464 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
465 unsigned issingle : 1; /* Operand is VFP single-precision register. */
466 unsigned hasreloc : 1; /* Operand has relocation suffix. */
467 unsigned writeback : 1; /* Operand has trailing ! */
468 unsigned preind : 1; /* Preindexed address. */
469 unsigned postind : 1; /* Postindexed address. */
470 unsigned negative : 1; /* Index register was negated. */
471 unsigned shifted : 1; /* Shift applied to operation. */
472 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
473 } operands[ARM_IT_MAX_OPERANDS];
474 };
475
476 static struct arm_it inst;
477
478 #define NUM_FLOAT_VALS 8
479
480 const char * fp_const[] =
481 {
482 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
483 };
484
485 /* Number of littlenums required to hold an extended precision number. */
486 #define MAX_LITTLENUMS 6
487
488 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
489
490 #define FAIL (-1)
491 #define SUCCESS (0)
492
493 #define SUFF_S 1
494 #define SUFF_D 2
495 #define SUFF_E 3
496 #define SUFF_P 4
497
498 #define CP_T_X 0x00008000
499 #define CP_T_Y 0x00400000
500
501 #define CONDS_BIT 0x00100000
502 #define LOAD_BIT 0x00100000
503
504 #define DOUBLE_LOAD_FLAG 0x00000001
505
506 struct asm_cond
507 {
508 const char * template_name;
509 unsigned long value;
510 };
511
512 #define COND_ALWAYS 0xE
513
514 struct asm_psr
515 {
516 const char * template_name;
517 unsigned long field;
518 };
519
520 struct asm_barrier_opt
521 {
522 const char * template_name;
523 unsigned long value;
524 const arm_feature_set arch;
525 };
526
527 /* The bit that distinguishes CPSR and SPSR. */
528 #define SPSR_BIT (1 << 22)
529
530 /* The individual PSR flag bits. */
531 #define PSR_c (1 << 16)
532 #define PSR_x (1 << 17)
533 #define PSR_s (1 << 18)
534 #define PSR_f (1 << 19)
535
536 struct reloc_entry
537 {
538 const char * name;
539 bfd_reloc_code_real_type reloc;
540 };
541
542 enum vfp_reg_pos
543 {
544 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
545 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
546 };
547
548 enum vfp_ldstm_type
549 {
550 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
551 };
552
553 /* Bits for DEFINED field in neon_typed_alias. */
554 #define NTA_HASTYPE 1
555 #define NTA_HASINDEX 2
556
557 struct neon_typed_alias
558 {
559 unsigned char defined;
560 unsigned char index;
561 struct neon_type_el eltype;
562 };
563
564 /* ARM register categories. This includes coprocessor numbers and various
565 architecture extensions' registers. */
566 enum arm_reg_type
567 {
568 REG_TYPE_RN,
569 REG_TYPE_CP,
570 REG_TYPE_CN,
571 REG_TYPE_FN,
572 REG_TYPE_VFS,
573 REG_TYPE_VFD,
574 REG_TYPE_NQ,
575 REG_TYPE_VFSD,
576 REG_TYPE_NDQ,
577 REG_TYPE_NSDQ,
578 REG_TYPE_VFC,
579 REG_TYPE_MVF,
580 REG_TYPE_MVD,
581 REG_TYPE_MVFX,
582 REG_TYPE_MVDX,
583 REG_TYPE_MVAX,
584 REG_TYPE_DSPSC,
585 REG_TYPE_MMXWR,
586 REG_TYPE_MMXWC,
587 REG_TYPE_MMXWCG,
588 REG_TYPE_XSCALE,
589 REG_TYPE_RNB
590 };
591
592 /* Structure for a hash table entry for a register.
593 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
594 information which states whether a vector type or index is specified (for a
595 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
596 struct reg_entry
597 {
598 const char * name;
599 unsigned int number;
600 unsigned char type;
601 unsigned char builtin;
602 struct neon_typed_alias * neon;
603 };
604
605 /* Diagnostics used when we don't get a register of the expected type. */
606 const char * const reg_expected_msgs[] =
607 {
608 N_("ARM register expected"),
609 N_("bad or missing co-processor number"),
610 N_("co-processor register expected"),
611 N_("FPA register expected"),
612 N_("VFP single precision register expected"),
613 N_("VFP/Neon double precision register expected"),
614 N_("Neon quad precision register expected"),
615 N_("VFP single or double precision register expected"),
616 N_("Neon double or quad precision register expected"),
617 N_("VFP single, double or Neon quad precision register expected"),
618 N_("VFP system register expected"),
619 N_("Maverick MVF register expected"),
620 N_("Maverick MVD register expected"),
621 N_("Maverick MVFX register expected"),
622 N_("Maverick MVDX register expected"),
623 N_("Maverick MVAX register expected"),
624 N_("Maverick DSPSC register expected"),
625 N_("iWMMXt data register expected"),
626 N_("iWMMXt control register expected"),
627 N_("iWMMXt scalar register expected"),
628 N_("XScale accumulator register expected"),
629 };
630
631 /* Some well known registers that we refer to directly elsewhere. */
632 #define REG_R12 12
633 #define REG_SP 13
634 #define REG_LR 14
635 #define REG_PC 15
636
637 /* ARM instructions take 4bytes in the object file, Thumb instructions
638 take 2: */
639 #define INSN_SIZE 4
640
641 struct asm_opcode
642 {
643 /* Basic string to match. */
644 const char * template_name;
645
646 /* Parameters to instruction. */
647 unsigned int operands[8];
648
649 /* Conditional tag - see opcode_lookup. */
650 unsigned int tag : 4;
651
652 /* Basic instruction code. */
653 unsigned int avalue : 28;
654
655 /* Thumb-format instruction code. */
656 unsigned int tvalue;
657
658 /* Which architecture variant provides this instruction. */
659 const arm_feature_set * avariant;
660 const arm_feature_set * tvariant;
661
662 /* Function to call to encode instruction in ARM format. */
663 void (* aencode) (void);
664
665 /* Function to call to encode instruction in Thumb format. */
666 void (* tencode) (void);
667 };
668
669 /* Defines for various bits that we will want to toggle. */
670 #define INST_IMMEDIATE 0x02000000
671 #define OFFSET_REG 0x02000000
672 #define HWOFFSET_IMM 0x00400000
673 #define SHIFT_BY_REG 0x00000010
674 #define PRE_INDEX 0x01000000
675 #define INDEX_UP 0x00800000
676 #define WRITE_BACK 0x00200000
677 #define LDM_TYPE_2_OR_3 0x00400000
678 #define CPSI_MMOD 0x00020000
679
680 #define LITERAL_MASK 0xf000f000
681 #define OPCODE_MASK 0xfe1fffff
682 #define V4_STR_BIT 0x00000020
683 #define VLDR_VMOV_SAME 0x0040f000
684
685 #define T2_SUBS_PC_LR 0xf3de8f00
686
687 #define DATA_OP_SHIFT 21
688
689 #define T2_OPCODE_MASK 0xfe1fffff
690 #define T2_DATA_OP_SHIFT 21
691
692 #define A_COND_MASK 0xf0000000
693 #define A_PUSH_POP_OP_MASK 0x0fff0000
694
695 /* Opcodes for pushing/poping registers to/from the stack. */
696 #define A1_OPCODE_PUSH 0x092d0000
697 #define A2_OPCODE_PUSH 0x052d0004
698 #define A2_OPCODE_POP 0x049d0004
699
700 /* Codes to distinguish the arithmetic instructions. */
701 #define OPCODE_AND 0
702 #define OPCODE_EOR 1
703 #define OPCODE_SUB 2
704 #define OPCODE_RSB 3
705 #define OPCODE_ADD 4
706 #define OPCODE_ADC 5
707 #define OPCODE_SBC 6
708 #define OPCODE_RSC 7
709 #define OPCODE_TST 8
710 #define OPCODE_TEQ 9
711 #define OPCODE_CMP 10
712 #define OPCODE_CMN 11
713 #define OPCODE_ORR 12
714 #define OPCODE_MOV 13
715 #define OPCODE_BIC 14
716 #define OPCODE_MVN 15
717
718 #define T2_OPCODE_AND 0
719 #define T2_OPCODE_BIC 1
720 #define T2_OPCODE_ORR 2
721 #define T2_OPCODE_ORN 3
722 #define T2_OPCODE_EOR 4
723 #define T2_OPCODE_ADD 8
724 #define T2_OPCODE_ADC 10
725 #define T2_OPCODE_SBC 11
726 #define T2_OPCODE_SUB 13
727 #define T2_OPCODE_RSB 14
728
729 #define T_OPCODE_MUL 0x4340
730 #define T_OPCODE_TST 0x4200
731 #define T_OPCODE_CMN 0x42c0
732 #define T_OPCODE_NEG 0x4240
733 #define T_OPCODE_MVN 0x43c0
734
735 #define T_OPCODE_ADD_R3 0x1800
736 #define T_OPCODE_SUB_R3 0x1a00
737 #define T_OPCODE_ADD_HI 0x4400
738 #define T_OPCODE_ADD_ST 0xb000
739 #define T_OPCODE_SUB_ST 0xb080
740 #define T_OPCODE_ADD_SP 0xa800
741 #define T_OPCODE_ADD_PC 0xa000
742 #define T_OPCODE_ADD_I8 0x3000
743 #define T_OPCODE_SUB_I8 0x3800
744 #define T_OPCODE_ADD_I3 0x1c00
745 #define T_OPCODE_SUB_I3 0x1e00
746
747 #define T_OPCODE_ASR_R 0x4100
748 #define T_OPCODE_LSL_R 0x4080
749 #define T_OPCODE_LSR_R 0x40c0
750 #define T_OPCODE_ROR_R 0x41c0
751 #define T_OPCODE_ASR_I 0x1000
752 #define T_OPCODE_LSL_I 0x0000
753 #define T_OPCODE_LSR_I 0x0800
754
755 #define T_OPCODE_MOV_I8 0x2000
756 #define T_OPCODE_CMP_I8 0x2800
757 #define T_OPCODE_CMP_LR 0x4280
758 #define T_OPCODE_MOV_HR 0x4600
759 #define T_OPCODE_CMP_HR 0x4500
760
761 #define T_OPCODE_LDR_PC 0x4800
762 #define T_OPCODE_LDR_SP 0x9800
763 #define T_OPCODE_STR_SP 0x9000
764 #define T_OPCODE_LDR_IW 0x6800
765 #define T_OPCODE_STR_IW 0x6000
766 #define T_OPCODE_LDR_IH 0x8800
767 #define T_OPCODE_STR_IH 0x8000
768 #define T_OPCODE_LDR_IB 0x7800
769 #define T_OPCODE_STR_IB 0x7000
770 #define T_OPCODE_LDR_RW 0x5800
771 #define T_OPCODE_STR_RW 0x5000
772 #define T_OPCODE_LDR_RH 0x5a00
773 #define T_OPCODE_STR_RH 0x5200
774 #define T_OPCODE_LDR_RB 0x5c00
775 #define T_OPCODE_STR_RB 0x5400
776
777 #define T_OPCODE_PUSH 0xb400
778 #define T_OPCODE_POP 0xbc00
779
780 #define T_OPCODE_BRANCH 0xe000
781
782 #define THUMB_SIZE 2 /* Size of thumb instruction. */
783 #define THUMB_PP_PC_LR 0x0100
784 #define THUMB_LOAD_BIT 0x0800
785 #define THUMB2_LOAD_BIT 0x00100000
786
/* Canned diagnostic strings assigned to inst.error by the operand
   parsers and encoders.  Kept as macros so each use picks up gettext
   translation via _().  NOTE: none of these may end in a semicolon —
   a trailing ';' inside the #define (as BAD_ADDR_MODE previously had)
   silently breaks any expression-context use such as
   "cond ? BAD_ADDR_MODE : other".  */
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_SP		_("r13 not allowed here")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
#define BAD_ADDR_MODE   _("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
#define BAD_OUT_IT 	_("thumb conditional instruction should be in IT block")
#define BAD_IT_COND	_("incorrect condition in IT block")
#define BAD_IT_IT 	_("IT falling in the range of a previous IT block")
#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE	_("branch out of range")
#define BAD_FP16	_("selected processor does not support fp16 instruction")
#define UNPRED_REG(R)	_("using " R " results in unpredictable behaviour")
#define THUMB1_RELOC_ONLY  _("relocation valid in thumb1 code only")
810
811 static struct hash_control * arm_ops_hsh;
812 static struct hash_control * arm_cond_hsh;
813 static struct hash_control * arm_shift_hsh;
814 static struct hash_control * arm_psr_hsh;
815 static struct hash_control * arm_v7m_psr_hsh;
816 static struct hash_control * arm_reg_hsh;
817 static struct hash_control * arm_reloc_hsh;
818 static struct hash_control * arm_barrier_opt_hsh;
819
820 /* Stuff needed to resolve the label ambiguity
821 As:
822 ...
823 label: <insn>
824 may differ from:
825 ...
826 label:
827 <insn> */
828
829 symbolS * last_label_seen;
830 static int label_is_thumb_function_name = FALSE;
831
832 /* Literal pool structure. Held on a per-section
833 and per-sub-section basis. */
834
835 #define MAX_LITERAL_POOL_SIZE 1024
836 typedef struct literal_pool
837 {
838 expressionS literals [MAX_LITERAL_POOL_SIZE];
839 unsigned int next_free_entry;
840 unsigned int id;
841 symbolS * symbol;
842 segT section;
843 subsegT sub_section;
844 #ifdef OBJ_ELF
845 struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
846 #endif
847 struct literal_pool * next;
848 unsigned int alignment;
849 } literal_pool;
850
851 /* Pointer to a linked list of literal pools. */
852 literal_pool * list_of_pools = NULL;
853
854 typedef enum asmfunc_states
855 {
856 OUTSIDE_ASMFUNC,
857 WAITING_ASMFUNC_NAME,
858 WAITING_ENDASMFUNC
859 } asmfunc_states;
860
861 static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
862
863 #ifdef OBJ_ELF
864 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
865 #else
866 static struct current_it now_it;
867 #endif
868
869 static inline int
870 now_it_compatible (int cond)
871 {
872 return (cond & ~1) == (now_it.cc & ~1);
873 }
874
875 static inline int
876 conditional_insn (void)
877 {
878 return inst.cond != COND_ALWAYS;
879 }
880
881 static int in_it_block (void);
882
883 static int handle_it_state (void);
884
885 static void force_automatic_it_block_close (void);
886
887 static void it_fsm_post_encode (void);
888
889 #define set_it_insn_type(type) \
890 do \
891 { \
892 inst.it_insn_type = type; \
893 if (handle_it_state () == FAIL) \
894 return; \
895 } \
896 while (0)
897
898 #define set_it_insn_type_nonvoid(type, failret) \
899 do \
900 { \
901 inst.it_insn_type = type; \
902 if (handle_it_state () == FAIL) \
903 return failret; \
904 } \
905 while(0)
906
907 #define set_it_insn_type_last() \
908 do \
909 { \
910 if (inst.cond == COND_ALWAYS) \
911 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
912 else \
913 set_it_insn_type (INSIDE_IT_LAST_INSN); \
914 } \
915 while (0)
916
/* Pure syntax.	 */

/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.	 */
char arm_comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.	*/
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

/* Characters that separate multiple statements on one line.  */
char arm_line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.	*/
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
/* As in 0f12.456  */
/* or	 0d1.2345e12  */

const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix characters that indicate the start of an immediate
   value.  */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')

/* Separator character handling.  */

/* Skip at most one space.  The input scrubber collapses runs of
   whitespace to a single space, so one is all there can be.  */
#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)
951
952 static inline int
953 skip_past_char (char ** str, char c)
954 {
955 /* PR gas/14987: Allow for whitespace before the expected character. */
956 skip_whitespace (*str);
957
958 if (**str == c)
959 {
960 (*str)++;
961 return SUCCESS;
962 }
963 else
964 return FAIL;
965 }
966
967 #define skip_past_comma(str) skip_past_char (str, ',')
968
969 /* Arithmetic expressions (possibly involving symbols). */
970
971 /* Return TRUE if anything in the expression is a bignum. */
972
973 static int
974 walk_no_bignums (symbolS * sp)
975 {
976 if (symbol_get_value_expression (sp)->X_op == O_big)
977 return 1;
978
979 if (symbol_get_value_expression (sp)->X_add_symbol)
980 {
981 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
982 || (symbol_get_value_expression (sp)->X_op_symbol
983 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
984 }
985
986 return 0;
987 }
988
/* Non-zero while my_get_expression is running; md_operand uses this to
   flag bad operands with O_illegal only for expressions we initiated.  */
static int in_my_get_expression = 0;

/* Third argument to my_get_expression.	 */
#define GE_NO_PREFIX 0		/* No '#'/'$' immediate prefix allowed.  */
#define GE_IMM_PREFIX 1		/* A '#'/'$' prefix is required.  */
#define GE_OPT_PREFIX 2		/* A '#'/'$' prefix is optional.  */
/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3
998
/* Parse an expression starting at *STR into EP, honouring PREFIX_MODE
   (one of the GE_* values above).  On success return 0 and advance *STR
   past the expression; on failure return non-zero with inst.error set.

   NOTE(review): the GE_IMM_PREFIX error path returns FAIL while the
   other error paths return 1.  Callers only test for non-zero, so both
   work, but the mix is inconsistent.  */
static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;
  segT	 seg;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
		  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default: abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* expression () works on input_line_pointer, so point it at our
     string for the duration; in_my_get_expression tells md_operand that
     any parse failure belongs to us.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = 1;
  seg = expression (ep);
  in_my_get_expression = 0;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }
#else
  (void) seg;
#endif

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.	 Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  /* Success: advance the caller's pointer and restore gas's.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  return 0;
}
1084
1085 /* Turn a string in input_line_pointer into a floating point constant
1086 of type TYPE, and store the appropriate bytes in *LITP. The number
1087 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1088 returned, or NULL on OK.
1089
1090 Note that fp constants aren't represent in the normal way on the ARM.
1091 In big endian mode, things are as expected. However, in little endian
1092 mode fp constants are big-endian word-wise, and little-endian byte-wise
1093 within the words. For example, (double) 1.1 in big endian mode is
1094 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1095 the byte sequence 99 99 f1 3f 9a 99 99 99.
1096
1097 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1098
1099 const char *
1100 md_atof (int type, char * litP, int * sizeP)
1101 {
1102 int prec;
1103 LITTLENUM_TYPE words[MAX_LITTLENUMS];
1104 char *t;
1105 int i;
1106
1107 switch (type)
1108 {
1109 case 'f':
1110 case 'F':
1111 case 's':
1112 case 'S':
1113 prec = 2;
1114 break;
1115
1116 case 'd':
1117 case 'D':
1118 case 'r':
1119 case 'R':
1120 prec = 4;
1121 break;
1122
1123 case 'x':
1124 case 'X':
1125 prec = 5;
1126 break;
1127
1128 case 'p':
1129 case 'P':
1130 prec = 5;
1131 break;
1132
1133 default:
1134 *sizeP = 0;
1135 return _("Unrecognized or unsupported floating point constant");
1136 }
1137
1138 t = atof_ieee (input_line_pointer, type, words);
1139 if (t)
1140 input_line_pointer = t;
1141 *sizeP = prec * sizeof (LITTLENUM_TYPE);
1142
1143 if (target_big_endian)
1144 {
1145 for (i = 0; i < prec; i++)
1146 {
1147 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1148 litP += sizeof (LITTLENUM_TYPE);
1149 }
1150 }
1151 else
1152 {
1153 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
1154 for (i = prec - 1; i >= 0; i--)
1155 {
1156 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1157 litP += sizeof (LITTLENUM_TYPE);
1158 }
1159 else
1160 /* For a 4 byte float the order of elements in `words' is 1 0.
1161 For an 8 byte float the order is 1 0 3 2. */
1162 for (i = 0; i < prec; i += 2)
1163 {
1164 md_number_to_chars (litP, (valueT) words[i + 1],
1165 sizeof (LITTLENUM_TYPE));
1166 md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
1167 (valueT) words[i], sizeof (LITTLENUM_TYPE));
1168 litP += 2 * sizeof (LITTLENUM_TYPE);
1169 }
1170 }
1171
1172 return NULL;
1173 }
1174
1175 /* We handle all bad expressions here, so that we can report the faulty
1176 instruction in the error message. */
1177 void
1178 md_operand (expressionS * exp)
1179 {
1180 if (in_my_get_expression)
1181 exp->X_op = O_illegal;
1182 }
1183
1184 /* Immediate values. */
1185
1186 /* Generic immediate-value read function for use in directives.
1187 Accepts anything that 'expression' can fold to a constant.
1188 *val receives the number. */
1189 #ifdef OBJ_ELF
1190 static int
1191 immediate_for_directive (int *val)
1192 {
1193 expressionS exp;
1194 exp.X_op = O_illegal;
1195
1196 if (is_immediate_prefix (*input_line_pointer))
1197 {
1198 input_line_pointer++;
1199 expression (&exp);
1200 }
1201
1202 if (exp.X_op != O_constant)
1203 {
1204 as_bad (_("expected #constant"));
1205 ignore_rest_of_line ();
1206 return FAIL;
1207 }
1208 *val = exp.X_add_number;
1209 return SUCCESS;
1210 }
1211 #endif
1212
1213 /* Register parsing. */
1214
1215 /* Generic register parser. CCP points to what should be the
1216 beginning of a register name. If it is indeed a valid register
1217 name, advance CCP over it and return the reg_entry structure;
1218 otherwise return NULL. Does not issue diagnostics. */
1219
1220 static struct reg_entry *
1221 arm_reg_parse_multi (char **ccp)
1222 {
1223 char *start = *ccp;
1224 char *p;
1225 struct reg_entry *reg;
1226
1227 skip_whitespace (start);
1228
1229 #ifdef REGISTER_PREFIX
1230 if (*start != REGISTER_PREFIX)
1231 return NULL;
1232 start++;
1233 #endif
1234 #ifdef OPTIONAL_REGISTER_PREFIX
1235 if (*start == OPTIONAL_REGISTER_PREFIX)
1236 start++;
1237 #endif
1238
1239 p = start;
1240 if (!ISALPHA (*p) || !is_name_beginner (*p))
1241 return NULL;
1242
1243 do
1244 p++;
1245 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1246
1247 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1248
1249 if (!reg)
1250 return NULL;
1251
1252 *ccp = p;
1253 return reg;
1254 }
1255
/* Handle the alternative spellings accepted for a few register
   classes.  START is the beginning of the operand text, *CCP the
   current parse position and REG the entry found by
   arm_reg_parse_multi (may be NULL).  Return the register number, or
   FAIL if no alternative syntax applies.  */
static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
		    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes.  */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)
	return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here.  */
      {
	unsigned long processor = strtoul (start, ccp, 10);
	if (*ccp != start && processor <= 15)
	  return processor;
      }
      /* Fall through.  */

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
	 instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
	return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}
1293
1294 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1295 return value is the register number or FAIL. */
1296
1297 static int
1298 arm_reg_parse (char **ccp, enum arm_reg_type type)
1299 {
1300 char *start = *ccp;
1301 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1302 int ret;
1303
1304 /* Do not allow a scalar (reg+index) to parse as a register. */
1305 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1306 return FAIL;
1307
1308 if (reg && reg->type == type)
1309 return reg->number;
1310
1311 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1312 return ret;
1313
1314 *ccp = start;
1315 return FAIL;
1316 }
1317
1318 /* Parse a Neon type specifier. *STR should point at the leading '.'
1319 character. Does no verification at this stage that the type fits the opcode
1320 properly. E.g.,
1321
1322 .i32.i32.s16
1323 .s32.f32
1324 .u16
1325
1326 Can all be legally parsed by this function.
1327
1328 Fills in neon_type struct pointer with parsed information, and updates STR
1329 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1330 type, FAIL if not. */
1331
1332 static int
1333 parse_neon_type (struct neon_type *type, char **str)
1334 {
1335 char *ptr = *str;
1336
1337 if (type)
1338 type->elems = 0;
1339
1340 while (type->elems < NEON_MAX_TYPE_ELS)
1341 {
1342 enum neon_el_type thistype = NT_untyped;
1343 unsigned thissize = -1u;
1344
1345 if (*ptr != '.')
1346 break;
1347
1348 ptr++;
1349
1350 /* Just a size without an explicit type. */
1351 if (ISDIGIT (*ptr))
1352 goto parsesize;
1353
1354 switch (TOLOWER (*ptr))
1355 {
1356 case 'i': thistype = NT_integer; break;
1357 case 'f': thistype = NT_float; break;
1358 case 'p': thistype = NT_poly; break;
1359 case 's': thistype = NT_signed; break;
1360 case 'u': thistype = NT_unsigned; break;
1361 case 'd':
1362 thistype = NT_float;
1363 thissize = 64;
1364 ptr++;
1365 goto done;
1366 default:
1367 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1368 return FAIL;
1369 }
1370
1371 ptr++;
1372
1373 /* .f is an abbreviation for .f32. */
1374 if (thistype == NT_float && !ISDIGIT (*ptr))
1375 thissize = 32;
1376 else
1377 {
1378 parsesize:
1379 thissize = strtoul (ptr, &ptr, 10);
1380
1381 if (thissize != 8 && thissize != 16 && thissize != 32
1382 && thissize != 64)
1383 {
1384 as_bad (_("bad size %d in type specifier"), thissize);
1385 return FAIL;
1386 }
1387 }
1388
1389 done:
1390 if (type)
1391 {
1392 type->el[type->elems].type = thistype;
1393 type->el[type->elems].size = thissize;
1394 type->elems++;
1395 }
1396 }
1397
1398 /* Empty/missing type is not a successful parse. */
1399 if (type->elems == 0)
1400 return FAIL;
1401
1402 *str = ptr;
1403
1404 return SUCCESS;
1405 }
1406
1407 /* Errors may be set multiple times during parsing or bit encoding
1408 (particularly in the Neon bits), but usually the earliest error which is set
1409 will be the most meaningful. Avoid overwriting it with later (cascading)
1410 errors by calling this function. */
1411
1412 static void
1413 first_error (const char *err)
1414 {
1415 if (!inst.error)
1416 inst.error = err;
1417 }
1418
1419 /* Parse a single type, e.g. ".s32", leading period included. */
1420 static int
1421 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1422 {
1423 char *str = *ccp;
1424 struct neon_type optype;
1425
1426 if (*str == '.')
1427 {
1428 if (parse_neon_type (&optype, &str) == SUCCESS)
1429 {
1430 if (optype.elems == 1)
1431 *vectype = optype.el[0];
1432 else
1433 {
1434 first_error (_("only one type should be specified for operand"));
1435 return FAIL;
1436 }
1437 }
1438 else
1439 {
1440 first_error (_("vector type expected"));
1441 return FAIL;
1442 }
1443 }
1444 else
1445 return FAIL;
1446
1447 *ccp = str;
1448
1449 return SUCCESS;
1450 }
1451
/* Special meanings for lane indices.  Ordinary indices occupy the range
   0-7, so these out-of-range values coexist with them in a 4-bit field.  */
1454
1455 #define NEON_ALL_LANES 15
1456 #define NEON_INTERLEAVE_LANES 14
1457
1458 /* Parse either a register or a scalar, with an optional type. Return the
1459 register number, and optionally fill in the actual type of the register
1460 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1461 type/index information in *TYPEINFO. */
1462
static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with an empty typed-alias record: no type, no index.	 */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.	 */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  if (type != reg->type)
    return FAIL;

  /* Inherit any type/index information attached to the register name
     by a typed alias.	*/
  if (reg->neon)
    atype = *reg->neon;

  /* An explicit ".type" suffix overrides nothing: it may only be given
     when the alias did not already carry a type.  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Optional scalar index: "[]" selects all lanes, "[n]" a single lane.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD)
	{
	  first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1566
1567 /* Like arm_reg_parse, but allow allow the following extra features:
1568 - If RTYPE is non-zero, return the (possibly restricted) type of the
1569 register (e.g. Neon double or quad reg when either has been requested).
1570 - If this is a Neon vector type with additional type information, fill
1571 in the struct pointed to by VECTYPE (if non-NULL).
1572 This function will fault on encountering a scalar. */
1573
1574 static int
1575 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1576 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1577 {
1578 struct neon_typed_alias atype;
1579 char *str = *ccp;
1580 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1581
1582 if (reg == FAIL)
1583 return FAIL;
1584
1585 /* Do not allow regname(... to parse as a register. */
1586 if (*str == '(')
1587 return FAIL;
1588
1589 /* Do not allow a scalar (reg+index) to parse as a register. */
1590 if ((atype.defined & NTA_HASINDEX) != 0)
1591 {
1592 first_error (_("register operand expected, but got scalar"));
1593 return FAIL;
1594 }
1595
1596 if (vectype)
1597 *vectype = atype.eltype;
1598
1599 *ccp = str;
1600
1601 return reg;
1602 }
1603
1604 #define NEON_SCALAR_REG(X) ((X) >> 4)
1605 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1606
1607 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1608 have enough information to be able to do a good job bounds-checking. So, we
1609 just do easy checks here, and do further checks later. */
1610
1611 static int
1612 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1613 {
1614 int reg;
1615 char *str = *ccp;
1616 struct neon_typed_alias atype;
1617
1618 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1619
1620 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1621 return FAIL;
1622
1623 if (atype.index == NEON_ALL_LANES)
1624 {
1625 first_error (_("scalar must have an index"));
1626 return FAIL;
1627 }
1628 else if (atype.index >= 64 / elsize)
1629 {
1630 first_error (_("scalar index out of range"));
1631 return FAIL;
1632 }
1633
1634 if (type)
1635 *type = atype.eltype;
1636
1637 *ccp = str;
1638
1639 return reg * 16 + atype.index;
1640 }
1641
1642 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1643
1644 static long
1645 parse_reg_list (char ** strp)
1646 {
1647 char * str = * strp;
1648 long range = 0;
1649 int another_range;
1650
1651 /* We come back here if we get ranges concatenated by '+' or '|'. */
1652 do
1653 {
1654 skip_whitespace (str);
1655
1656 another_range = 0;
1657
1658 if (*str == '{')
1659 {
1660 int in_range = 0;
1661 int cur_reg = -1;
1662
1663 str++;
1664 do
1665 {
1666 int reg;
1667
1668 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1669 {
1670 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1671 return FAIL;
1672 }
1673
1674 if (in_range)
1675 {
1676 int i;
1677
1678 if (reg <= cur_reg)
1679 {
1680 first_error (_("bad range in register list"));
1681 return FAIL;
1682 }
1683
1684 for (i = cur_reg + 1; i < reg; i++)
1685 {
1686 if (range & (1 << i))
1687 as_tsktsk
1688 (_("Warning: duplicated register (r%d) in register list"),
1689 i);
1690 else
1691 range |= 1 << i;
1692 }
1693 in_range = 0;
1694 }
1695
1696 if (range & (1 << reg))
1697 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1698 reg);
1699 else if (reg <= cur_reg)
1700 as_tsktsk (_("Warning: register range not in ascending order"));
1701
1702 range |= 1 << reg;
1703 cur_reg = reg;
1704 }
1705 while (skip_past_comma (&str) != FAIL
1706 || (in_range = 1, *str++ == '-'));
1707 str--;
1708
1709 if (skip_past_char (&str, '}') == FAIL)
1710 {
1711 first_error (_("missing `}'"));
1712 return FAIL;
1713 }
1714 }
1715 else
1716 {
1717 expressionS exp;
1718
1719 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1720 return FAIL;
1721
1722 if (exp.X_op == O_constant)
1723 {
1724 if (exp.X_add_number
1725 != (exp.X_add_number & 0x0000ffff))
1726 {
1727 inst.error = _("invalid register mask");
1728 return FAIL;
1729 }
1730
1731 if ((range & exp.X_add_number) != 0)
1732 {
1733 int regno = range & exp.X_add_number;
1734
1735 regno &= -regno;
1736 regno = (1 << regno) - 1;
1737 as_tsktsk
1738 (_("Warning: duplicated register (r%d) in register list"),
1739 regno);
1740 }
1741
1742 range |= exp.X_add_number;
1743 }
1744 else
1745 {
1746 if (inst.reloc.type != 0)
1747 {
1748 inst.error = _("expression too complex");
1749 return FAIL;
1750 }
1751
1752 memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
1753 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1754 inst.reloc.pc_rel = 0;
1755 }
1756 }
1757
1758 if (*str == '|' || *str == '+')
1759 {
1760 str++;
1761 another_range = 1;
1762 }
1763 }
1764 while (another_range);
1765
1766 *strp = str;
1767 return range;
1768 }
1769
/* Types of registers in a list.  */

enum reg_list_els
{
  REGLIST_VFP_S,	/* VFP single-precision registers (32 of them).	 */
  REGLIST_VFP_D,	/* VFP double-precision registers.  */
  REGLIST_NEON_D	/* Neon D registers; see parse_vfp_reg_list for
			   the extra syntax this enables.  */
};
1778
1779 /* Parse a VFP register list. If the string is invalid return FAIL.
1780 Otherwise return the number of registers, and set PBASE to the first
1781 register. Parses registers of type ETYPE.
1782 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1783 - Q registers can be used to specify pairs of D registers
1784 - { } can be omitted from around a singleton register list
1785 FIXME: This is not implemented, as it would require backtracking in
1786 some cases, e.g.:
1787 vtbl.8 d3,d4,d5
1788 This could be done (the meaning isn't really ambiguous), but doesn't
1789 fit in well with the current parsing framework.
1790 - 32 D registers may be used (also true for VFPv3).
1791 FIXME: Types are ignored in these register lists, which is probably a
1792 bug. */
1793
static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
{
  char *str = *ccp;
  int base_reg;
  int new_base;
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
  int max_regs = 0;
  int count = 0;
  int warned = 0;
  unsigned long mask = 0;
  int i;

  if (skip_past_char (&str, '{') == FAIL)
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  /* Select the register class and (for S registers) the limit.  */
  switch (etype)
    {
    case REGLIST_VFP_S:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;
    }

  if (etype != REGLIST_VFP_S)
    {
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  max_regs = 32;
	  /* Record the use of the D32 extension in the feature sets
	     accumulated for this file.	 */
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	max_regs = 16;
    }

  /* Start above the valid range; lowered to the smallest register seen.  */
  base_reg = max_regs;

  do
    {
      int setmask = 1, addregs = 1;

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)
	{
	  setmask = 3;
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      if ((mask >> new_base) != 0 && ! warned)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = gettext (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  /* Q<n> covers D<2n> and D<2n+1>, so bump the top D number.  */
	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  /* Add each register in the range to the mask.  */
	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  /* NOTE(review): this consumes the list terminator without checking
     that it really is '}' -- confirm whether a stray character here
     should be diagnosed.  */
  str++;

  /* Sanity check -- should have raised a parse error above.  */
  if (count == 0 || count > max_regs)
    abort ();

  *pbase = base_reg;

  /* Final test -- the registers must be consecutive.  */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }

  *ccp = str;

  return count;
}
1957
1958 /* True if two alias types are the same. */
1959
1960 static bfd_boolean
1961 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1962 {
1963 if (!a && !b)
1964 return TRUE;
1965
1966 if (!a || !b)
1967 return FALSE;
1968
1969 if (a->defined != b->defined)
1970 return FALSE;
1971
1972 if ((a->defined & NTA_HASTYPE) != 0
1973 && (a->eltype.type != b->eltype.type
1974 || a->eltype.size != b->eltype.size))
1975 return FALSE;
1976
1977 if ((a->defined & NTA_HASINDEX) != 0
1978 && (a->index != b->index))
1979 return FALSE;
1980
1981 return TRUE;
1982 }
1983
1984 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1985 The base register is put in *PBASE.
1986 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1987 the return value.
1988 The register stride (minus one) is put in bit 4 of the return value.
1989 Bits [6:5] encode the list length (minus one).
1990 The type of the list elements is put in *ELTYPE, if non-NULL. */
1991
1992 #define NEON_LANE(X) ((X) & 0xf)
1993 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1994 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
1995
static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;
  int reg_incr = -1;
  int count = 0;
  int lane = -1;
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;
  firsttype.defined = 0;
  firsttype.eltype.type = NT_invtype;
  firsttype.eltype.size = -1;
  firsttype.index = -1;

  /* The braces are optional for a singleton list.  */
  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First register: fixes the base and the expected types.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second register: fixes the stride.	 */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  /* Subsequent registers must continue the arithmetic series.	*/
	  first_error (_(incr_error));
	  return FAIL;
	}

      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
	 modes.	 */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Count every D register between the endpoints (inclusive).	*/
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* All entries must agree on the lane index.	*/
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Pack lane, stride and length as described above NEON_LANE.	 */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2152
2153 /* Parse an explicit relocation suffix on an expression. This is
2154 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2155 arm_reloc_hsh contains no entries, so this function can only
2156 succeed if there is no () after the word. Returns -1 on error,
2157 BFD_RELOC_UNUSED if there wasn't any suffix. */
2158
2159 static int
2160 parse_reloc (char **str)
2161 {
2162 struct reloc_entry *r;
2163 char *p, *q;
2164
2165 if (**str != '(')
2166 return BFD_RELOC_UNUSED;
2167
2168 p = *str + 1;
2169 q = p;
2170
2171 while (*q && *q != ')' && *q != ',')
2172 q++;
2173 if (*q != ')')
2174 return -1;
2175
2176 if ((r = (struct reloc_entry *)
2177 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2178 return -1;
2179
2180 *str = q + 1;
2181 return r->reloc;
2182 }
2183
2184 /* Directives: register aliases. */
2185
2186 static struct reg_entry *
2187 insert_reg_alias (char *str, unsigned number, int type)
2188 {
2189 struct reg_entry *new_reg;
2190 const char *name;
2191
2192 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2193 {
2194 if (new_reg->builtin)
2195 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2196
2197 /* Only warn about a redefinition if it's not defined as the
2198 same register. */
2199 else if (new_reg->number != number || new_reg->type != type)
2200 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2201
2202 return NULL;
2203 }
2204
2205 name = xstrdup (str);
2206 new_reg = XNEW (struct reg_entry);
2207
2208 new_reg->name = name;
2209 new_reg->number = number;
2210 new_reg->type = type;
2211 new_reg->builtin = FALSE;
2212 new_reg->neon = NULL;
2213
2214 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2215 abort ();
2216
2217 return new_reg;
2218 }
2219
2220 static void
2221 insert_neon_reg_alias (char *str, int number, int type,
2222 struct neon_typed_alias *atype)
2223 {
2224 struct reg_entry *reg = insert_reg_alias (str, number, type);
2225
2226 if (!reg)
2227 {
2228 first_error (_("attempt to redefine typed alias"));
2229 return;
2230 }
2231
2232 if (atype)
2233 {
2234 reg->neon = XNEW (struct neon_typed_alias);
2235 *reg->neon = *atype;
2236 }
2237 }
2238
2239 /* Look for the .req directive. This is of the form:
2240
2241 new_register_name .req existing_register_name
2242
2243 If we find one, or if it looks sufficiently like one that we want to
2244 handle any error here, return TRUE. Otherwise return FALSE. */
2245
static bfd_boolean
create_register_alias (char * newname, char *p)
{
  struct reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces. */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  /* The right-hand side must name an existing register or alias.
     Note we return TRUE even on error: the statement was a .req, so
     the caller must not try to parse it as anything else.  */
  old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string. */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      /* Rewrite nbuf in place to all upper case.  */
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only insert the variant if it differs from the name as given.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      /* Now the all-lower-case variant, again only if it differs.  */
      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
2318
2319 /* Create a Neon typed/indexed register alias using directives, e.g.:
2320 X .dn d5.s32[1]
2321 Y .qn 6.s16
2322 Z .dn d7
2323 T .dn Z[0]
2324 These typed registers can be used instead of the types specified after the
2325 Neon mnemonic, so long as all operands given have types. Types can also be
2326 specified directly, e.g.:
2327 vadd d0.s32, d1.s32, d2.s32 */
2328
static bfd_boolean
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend ATTRIBUTE_UNUSED;
  int namelen;

  /* Start with no type and no index defined.  */
  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  nameend = p;

  /* A .dn alias names a D register, a .qn alias a Q register.
     Anything else is not our directive; let the caller handle it.  */
  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;
  else
    return FALSE;

  p += 5;

  if (*p == '\0')
    return FALSE;

  /* The base may be a register name (possibly itself a typed alias)...  */
  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return FALSE;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer.  A bare number is taken as a D
	 register number, so double it when a Q register was asked for.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return FALSE;
	}
      basereg = &mybasereg;
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* Inherit any type/index the base alias already carries.  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.  It must not conflict with an inherited one.  */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return FALSE;
	}
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index.  It must not conflict with an
	 inherited one.  */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return FALSE;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return FALSE;
	}
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  namelen = nameend - newname;
#else
  newname = original_case_string;
  namelen = strlen (newname);
#endif

  namebuf = xmemdup0 (newname, namelen);

  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  free (namebuf);
  return TRUE;
}
2467
2468 /* Should never be called, as .req goes between the alias and the
2469 register name, not at the beginning of the line. */
2470
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* Correct uses of .req are consumed by create_register_alias before
     the pseudo-op table is consulted, so reaching this handler means
     the "name .req register" form was not followed.  */
  as_bad (_("invalid syntax for .req directive"));
}
2476
static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  /* Valid .dn uses are handled by create_neon_reg_alias; this only
     triggers on malformed input.  */
  as_bad (_("invalid syntax for .dn directive"));
}
2482
static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  /* Valid .qn uses are handled by create_neon_reg_alias; this only
     triggers on malformed input.  */
  as_bad (_("invalid syntax for .qn directive"));
}
2488
2489 /* The .unreq directive deletes an alias which was previously defined
2490 by .req. For example:
2491
2492 my_alias .req r11
2493 .unreq my_alias */
2494
2495 static void
2496 s_unreq (int a ATTRIBUTE_UNUSED)
2497 {
2498 char * name;
2499 char saved_char;
2500
2501 name = input_line_pointer;
2502
2503 while (*input_line_pointer != 0
2504 && *input_line_pointer != ' '
2505 && *input_line_pointer != '\n')
2506 ++input_line_pointer;
2507
2508 saved_char = *input_line_pointer;
2509 *input_line_pointer = 0;
2510
2511 if (!*name)
2512 as_bad (_("invalid syntax for .unreq directive"));
2513 else
2514 {
2515 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2516 name);
2517
2518 if (!reg)
2519 as_bad (_("unknown register alias '%s'"), name);
2520 else if (reg->builtin)
2521 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2522 name);
2523 else
2524 {
2525 char * p;
2526 char * nbuf;
2527
2528 hash_delete (arm_reg_hsh, name, FALSE);
2529 free ((char *) reg->name);
2530 if (reg->neon)
2531 free (reg->neon);
2532 free (reg);
2533
2534 /* Also locate the all upper case and all lower case versions.
2535 Do not complain if we cannot find one or the other as it
2536 was probably deleted above. */
2537
2538 nbuf = strdup (name);
2539 for (p = nbuf; *p; p++)
2540 *p = TOUPPER (*p);
2541 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2542 if (reg)
2543 {
2544 hash_delete (arm_reg_hsh, nbuf, FALSE);
2545 free ((char *) reg->name);
2546 if (reg->neon)
2547 free (reg->neon);
2548 free (reg);
2549 }
2550
2551 for (p = nbuf; *p; p++)
2552 *p = TOLOWER (*p);
2553 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2554 if (reg)
2555 {
2556 hash_delete (arm_reg_hsh, nbuf, FALSE);
2557 free ((char *) reg->name);
2558 if (reg->neon)
2559 free (reg->neon);
2560 free (reg);
2561 }
2562
2563 free (nbuf);
2564 }
2565 }
2566
2567 *input_line_pointer = saved_char;
2568 demand_empty_rest_of_line ();
2569 }
2570
2571 /* Directives: Instruction set selection. */
2572
2573 #ifdef OBJ_ELF
2574 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2575 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2576 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2577 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2578
2579 /* Create a new mapping symbol for the transition to STATE. */
2580
static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  /* Choose the AAELF mapping symbol name for the new state: $d for
     data, $a for ARM code, $t for Thumb code.  All are untyped.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  /* Mapping symbols are always local to the object file.  */
  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Mark code mapping symbols with the appropriate ARM/Thumb and
     interworking annotations; $d gets none.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      /* Same offset as the previous mapping symbol: the new one
	 supersedes it.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
2654
2655 /* We must sometimes convert a region marked as code to data during
2656 code alignment, if an odd number of bytes have to be padded. The
2657 code mapping symbol is pushed to an aligned address. */
2658
static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS *frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  The padding
     region becomes $d, so a code symbol at the same offset would be
     wrong.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  /* The symbol being removed was also the first in the frag.  */
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the BYTES of padding as data, then resume STATE at the
     aligned address just past it.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
2681
2682 static void mapping_state_2 (enum mstate state, int max_chars);
2683
2684 /* Set the mapping state to STATE. Only call this when about to
2685 emit some STATE bytes to the file. */
2686
2687 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (state == MAP_ARM || state == MAP_THUMB)
    /*  PR gas/12931
	All ARM instructions require 4-byte alignment.
	(Almost) all Thumb instructions require 2-byte alignment.

	When emitting instructions into any section, mark the section
	appropriately.

	Some Thumb instructions are alignment-sensitive modulo 4 bytes,
	but themselves require 2-byte alignment; this applies to some
	PC- relative forms.  However, these cases will involve implicit
	literal pool generation or an explicit .align >=2, both of
	which will cause the section to be marked with sufficient
	alignment.  Thus, we don't handle those cases here.  */
    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);

  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later.  */
    return;

  /* No bytes have been emitted for this symbol yet, so no look-back
     adjustment is required.  */
  mapping_state_2 (state, 0);
}
2720
2721 /* Same as mapping_state, but MAX_CHARS bytes have already been
2722 allocated. Put the mapping symbol that far back. */
2723
static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Only normal sections carry mapping symbols.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  /* On the first transition to code in this section, if anything was
     already emitted before it, retroactively mark the start of the
     section as data.  */
  if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
      || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
    {
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }

  /* Record the new state and place its mapping symbol MAX_CHARS bytes
     back from the current position (the bytes already allocated by
     the caller).  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
2750 #undef TRANSITION
2751 #else
2752 #define mapping_state(x) ((void)0)
2753 #define mapping_state_2(x, y) ((void)0)
2754 #endif
2755
2756 /* Find the real, Thumb encoded start of a Thumb function. */
2757
2758 #ifdef OBJ_COFF
static symbolS *
find_real_start (symbolS * symbolP)
{
  char * real_start;
  const char * name = S_GET_NAME (symbolP);
  symbolS * new_target;

  /* This definition must agree with the one in gcc/config/arm/thumb.c.  */
#define STUB_NAME ".real_start_of"

  if (name == NULL)
    abort ();

  /* The compiler may generate BL instructions to local labels because
     it needs to perform a branch to a far away location.  These labels
     do not have a corresponding ".real_start_of" label.  We check
     both for S_IS_LOCAL and for a leading dot, to give a way to bypass
     the ".real_start_of" convention for nonlocal branches.  */
  if (S_IS_LOCAL (symbolP) || name[0] == '.')
    return symbolP;

  /* Look for a ".real_start_of<name>" symbol.  */
  real_start = concat (STUB_NAME, name, NULL);
  new_target = symbol_find (real_start);
  free (real_start);

  if (new_target == NULL)
    {
      /* Fall back to the original symbol rather than failing hard.  */
      as_warn (_("Failed to find real start of function: %s\n"), name);
      new_target = symbolP;
    }

  return new_target;
}
2792 #endif
2793
2794 static void
2795 opcode_select (int width)
2796 {
2797 switch (width)
2798 {
2799 case 16:
2800 if (! thumb_mode)
2801 {
2802 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2803 as_bad (_("selected processor does not support THUMB opcodes"));
2804
2805 thumb_mode = 1;
2806 /* No need to force the alignment, since we will have been
2807 coming from ARM mode, which is word-aligned. */
2808 record_alignment (now_seg, 1);
2809 }
2810 break;
2811
2812 case 32:
2813 if (thumb_mode)
2814 {
2815 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2816 as_bad (_("selected processor does not support ARM opcodes"));
2817
2818 thumb_mode = 0;
2819
2820 if (!need_pass_2)
2821 frag_align (2, 0, 0);
2822
2823 record_alignment (now_seg, 1);
2824 }
2825 break;
2826
2827 default:
2828 as_bad (_("invalid instruction size selected (%d)"), width);
2829 }
2830 }
2831
static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  /* Handle the .arm directive: switch to 32-bit ARM encoding.  */
  opcode_select (32);
  demand_empty_rest_of_line ();
}
2838
static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* Handle the .thumb directive: switch to 16-bit Thumb encoding.  */
  opcode_select (16);
  demand_empty_rest_of_line ();
}
2845
2846 static void
2847 s_code (int unused ATTRIBUTE_UNUSED)
2848 {
2849 int temp;
2850
2851 temp = get_absolute_expression ();
2852 switch (temp)
2853 {
2854 case 16:
2855 case 32:
2856 opcode_select (temp);
2857 break;
2858
2859 default:
2860 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2861 }
2862 }
2863
static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.  */
  if (! thumb_mode)
    {
      /* 2 (rather than 1) marks "forced" Thumb mode.  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
2880
static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  /* .thumb_func implies .thumb.  */
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
}
2890
2891 /* Perform a .set directive, but also mark the alias as
2892 being a thumb function. */
2893
static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  delim = get_symbol_name (& name);
  end_name = input_line_pointer;
  (void) restore_line_pointer (delim);

  /* The symbol name must be followed by a comma and the value.  */
  if (*input_line_pointer != ',')
    {
      /* Temporarily terminate the name so it prints cleanly.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  /* Find an existing symbol of this name, or create a new one.  */
  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  * end_name = delim;

  /* EQUIV nonzero means .thumb_set acts like .equiv: redefinition of
     an already-defined symbol is an error.  */
  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  /* Parse the value expression and assign it to the symbol.  */
  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
2979
2980 /* Directives: Mode selection. */
2981
2982 /* .syntax [unified|divided] - choose the new unified syntax
2983 (same for Arm and Thumb encoding, modulo slight differences in what
2984 can be represented) or the old divergent syntax for each mode. */
2985 static void
2986 s_syntax (int unused ATTRIBUTE_UNUSED)
2987 {
2988 char *name, delim;
2989
2990 delim = get_symbol_name (& name);
2991
2992 if (!strcasecmp (name, "unified"))
2993 unified_syntax = TRUE;
2994 else if (!strcasecmp (name, "divided"))
2995 unified_syntax = FALSE;
2996 else
2997 {
2998 as_bad (_("unrecognized syntax mode \"%s\""), name);
2999 return;
3000 }
3001 (void) restore_line_pointer (delim);
3002 demand_empty_rest_of_line ();
3003 }
3004
3005 /* Directives: sectioning and alignment. */
3006
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  md_elf_section_change_hook ();
#endif
}
3019
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Handle the .even directive: align to a 2-byte boundary.
     Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
3031
3032 /* Directives: CodeComposer Studio. */
3033
3034 /* .ref (for CodeComposer Studio syntax only). */
static void
s_ccs_ref (int unused ATTRIBUTE_UNUSED)
{
  /* .ref is accepted (and ignored) only in CodeComposer Studio mode.  */
  if (codecomposer_syntax)
    ignore_rest_of_line ();
  else
    as_bad (_(".ref pseudo-op only available with -mccs flag."));
}
3043
3044 /* If name is not NULL, then it is used for marking the beginning of a
3045 function, wherease if it is NULL then it means the function end. */
3046 static void
3047 asmfunc_debug (const char * name)
3048 {
3049 static const char * last_name = NULL;
3050
3051 if (name != NULL)
3052 {
3053 gas_assert (last_name == NULL);
3054 last_name = name;
3055
3056 if (debug_type == DEBUG_STABS)
3057 stabs_generate_asm_func (name, name);
3058 }
3059 else
3060 {
3061 gas_assert (last_name != NULL);
3062
3063 if (debug_type == DEBUG_STABS)
3064 stabs_generate_asm_endfunc (last_name, last_name);
3065
3066 last_name = NULL;
3067 }
3068 }
3069
3070 static void
3071 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
3072 {
3073 if (codecomposer_syntax)
3074 {
3075 switch (asmfunc_state)
3076 {
3077 case OUTSIDE_ASMFUNC:
3078 asmfunc_state = WAITING_ASMFUNC_NAME;
3079 break;
3080
3081 case WAITING_ASMFUNC_NAME:
3082 as_bad (_(".asmfunc repeated."));
3083 break;
3084
3085 case WAITING_ENDASMFUNC:
3086 as_bad (_(".asmfunc without function."));
3087 break;
3088 }
3089 demand_empty_rest_of_line ();
3090 }
3091 else
3092 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3093 }
3094
3095 static void
3096 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
3097 {
3098 if (codecomposer_syntax)
3099 {
3100 switch (asmfunc_state)
3101 {
3102 case OUTSIDE_ASMFUNC:
3103 as_bad (_(".endasmfunc without a .asmfunc."));
3104 break;
3105
3106 case WAITING_ASMFUNC_NAME:
3107 as_bad (_(".endasmfunc without function."));
3108 break;
3109
3110 case WAITING_ENDASMFUNC:
3111 asmfunc_state = OUTSIDE_ASMFUNC;
3112 asmfunc_debug (NULL);
3113 break;
3114 }
3115 demand_empty_rest_of_line ();
3116 }
3117 else
3118 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3119 }
3120
static void
s_ccs_def (int name)
{
  /* In CodeComposer mode .def behaves like .global.  */
  if (codecomposer_syntax)
    s_globl (name);
  else
    as_bad (_(".def pseudo-op only available with -mccs flag."));
}
3129
3130 /* Directives: Literal pools. */
3131
3132 static literal_pool *
3133 find_literal_pool (void)
3134 {
3135 literal_pool * pool;
3136
3137 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3138 {
3139 if (pool->section == now_seg
3140 && pool->sub_section == now_subseg)
3141 break;
3142 }
3143
3144 return pool;
3145 }
3146
3147 static literal_pool *
3148 find_or_make_literal_pool (void)
3149 {
3150 /* Next literal pool ID number. */
3151 static unsigned int latest_pool_num = 1;
3152 literal_pool * pool;
3153
3154 pool = find_literal_pool ();
3155
3156 if (pool == NULL)
3157 {
3158 /* Create a new pool. */
3159 pool = XNEW (literal_pool);
3160 if (! pool)
3161 return NULL;
3162
3163 pool->next_free_entry = 0;
3164 pool->section = now_seg;
3165 pool->sub_section = now_subseg;
3166 pool->next = list_of_pools;
3167 pool->symbol = NULL;
3168 pool->alignment = 2;
3169
3170 /* Add it to the list. */
3171 list_of_pools = pool;
3172 }
3173
3174 /* New pools, and emptied pools, will have a NULL symbol. */
3175 if (pool->symbol == NULL)
3176 {
3177 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3178 (valueT) 0, &zero_address_frag);
3179 pool->id = latest_pool_num ++;
3180 }
3181
3182 /* Done. */
3183 return pool;
3184 }
3185
3186 /* Add the literal in the global 'inst'
3187 structure to the relevant literal pool. */
3188
static int
add_to_lit_pool (unsigned int nbytes)
{
#define PADDING_SLOT 0x1
#define LIT_ENTRY_SIZE_MASK 0xFF
  literal_pool * pool;
  unsigned int entry, pool_size = 0;
  bfd_boolean padding_slot_p = FALSE;
  unsigned imm1 = 0;
  unsigned imm2 = 0;

  /* An 8-byte literal is stored as two 4-byte halves; compute them in
     memory order (swapped for big-endian targets).  */
  if (nbytes == 8)
    {
      imm1 = inst.operands[1].imm;
      imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
	       : inst.reloc.exp.X_unsigned ? 0
	       : ((bfd_int64_t) inst.operands[1].imm) >> 32);
      if (target_big_endian)
	{
	  imm1 = imm2;
	  imm2 = inst.operands[1].imm;
	}
    }

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if (nbytes == 4)
	{
	  /* Match an existing 4-byte constant entry...  */
	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	      && (inst.reloc.exp.X_op == O_constant)
	      && (pool->literals[entry].X_add_number
		  == inst.reloc.exp.X_add_number)
	      && (pool->literals[entry].X_md == nbytes)
	      && (pool->literals[entry].X_unsigned
		  == inst.reloc.exp.X_unsigned))
	    break;

	  /* ...or an existing symbolic entry.  */
	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	      && (inst.reloc.exp.X_op == O_symbol)
	      && (pool->literals[entry].X_add_number
		  == inst.reloc.exp.X_add_number)
	      && (pool->literals[entry].X_add_symbol
		  == inst.reloc.exp.X_add_symbol)
	      && (pool->literals[entry].X_op_symbol
		  == inst.reloc.exp.X_op_symbol)
	      && (pool->literals[entry].X_md == nbytes))
	    break;
	}
      /* 8-byte values occupy two consecutive 4-byte slots and must be
	 8-byte aligned within the pool.  */
      else if ((nbytes == 8)
	       && !(pool_size & 0x7)
	       && ((entry + 1) != pool->next_free_entry)
	       && (pool->literals[entry].X_op == O_constant)
	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
	       && (pool->literals[entry].X_unsigned
		   == inst.reloc.exp.X_unsigned)
	       && (pool->literals[entry + 1].X_op == O_constant)
	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
	       && (pool->literals[entry + 1].X_unsigned
		   == inst.reloc.exp.X_unsigned))
	break;

      /* A padding slot (inserted to align an 8-byte entry) can be
	 reused for a 4-byte literal.  */
      padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
      if (padding_slot_p && (nbytes == 4))
	break;

      pool_size += 4;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      if (nbytes == 8)
	{
	  /* For 8-byte entries, we align to an 8-byte boundary,
	     and split it into two 4-byte entries, because on 32-bit
	     host, 8-byte constants are treated as big num, thus
	     saved in "generic_bignum" which will be overwritten
	     by later assignments.

	     We also need to make sure there is enough space for
	     the split.

	     We also check to make sure the literal operand is a
	     constant number.  */
	  if (!(inst.reloc.exp.X_op == O_constant
		|| inst.reloc.exp.X_op == O_big))
	    {
	      inst.error = _("invalid type for literal pool");
	      return FAIL;
	    }
	  else if (pool_size & 0x7)
	    {
	      /* Pool end is only 4-byte aligned: insert a padding
		 slot first so the 8-byte value lands on an 8-byte
		 boundary.  */
	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
		{
		  inst.error = _("literal pool overflow");
		  return FAIL;
		}

	      pool->literals[entry] = inst.reloc.exp;
	      pool->literals[entry].X_op = O_constant;
	      pool->literals[entry].X_add_number = 0;
	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
	      pool->next_free_entry += 1;
	      pool_size += 4;
	    }
	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
	    {
	      inst.error = _("literal pool overflow");
	      return FAIL;
	    }

	  /* Store the two 4-byte halves as separate O_constant
	     entries.  */
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm1;
	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
	  pool->literals[entry++].X_md = 4;
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm2;
	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
	  pool->literals[entry].X_md = 4;
	  pool->alignment = 3;
	  pool->next_free_entry += 1;
	}
      else
	{
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_md = 4;
	}

#ifdef OBJ_ELF
      /* PR ld/12974: Record the location of the first source line to reference
	 this entry in the literal pool.  If it turns out during linking that the
	 symbol does not exist we will be able to give an accurate line number for
	 the (first use of the) missing reference.  */
      if (debug_type == DEBUG_DWARF2)
	dwarf2_where (pool->locs + entry);
#endif
      pool->next_free_entry += 1;
    }
  else if (padding_slot_p)
    {
      /* Reuse the padding slot found above for this 4-byte literal.  */
      pool->literals[entry] = inst.reloc.exp;
      pool->literals[entry].X_md = nbytes;
    }

  /* Rewrite the instruction's relocation to reference the pool symbol
     at the entry's byte offset.  */
  inst.reloc.exp.X_op = O_symbol;
  inst.reloc.exp.X_add_number = pool_size;
  inst.reloc.exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
3350
3351 bfd_boolean
3352 tc_start_label_without_colon (void)
3353 {
3354 bfd_boolean ret = TRUE;
3355
3356 if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
3357 {
3358 const char *label = input_line_pointer;
3359
3360 while (!is_end_of_line[(int) label[-1]])
3361 --label;
3362
3363 if (*label == '.')
3364 {
3365 as_bad (_("Invalid label '%s'"), label);
3366 ret = FALSE;
3367 }
3368
3369 asmfunc_debug (label);
3370
3371 asmfunc_state = WAITING_ENDASMFUNC;
3372 }
3373
3374 return ret;
3375 }
3376
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.  */
3379
static void
symbol_locate (symbolS * symbolP,
	       const char * name, /* It is copied, the caller can modify.  */
	       segT segment, /* Segment identifier (SEG_<something>).  */
	       valueT valu, /* Symbol value.  */
	       fragS * frag) /* Associated fragment.  */
{
  size_t name_length;
  char * preserved_copy_of_name;

  /* Stash a private copy of NAME on the notes obstack: the symbol table
     keeps a pointer to it, so the caller's buffer may be reused.  */
  name_length = strlen (name) + 1; /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = (char *) obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending after the backend has frozen the symbol table would
       corrupt the output, so treat it as an internal error.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
3427
/* Implement the .ltorg/.pool directives: dump the current literal pool
   at the current location, giving it a unique symbol so that pending
   pc-relative literal loads can be resolved against it.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  /* Nothing to do if there is no pool or it is empty.  */
  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (pool->alignment, 0, 0);

  record_alignment (now_seg, 2);

#ifdef OBJ_ELF
  /* The pool contains data, not instructions: emit a $d mapping symbol
     before it.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
#endif
  /* The \002 byte keeps the generated name from clashing with any
     user-visible symbol.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  /* Emit every queued literal; X_md holds the entry size in its low
     bits (see LIT_ENTRY_SIZE_MASK).  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
#ifdef OBJ_ELF
      if (debug_type == DEBUG_DWARF2)
	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
#endif
      /* First output the expression in the instruction to the pool.  */
      emit_expr (&(pool->literals[entry]),
		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
    }

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3479
3480 #ifdef OBJ_ELF
3481 /* Forward declarations for functions below, in the MD interface
3482 section. */
3483 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3484 static valueT create_unwind_entry (int);
3485 static void start_unwind_section (const segT, int);
3486 static void add_unwind_opcode (valueT, int);
3487 static void flush_pending_unwind (void);
3488
3489 /* Directives: Data. */
3490
/* Implement .word/.long for ELF targets: like the generic cons() but
   each comma-separated expression may carry a relocation suffix such as
   "(got)" which parse_reloc turns into a BFD relocation code.  NBYTES
   is the size of each emitted value.  */

static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  /* We are emitting data: record a $d mapping state.  */
  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  char *before_reloc = input_line_pointer;
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    /* No suffix present: emit the value normally.  */
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = (reloc_howto_type *)
		  bfd_reloc_type_lookup (stdoutput,
					 (bfd_reloc_code_real_type) reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (_("%s relocations do not fit in %d bytes"),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;
		  int offset;
		  char *save_buf = XNEWVEC (char, input_line_pointer - base);

		  /* Temporarily rewrite the input buffer so that the
		     text of the relocation suffix is skipped, then
		     re-parse the whole expression from the start.  */
		  memcpy (save_buf, base, input_line_pointer - base);
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  /* Restore the original input text.  */
		  memcpy (base, save_buf, p - base);

		  /* Place the relocation at the least significant end of
		     the emitted field when it is narrower than NBYTES.  */
		  offset = nbytes - size;
		  p = frag_more (nbytes);
		  memset (p, 0, nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
		  free (save_buf);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3583
3584 /* Emit an expression containing a 32-bit thumb instruction.
3585 Implementation based on put_thumb32_insn. */
3586
3587 static void
3588 emit_thumb32_expr (expressionS * exp)
3589 {
3590 expressionS exp_high = *exp;
3591
3592 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3593 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3594 exp->X_add_number &= 0xffff;
3595 emit_expr (exp, (unsigned int) THUMB_SIZE);
3596 }
3597
3598 /* Guess the instruction size based on the opcode. */
3599
/* Guess the size in bytes of a Thumb instruction from its opcode value:
   16-bit encodings sit below 0xe800; 32-bit encodings carry their first
   halfword in the top 16 bits (so the value is >= 0xe8000000); anything
   in between is ambiguous and yields 0.  */

static int
thumb_insn_size (int opcode)
{
  unsigned int value = (unsigned int) opcode;

  if (value < 0xe800u)
    return 2;

  return value >= 0xe8000000u ? 4 : 0;
}
3610
/* Emit the constant expression EXP as a raw instruction for the .inst
   family of directives.  NBYTES is the size requested by the suffix
   (.inst.n -> 2, .inst.w -> 4) or 0 for plain .inst, in which case the
   width is guessed from the opcode in Thumb mode.  Returns TRUE if an
   instruction was emitted.  */

static bfd_boolean
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      /* Plain .inst: deduce the width from the opcode value.  */
      if (size == 0)
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "\
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      /* Keep the IT-block tracking in step with the raw
		 instruction we are about to emit.  */
	      if (now_it.state == AUTOMATIC_IT_BLOCK)
		set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
	      else
		set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      /* Little-endian 32-bit Thumb instructions must be emitted
		 halfword by halfword to get the byte order right.  */
	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. " \
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
3655
3656 /* Like s_arm_elf_cons but do not use md_cons_align and
3657 set the mapping state to MAP_ARM/MAP_THUMB. */
3658
/* NBYTES is 2 for .inst.n, 4 for .inst.w, and 0 for plain .inst.  */

static void
s_arm_elf_inst (int nbytes)
{
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Calling mapping_state () here will not change ARM/THUMB,
     but will ensure not to be in DATA state.  */

  if (thumb_mode)
    mapping_state (MAP_THUMB);
  else
    {
      /* Width suffixes only make sense for Thumb encodings.  */
      if (nbytes != 0)
	{
	  as_bad (_("width suffixes are invalid in ARM mode"));
	  ignore_rest_of_line ();
	  return;
	}

      /* ARM instructions are always 4 bytes.  */
      nbytes = 4;

      mapping_state (MAP_ARM);
    }

  /* Emit each comma-separated expression as one instruction.  */
  do
    {
      expressionS exp;

      expression (& exp);

      if (! emit_insn (& exp, nbytes))
	{
	  ignore_rest_of_line ();
	  return;
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3705
3706 /* Parse a .rel31 directive. */
3707
3708 static void
3709 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
3710 {
3711 expressionS exp;
3712 char *p;
3713 valueT highbit;
3714
3715 highbit = 0;
3716 if (*input_line_pointer == '1')
3717 highbit = 0x80000000;
3718 else if (*input_line_pointer != '0')
3719 as_bad (_("expected 0 or 1"));
3720
3721 input_line_pointer++;
3722 if (*input_line_pointer != ',')
3723 as_bad (_("missing comma"));
3724 input_line_pointer++;
3725
3726 #ifdef md_flush_pending_output
3727 md_flush_pending_output ();
3728 #endif
3729
3730 #ifdef md_cons_align
3731 md_cons_align (4);
3732 #endif
3733
3734 mapping_state (MAP_DATA);
3735
3736 expression (&exp);
3737
3738 p = frag_more (4);
3739 md_number_to_chars (p, highbit, 4);
3740 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3741 BFD_RELOC_ARM_PREL31);
3742
3743 demand_empty_rest_of_line ();
3744 }
3745
3746 /* Directives: AEABI stack-unwind tables. */
3747
3748 /* Parse an unwind_fnstart directive. Simply records the current location. */
3749
static void
s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  /* Nested or repeated .fnstart is not allowed.  */
  if (unwind.proc_start)
    {
      as_bad (_("duplicate .fnstart directive"));
      return;
    }

  /* Mark the start of the function.  */
  unwind.proc_start = expr_build_dot ();

  /* Reset the rest of the unwind info.  */
  unwind.opcode_count = 0;
  unwind.table_entry = NULL;
  unwind.personality_routine = NULL;
  unwind.personality_index = -1;	/* -1 = no personality chosen yet.  */
  unwind.frame_size = 0;
  unwind.fp_offset = 0;
  unwind.fp_reg = REG_SP;	/* The frame pointer starts out as sp.  */
  unwind.fp_used = 0;
  unwind.sp_restored = 0;
}
3774
3775
3776 /* Parse a handlerdata directive. Creates the exception handling table entry
3777 for the function. */
3778
static void
s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* Only one table entry may be created per function.  */
  if (unwind.table_entry)
    as_bad (_("duplicate .handlerdata directive"));

  /* Non-zero argument: the handler data follows inline after the
     generated table entry.  */
  create_unwind_entry (1);
}
3791
3792 /* Parse an unwind_fnend directive. Generates the index table entry. */
3793
static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  VAL is non-zero when the whole entry fits
     inline in the index table (see below).  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  memset (ptr, 0, 8);
  /* WHERE is the offset of the entry just allocated.  */
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      /* A zero-size BFD_RELOC_NONE fix records the reference without
	 emitting any data.  */
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  unwind.proc_start = NULL;
}
3861
3862
3863 /* Parse an unwind_cantunwind directive. */
3864
static void
s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* A frame cannot both be non-unwindable and have a personality.  */
  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("personality routine specified for cantunwind frame"));

  /* -2 marks the frame as "cannot unwind"; create_unwind_entry is
     expected to emit the EXIDX_CANTUNWIND marker for it.  */
  unwind.personality_index = -2;
}
3877
3878
3879 /* Parse a personalityindex directive. */
3880
static void
s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* Only one personality (routine or index) may be given per frame.  */
  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personalityindex directive"));

  expression (&exp);

  /* The index must be a small constant in [0, 15].  */
  if (exp.X_op != O_constant
      || exp.X_add_number < 0 || exp.X_add_number > 15)
    {
      as_bad (_("bad personality routine number"));
      ignore_rest_of_line ();
      return;
    }

  unwind.personality_index = exp.X_add_number;

  demand_empty_rest_of_line ();
}
3906
3907
3908 /* Parse a personality directive. */
3909
static void
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
{
  char *name, *p, c;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* Only one personality (routine or index) may be given per frame.  */
  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personality directive"));

  /* Read the routine's symbol name; C is the terminator character
     that get_symbol_name overwrote with NUL at *P.  */
  c = get_symbol_name (& name);
  p = input_line_pointer;
  if (c == '"')
    ++ input_line_pointer;	/* Step past the closing quote.  */
  unwind.personality_routine = symbol_find_or_make (name);
  /* Restore the character clobbered by the name terminator.  */
  *p = c;
  demand_empty_rest_of_line ();
}
3929
3930
3931 /* Parse a directive saving core registers. */
3932
static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;
  int n;

  /* RANGE is a bit mask of the core registers listed (bit N => rN).  */
  range = parse_reg_list (&input_line_pointer);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}. We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      /* Drop the opcode emitted for the earlier .movsp and pretend sp
	 (bit 13) was listed instead of ip (bit 12).  */
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.  */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form: 0x8000 plus a 12-bit mask of r4-r15.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form: pop r4..r4+n, optionally with r14.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.  */
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.  */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.  */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
4006
4007
4008 /* Parse a directive saving FPA registers. */
4009
4010 static void
4011 s_arm_unwind_save_fpa (int reg)
4012 {
4013 expressionS exp;
4014 int num_regs;
4015 valueT op;
4016
4017 /* Get Number of registers to transfer. */
4018 if (skip_past_comma (&input_line_pointer) != FAIL)
4019 expression (&exp);
4020 else
4021 exp.X_op = O_illegal;
4022
4023 if (exp.X_op != O_constant)
4024 {
4025 as_bad (_("expected , <constant>"));
4026 ignore_rest_of_line ();
4027 return;
4028 }
4029
4030 num_regs = exp.X_add_number;
4031
4032 if (num_regs < 1 || num_regs > 4)
4033 {
4034 as_bad (_("number of registers must be in the range [1:4]"));
4035 ignore_rest_of_line ();
4036 return;
4037 }
4038
4039 demand_empty_rest_of_line ();
4040
4041 if (reg == 4)
4042 {
4043 /* Short form. */
4044 op = 0xb4 | (num_regs - 1);
4045 add_unwind_opcode (op, 1);
4046 }
4047 else
4048 {
4049 /* Long form. */
4050 op = 0xc800 | (reg << 4) | (num_regs - 1);
4051 add_unwind_opcode (op, 2);
4052 }
4053 unwind.frame_size += num_regs * 12;
4054 }
4055
4056
4057 /* Parse a directive saving VFP registers for ARMv6 and above. */
4058
static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;

  /* COUNT consecutive D registers beginning at START.  */
  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    /* The list straddles d15/d16 and must be split in two.  */
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      /* 0xc8xx encodes a block starting at d16 plus an offset.  */
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      /* 0xc9xx encodes a block within d0-d15.  */
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* Each D register occupies 8 bytes on the stack.  */
  unwind.frame_size += count * 8;
}
4105
4106
4107 /* Parse a directive saving VFP registers for pre-ARMv6. */
4108
4109 static void
4110 s_arm_unwind_save_vfp (void)
4111 {
4112 int count;
4113 unsigned int reg;
4114 valueT op;
4115
4116 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
4117 if (count == FAIL)
4118 {
4119 as_bad (_("expected register list"));
4120 ignore_rest_of_line ();
4121 return;
4122 }
4123
4124 demand_empty_rest_of_line ();
4125
4126 if (reg == 8)
4127 {
4128 /* Short form. */
4129 op = 0xb8 | (count - 1);
4130 add_unwind_opcode (op, 1);
4131 }
4132 else
4133 {
4134 /* Long form. */
4135 op = 0xb300 | (reg << 4) | (count - 1);
4136 add_unwind_opcode (op, 2);
4137 }
4138 unwind.frame_size += count * 8 + 4;
4139 }
4140
4141
4142 /* Parse a directive saving iWMMXt data registers. */
4143
static void
s_arm_unwind_save_mmxwr (void)
{
  int reg;
  int hi_reg;
  int i;
  unsigned mask = 0;	/* Bit N set => wrN is saved.  */
  valueT op;

  if (*input_line_pointer == '{')
    input_line_pointer++;

  /* Parse the comma-separated list of registers and ranges into MASK.  */
  do
    {
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);

      if (reg == FAIL)
	{
	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
	  goto error;
	}

      if (mask >> reg)
	as_tsktsk (_("register list not in ascending order"));
      mask |= 1 << reg;

      if (*input_line_pointer == '-')
	{
	  input_line_pointer++;
	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
	  if (hi_reg == FAIL)
	    {
	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
	      goto error;
	    }
	  else if (reg >= hi_reg)
	    {
	      as_bad (_("bad register range"));
	      goto error;
	    }
	  /* NOTE(review): this loop sets bits REG .. HI_REG-1 but never
	     the bit for HI_REG itself, so "wrN-wrM" appears to omit wrM.
	     Confirm whether ranges are meant to be inclusive here.  */
	  for (; reg < hi_reg; reg++)
	    mask |= 1 << reg;
	}
    }
  while (skip_past_comma (&input_line_pointer) != FAIL);

  skip_past_char (&input_line_pointer, '}');

  demand_empty_rest_of_line ();

  /* Generate any deferred opcodes because we're going to be looking at
     the list.  */
  flush_pending_unwind ();

  /* Each wr register occupies 8 bytes on the stack.  */
  for (i = 0; i < 16; i++)
    {
      if (mask & (1 << i))
	unwind.frame_size += 8;
    }

  /* Attempt to combine with a previous opcode.  We do this because gcc
     likes to output separate unwind directives for a single block of
     registers.  */
  if (unwind.opcode_count > 0)
    {
      i = unwind.opcodes[unwind.opcode_count - 1];
      if ((i & 0xf8) == 0xc0)
	{
	  /* The previous opcode is from the 0xc0 family; I now holds its
	     low bits (count/form selector, cf. the generation below).  */
	  i &= 7;
	  /* Only merge if the blocks are contiguous.  */
	  if (i < 6)
	    {
	      if ((mask & 0xfe00) == (1 << 9))
		{
		  mask |= ((1 << (i + 11)) - 1) & 0xfc00;
		  unwind.opcode_count--;
		}
	    }
	  else if (i == 6 && unwind.opcode_count >= 2)
	    {
	      /* Long form (0xc6xx): the second byte holds the start
		 register in its high nibble and the count in its low.  */
	      i = unwind.opcodes[unwind.opcode_count - 2];
	      reg = i >> 4;
	      i &= 0xf;

	      op = 0xffff << (reg - 1);
	      if (reg > 0
		  && ((mask & op) == (1u << (reg - 1))))
		{
		  op = (1 << (reg + i + 1)) - 1;
		  op &= ~((1 << reg) - 1);
		  mask |= op;
		  unwind.opcode_count -= 2;
		}
	    }
	}
    }

  hi_reg = 15;
  /* We want to generate opcodes in the order the registers have been
     saved, ie. descending order.  */
  for (reg = 15; reg >= -1; reg--)
    {
      /* Save registers in blocks.  */
      if (reg < 0
	  || !(mask & (1 << reg)))
	{
	  /* We found an unsaved reg.  Generate opcodes to save the
	     preceding block.  */
	  if (reg != hi_reg)
	    {
	      if (reg == 9)
		{
		  /* Short form: saves wr10 .. wr10+x.  */
		  op = 0xc0 | (hi_reg - 10);
		  add_unwind_opcode (op, 1);
		}
	      else
		{
		  /* Long form.  */
		  op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
		  add_unwind_opcode (op, 2);
		}
	    }
	  hi_reg = reg - 1;
	}
    }

  return;
error:
  ignore_rest_of_line ();
}
4275
4276 static void
4277 s_arm_unwind_save_mmxwcg (void)
4278 {
4279 int reg;
4280 int hi_reg;
4281 unsigned mask = 0;
4282 valueT op;
4283
4284 if (*input_line_pointer == '{')
4285 input_line_pointer++;
4286
4287 skip_whitespace (input_line_pointer);
4288
4289 do
4290 {
4291 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4292
4293 if (reg == FAIL)
4294 {
4295 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4296 goto error;
4297 }
4298
4299 reg -= 8;
4300 if (mask >> reg)
4301 as_tsktsk (_("register list not in ascending order"));
4302 mask |= 1 << reg;
4303
4304 if (*input_line_pointer == '-')
4305 {
4306 input_line_pointer++;
4307 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4308 if (hi_reg == FAIL)
4309 {
4310 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4311 goto error;
4312 }
4313 else if (reg >= hi_reg)
4314 {
4315 as_bad (_("bad register range"));
4316 goto error;
4317 }
4318 for (; reg < hi_reg; reg++)
4319 mask |= 1 << reg;
4320 }
4321 }
4322 while (skip_past_comma (&input_line_pointer) != FAIL);
4323
4324 skip_past_char (&input_line_pointer, '}');
4325
4326 demand_empty_rest_of_line ();
4327
4328 /* Generate any deferred opcodes because we're going to be looking at
4329 the list. */
4330 flush_pending_unwind ();
4331
4332 for (reg = 0; reg < 16; reg++)
4333 {
4334 if (mask & (1 << reg))
4335 unwind.frame_size += 4;
4336 }
4337 op = 0xc700 | mask;
4338 add_unwind_opcode (op, 2);
4339 return;
4340 error:
4341 ignore_rest_of_line ();
4342 }
4343
4344
4345 /* Parse an unwind_save directive.
4346 If the argument is non-zero, this is a .vsave directive. */
4347
4348 static void
4349 s_arm_unwind_save (int arch_v6)
4350 {
4351 char *peek;
4352 struct reg_entry *reg;
4353 bfd_boolean had_brace = FALSE;
4354
4355 if (!unwind.proc_start)
4356 as_bad (MISSING_FNSTART);
4357
4358 /* Figure out what sort of save we have. */
4359 peek = input_line_pointer;
4360
4361 if (*peek == '{')
4362 {
4363 had_brace = TRUE;
4364 peek++;
4365 }
4366
4367 reg = arm_reg_parse_multi (&peek);
4368
4369 if (!reg)
4370 {
4371 as_bad (_("register expected"));
4372 ignore_rest_of_line ();
4373 return;
4374 }
4375
4376 switch (reg->type)
4377 {
4378 case REG_TYPE_FN:
4379 if (had_brace)
4380 {
4381 as_bad (_("FPA .unwind_save does not take a register list"));
4382 ignore_rest_of_line ();
4383 return;
4384 }
4385 input_line_pointer = peek;
4386 s_arm_unwind_save_fpa (reg->number);
4387 return;
4388
4389 case REG_TYPE_RN:
4390 s_arm_unwind_save_core ();
4391 return;
4392
4393 case REG_TYPE_VFD:
4394 if (arch_v6)
4395 s_arm_unwind_save_vfp_armv6 ();
4396 else
4397 s_arm_unwind_save_vfp ();
4398 return;
4399
4400 case REG_TYPE_MMXWR:
4401 s_arm_unwind_save_mmxwr ();
4402 return;
4403
4404 case REG_TYPE_MMXWCG:
4405 s_arm_unwind_save_mmxwcg ();
4406 return;
4407
4408 default:
4409 as_bad (_(".unwind_save does not support this kind of register"));
4410 ignore_rest_of_line ();
4411 }
4412 }
4413
4414
4415 /* Parse an unwind_movsp directive. */
4416
static void
s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
{
  int reg;
  valueT op;
  int offset;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
  if (reg == FAIL)
    {
      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
      ignore_rest_of_line ();
      return;
    }

  /* Optional constant.  */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    {
      if (immediate_for_directive (&offset) == FAIL)
	return;
    }
  else
    offset = 0;

  demand_empty_rest_of_line ();

  if (reg == REG_SP || reg == REG_PC)
    {
      as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
      return;
    }

  /* A .movsp after a previous .setfp/.movsp would lose track of sp.  */
  if (unwind.fp_reg != REG_SP)
    as_bad (_("unexpected .unwind_movsp directive"));

  /* Generate opcode to restore the value: EHABI "vsp = r<reg>".  */
  op = 0x90 | reg;
  add_unwind_opcode (op, 1);

  /* Record the information for later.  */
  unwind.fp_reg = reg;
  unwind.fp_offset = unwind.frame_size - offset;
  unwind.sp_restored = 1;
}
4464
4465 /* Parse an unwind_pad directive. */
4466
4467 static void
4468 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4469 {
4470 int offset;
4471
4472 if (!unwind.proc_start)
4473 as_bad (MISSING_FNSTART);
4474
4475 if (immediate_for_directive (&offset) == FAIL)
4476 return;
4477
4478 if (offset & 3)
4479 {
4480 as_bad (_("stack increment must be multiple of 4"));
4481 ignore_rest_of_line ();
4482 return;
4483 }
4484
4485 /* Don't generate any opcodes, just record the details for later. */
4486 unwind.frame_size += offset;
4487 unwind.pending_offset += offset;
4488
4489 demand_empty_rest_of_line ();
4490 }
4491
4492 /* Parse an unwind_setfp directive. */
4493
4494 static void
4495 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4496 {
4497 int sp_reg;
4498 int fp_reg;
4499 int offset;
4500
4501 if (!unwind.proc_start)
4502 as_bad (MISSING_FNSTART);
4503
4504 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4505 if (skip_past_comma (&input_line_pointer) == FAIL)
4506 sp_reg = FAIL;
4507 else
4508 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4509
4510 if (fp_reg == FAIL || sp_reg == FAIL)
4511 {
4512 as_bad (_("expected <reg>, <reg>"));
4513 ignore_rest_of_line ();
4514 return;
4515 }
4516
4517 /* Optional constant. */
4518 if (skip_past_comma (&input_line_pointer) != FAIL)
4519 {
4520 if (immediate_for_directive (&offset) == FAIL)
4521 return;
4522 }
4523 else
4524 offset = 0;
4525
4526 demand_empty_rest_of_line ();
4527
4528 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4529 {
4530 as_bad (_("register must be either sp or set by a previous"
4531 "unwind_movsp directive"));
4532 return;
4533 }
4534
4535 /* Don't generate any opcodes, just record the information for later. */
4536 unwind.fp_reg = fp_reg;
4537 unwind.fp_used = 1;
4538 if (sp_reg == REG_SP)
4539 unwind.fp_offset = unwind.frame_size - offset;
4540 else
4541 unwind.fp_offset -= offset;
4542 }
4543
4544 /* Parse an unwind_raw directive. */
4545
4546 static void
4547 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4548 {
4549 expressionS exp;
4550 /* This is an arbitrary limit. */
4551 unsigned char op[16];
4552 int count;
4553
4554 if (!unwind.proc_start)
4555 as_bad (MISSING_FNSTART);
4556
4557 expression (&exp);
4558 if (exp.X_op == O_constant
4559 && skip_past_comma (&input_line_pointer) != FAIL)
4560 {
4561 unwind.frame_size += exp.X_add_number;
4562 expression (&exp);
4563 }
4564 else
4565 exp.X_op = O_illegal;
4566
4567 if (exp.X_op != O_constant)
4568 {
4569 as_bad (_("expected <offset>, <opcode>"));
4570 ignore_rest_of_line ();
4571 return;
4572 }
4573
4574 count = 0;
4575
4576 /* Parse the opcode. */
4577 for (;;)
4578 {
4579 if (count >= 16)
4580 {
4581 as_bad (_("unwind opcode too long"));
4582 ignore_rest_of_line ();
4583 }
4584 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4585 {
4586 as_bad (_("invalid unwind opcode"));
4587 ignore_rest_of_line ();
4588 return;
4589 }
4590 op[count++] = exp.X_add_number;
4591
4592 /* Parse the next byte. */
4593 if (skip_past_comma (&input_line_pointer) == FAIL)
4594 break;
4595
4596 expression (&exp);
4597 }
4598
4599 /* Add the opcode bytes in reverse order. */
4600 while (count--)
4601 add_unwind_opcode (op[count], 1);
4602
4603 demand_empty_rest_of_line ();
4604 }
4605
4606
4607 /* Parse a .eabi_attribute directive. */
4608
4609 static void
4610 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4611 {
4612 int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4613
4614 if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4615 attributes_set_explicitly[tag] = 1;
4616 }
4617
4618 /* Emit a tls fix for the symbol. */
4619
static void
s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
{
  char *p;
  expressionS exp;
#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* P is the current output position: no bytes are emitted here, the
     fix merely tags the existing instruction sequence for the linker's
     TLS descriptor relaxation.  */
  p = obstack_next_free (&frchain_now->frch_obstack);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
	       : BFD_RELOC_ARM_TLS_DESCSEQ);
}
4641 #endif /* OBJ_ELF */
4642
4643 static void s_arm_arch (int);
4644 static void s_arm_object_arch (int);
4645 static void s_arm_cpu (int);
4646 static void s_arm_fpu (int);
4647 static void s_arm_arch_extension (int);
4648
4649 #ifdef TE_PE
4650
4651 static void
4652 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4653 {
4654 expressionS exp;
4655
4656 do
4657 {
4658 expression (&exp);
4659 if (exp.X_op == O_symbol)
4660 exp.X_op = O_secrel;
4661
4662 emit_expr (&exp, 4);
4663 }
4664 while (*input_line_pointer++ == ',');
4665
4666 input_line_pointer--;
4667 demand_empty_rest_of_line ();
4668 }
4669 #endif /* TE_PE */
4670
4671 /* This table describes all the machine specific pseudo-ops the assembler
4672 has to support. The fields are:
4673 pseudo-op name without dot
4674 function to call to execute this pseudo-op
4675 Integer arg to pass to the function. */
4676
const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.  */
  { "req",         s_req,         0 },
  /* Following two are likewise never called.  */
  { "dn",          s_dn,          0 },
  { "qn",          s_qn,          0 },
  { "unreq",       s_unreq,       0 },
  { "bss",         s_bss,         0 },
  { "align",       s_align_ptwo,  2 },
  { "arm",         s_arm,         0 },
  { "thumb",       s_thumb,       0 },
  { "code",        s_code,        0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func",  s_thumb_func,  0 },
  { "thumb_set",   s_thumb_set,   0 },
  { "even",        s_even,        0 },
  /* ".pool" is a synonym for ".ltorg" (dump the literal pool).  */
  { "ltorg",       s_ltorg,       0 },
  { "pool",        s_ltorg,       0 },
  { "syntax",      s_syntax,      0 },
  /* Architecture/CPU selection directives.  */
  { "cpu",         s_arm_cpu,     0 },
  { "arch",        s_arm_arch,    0 },
  { "object_arch", s_arm_object_arch, 0 },
  { "fpu",         s_arm_fpu,     0 },
  { "arch_extension", s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  /* ELF-specific data, instruction and relocation directives.  */
  { "word",        s_arm_elf_cons, 4 },
  { "long",        s_arm_elf_cons, 4 },
  { "inst.n",      s_arm_elf_inst, 2 },
  { "inst.w",      s_arm_elf_inst, 4 },
  { "inst",        s_arm_elf_inst, 0 },
  { "rel31",       s_arm_rel31,    0 },
  /* EABI stack-unwinding annotation directives.  */
  { "fnstart",     s_arm_unwind_fnstart,  0 },
  { "fnend",       s_arm_unwind_fnend,    0 },
  { "cantunwind",  s_arm_unwind_cantunwind, 0 },
  { "personality", s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata", s_arm_unwind_handlerdata, 0 },
  /* ".vsave" shares the handler with ".save"; the arg distinguishes.  */
  { "save",        s_arm_unwind_save,     0 },
  { "vsave",       s_arm_unwind_save,     1 },
  { "movsp",       s_arm_unwind_movsp,    0 },
  { "pad",         s_arm_unwind_pad,      0 },
  { "setfp",       s_arm_unwind_setfp,    0 },
  { "unwind_raw",  s_arm_unwind_raw,      0 },
  { "eabi_attribute", s_arm_eabi_attribute, 0 },
  { "tlsdescseq",  s_arm_tls_descseq,     0 },
#else
  { "word",        cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  /* Extended-precision and packed-decimal float constants.  */
  { "extend",      float_cons, 'x' },
  { "ldouble",     float_cons, 'x' },
  { "packed",      float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif

  /* These are for compatibility with CodeComposer Studio.  */
  {"ref",          s_ccs_ref,        0},
  {"def",          s_ccs_def,        0},
  {"asmfunc",      s_ccs_asmfunc,    0},
  {"endasmfunc",   s_ccs_endasmfunc, 0},

  { 0, 0, 0 }
};
4750 \f
4751 /* Parser functions used exclusively in instruction operands. */
4752
4753 /* Generic immediate-value read function for use in insn parsing.
4754 STR points to the beginning of the immediate (the leading #);
4755 VAL receives the value; if the value is outside [MIN, MAX]
4756 issue an error. PREFIX_OPT is true if the immediate prefix is
4757 optional. */
4758
4759 static int
4760 parse_immediate (char **str, int *val, int min, int max,
4761 bfd_boolean prefix_opt)
4762 {
4763 expressionS exp;
4764 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4765 if (exp.X_op != O_constant)
4766 {
4767 inst.error = _("constant expression required");
4768 return FAIL;
4769 }
4770
4771 if (exp.X_add_number < min || exp.X_add_number > max)
4772 {
4773 inst.error = _("immediate value out of range");
4774 return FAIL;
4775 }
4776
4777 *val = exp.X_add_number;
4778 return SUCCESS;
4779 }
4780
4781 /* Less-generic immediate-value read function with the possibility of loading a
4782 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4783 instructions. Puts the result directly in inst.operands[i]. */
4784
static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bfd_boolean allow_symbol_p)
{
  expressionS exp;
  /* Parse into the caller's expression if one was supplied, otherwise
     into a local scratch expression.  */
  expressionS *exp_p = in_exp ? in_exp : &exp;
  char *ptr = *str;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
    {
      /* Low 32 bits go in .imm ...  */
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
	  /* ... and the high 32 bits (if any) go in .reg, flagged by
	     .regisimm.  */
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
				  & 0xffffffff);
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
    {
      /* For O_big, X_add_number is the littlenum count.  */
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0]. Make sure we put 32 bits in imm and
	 32 bits in reg,  in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  /* The first littlenum past bit 64 must be all-zeros or
	     all-ones ...  */
	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  /* ... and every further littlenum must repeat it (pure sign
	     extension).  */
	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Assemble the low 32 bits into .imm and the next 32 into .reg.  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
    return FAIL;

  /* Only advance the caller's pointer on success.  */
  *str = ptr;

  return SUCCESS;
}
4853
4854 /* Returns the pseudo-register number of an FPA immediate constant,
4855 or FAIL if there isn't a valid constant here. */
4856
static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *	 save_in;
  expressionS	 exp;
  int		 i;
  int		 j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    /* FPA immediate pseudo-registers are numbered 8..15.  */
	    return i + 8;
	  /* Not at end of line: undo the advance and keep trying.  */
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Compare the parsed littlenums against each known FPA constant.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  /* expression () works on input_line_pointer, so temporarily redirect
     it at the operand text and restore it afterwards.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
#define X_PRECISION 5
#define E_PRECISION 15L
      if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  /* Failure: restore input_line_pointer and report.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
4946
4947 /* Returns 1 if a number has "quarter-precision" float format
4948 0baBbbbbbc defgh000 00000000 00000000. */
4949
static int
is_quarter_float (unsigned imm)
{
  /* Bits 25-30 must be exactly 0b011111 with bit 29 set, or 0b100000
     with bit 29 clear; either way the low 19 bits must all be zero.  */
  unsigned wanted = (imm & 0x20000000) != 0 ? 0x3e000000 : 0x40000000;

  if ((imm & 0x7ffff) != 0)
    return 0;

  return (imm & 0x7e000000) == wanted;
}
4956
4957
4958 /* Detect the presence of a floating point or integer zero constant,
4959 i.e. #0.0 or #0. */
4960
static bfd_boolean
parse_ifimm_zero (char **in)
{
  int error_code;

  /* The operand must begin with an immediate prefix character.  */
  if (!is_immediate_prefix (**in))
    return FALSE;

  ++*in;

  /* Accept #0x0 as a synonym for #0.  */
  if (strncmp (*in, "0x", 2) == 0)
    {
      int val;
      /* Range [0, 0]: only the exact value zero is accepted.  */
      if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
        return FALSE;
      return TRUE;
    }

  error_code = atof_generic (in, ".", EXP_CHARS,
                             &generic_floating_point_number);

  /* NOTE(review): "low > leader" appears to mean the parsed mantissa
     has no significant littlenums, i.e. the value is +0.0 -- confirm
     against atof_generic's FLONUM conventions.  */
  if (!error_code
      && generic_floating_point_number.sign == '+'
      && (generic_floating_point_number.low
          > generic_floating_point_number.leader))
    return TRUE;

  return FALSE;
}
4991
4992 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4993 0baBbbbbbc defgh000 00000000 00000000.
4994 The zero and minus-zero cases need special handling, since they can't be
4995 encoded in the "quarter-precision" float format, but can nonetheless be
4996 loaded as integer constants. */
4997
static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  /* Hex literals are always integers here.  */
  if (strncmp (fpnum, "0x", 2) == 0)
    return FAIL;
  else
    {
      /* Scan to the end of the token looking for a character that
	 marks a floating-point literal.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Accept encodable values, plus +0.0/-0.0 which cannot be
	 represented in quarter-precision but load as integers.  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}
5055
/* Shift operands.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

/* Maps a source-level shift mnemonic to its kind; looked up via the
   arm_shift_hsh hash table in parse_shift.  */
struct asm_shift_name
{
  const char *name;
  enum shift_kind kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.  */
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.  */
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.  */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.  */
};
5077
5078 /* Parse a <shift> specifier on an ARM data processing instruction.
5079 This has three forms:
5080
5081 (LSL|LSR|ASL|ASR|ROR) Rs
5082 (LSL|LSR|ASL|ASR|ROR) #imm
5083 RRX
5084
5085 Note that ASL is assimilated to LSL in the instruction encoding, and
5086 RRX to ROR #0 (which cannot be written as such). */
5087
5088 static int
5089 parse_shift (char **str, int i, enum parse_shift_mode mode)
5090 {
5091 const struct asm_shift_name *shift_name;
5092 enum shift_kind shift;
5093 char *s = *str;
5094 char *p = s;
5095 int reg;
5096
5097 for (p = *str; ISALPHA (*p); p++)
5098 ;
5099
5100 if (p == *str)
5101 {
5102 inst.error = _("shift expression expected");
5103 return FAIL;
5104 }
5105
5106 shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
5107 p - *str);
5108
5109 if (shift_name == NULL)
5110 {
5111 inst.error = _("shift expression expected");
5112 return FAIL;
5113 }
5114
5115 shift = shift_name->kind;
5116
5117 switch (mode)
5118 {
5119 case NO_SHIFT_RESTRICT:
5120 case SHIFT_IMMEDIATE: break;
5121
5122 case SHIFT_LSL_OR_ASR_IMMEDIATE:
5123 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
5124 {
5125 inst.error = _("'LSL' or 'ASR' required");
5126 return FAIL;
5127 }
5128 break;
5129
5130 case SHIFT_LSL_IMMEDIATE:
5131 if (shift != SHIFT_LSL)
5132 {
5133 inst.error = _("'LSL' required");
5134 return FAIL;
5135 }
5136 break;
5137
5138 case SHIFT_ASR_IMMEDIATE:
5139 if (shift != SHIFT_ASR)
5140 {
5141 inst.error = _("'ASR' required");
5142 return FAIL;
5143 }
5144 break;
5145
5146 default: abort ();
5147 }
5148
5149 if (shift != SHIFT_RRX)
5150 {
5151 /* Whitespace can appear here if the next thing is a bare digit. */
5152 skip_whitespace (p);
5153
5154 if (mode == NO_SHIFT_RESTRICT
5155 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5156 {
5157 inst.operands[i].imm = reg;
5158 inst.operands[i].immisreg = 1;
5159 }
5160 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5161 return FAIL;
5162 }
5163 inst.operands[i].shift_kind = shift;
5164 inst.operands[i].shifted = 1;
5165 *str = p;
5166 return SUCCESS;
5167 }
5168
5169 /* Parse a <shifter_operand> for an ARM data processing instruction:
5170
5171 #<immediate>
5172 #<immediate>, <rotate>
5173 <Rm>
5174 <Rm>, <shift>
5175
5176 where <shift> is defined by parse_shift above, and <rotate> is a
5177 multiple of 2 between 0 and 30. Validation of immediate operands
5178 is deferred to md_apply_fix. */
5179
static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS exp;

  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      /* Register form: <Rm> [, <shift>].  */
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  /* Immediate form: the value lands in inst.reloc.exp.  */
  if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&exp, str, GE_NO_PREFIX))
	return FAIL;

      if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      /* The rotation must be an even amount in [0, 30], the base
	 constant an 8-bit value.  */
      value = exp.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Encode as specified.  */
      /* VALUE is even, so VALUE << 7 == (VALUE / 2) << 8: the rotation
	 halved, positioned above the 8-bit constant.  */
      inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
      return SUCCESS;
    }

  /* Plain #immediate: leave validation/encoding to md_apply_fix.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 0;
  return SUCCESS;
}
5238
5239 /* Group relocation information. Each entry in the table contains the
5240 textual name of the relocation as may appear in assembler source
5241 and must end with a colon.
5242 Along with this textual name are the relocation codes to be used if
5243 the corresponding instruction is an ALU instruction (ADD or SUB only),
5244 an LDR, an LDRS, or an LDC. */
5245
/* One group-relocation name together with the relocation code to use
   for each instruction class; a code of 0 means the relocation is not
   allowed for that class.  */
struct group_reloc_table_entry
{
  const char *name;
  int alu_code;
  int ldr_code;
  int ldrs_code;
  int ldc_code;
};

typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC
} group_reloc_type;
5263
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    /* NOTE(review): the "_nc" suffix presumably means "no overflow
       check", per the ARM ELF ABI group-relocation naming -- confirm
       against AAELF32.  */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 },	/* LDC */
    /* Absolute thumb alu relocations.  */
    { "lower0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "lower8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 } };				/* LDC.  */
5338
5339 /* Given the address of a pointer pointing to the textual name of a group
5340 relocation as may appear in assembler source, attempt to find its details
5341 in group_reloc_table. The pointer will be updated to the character after
5342 the trailing colon. On failure, FAIL will be returned; SUCCESS
5343 otherwise. On success, *entry will be updated to point at the relevant
5344 group_reloc_table entry. */
5345
5346 static int
5347 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5348 {
5349 unsigned int i;
5350 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5351 {
5352 int length = strlen (group_reloc_table[i].name);
5353
5354 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5355 && (*str)[length] == ':')
5356 {
5357 *out = &group_reloc_table[i];
5358 *str += (length + 1);
5359 return SUCCESS;
5360 }
5361 }
5362
5363 return FAIL;
5364 }
5365
5366 /* Parse a <shifter_operand> for an ARM data processing instruction
5367 (as for parse_shifter_operand) where group relocations are allowed:
5368
5369 #<immediate>
5370 #<immediate>, <rotate>
5371 #:<group_reloc>:<expression>
5372 <Rm>
5373 <Rm>, <shift>
5374
5375 where <group_reloc> is one of the strings defined in group_reloc_table.
5376 The hashes are optional.
5377
5378 Everything else is as for parse_shifter_operand. */
5379
5380 static parse_operand_result
5381 parse_shifter_operand_group_reloc (char **str, int i)
5382 {
5383 /* Determine if we have the sequence of characters #: or just :
5384 coming next. If we do, then we check for a group relocation.
5385 If we don't, punt the whole lot to parse_shifter_operand. */
5386
5387 if (((*str)[0] == '#' && (*str)[1] == ':')
5388 || (*str)[0] == ':')
5389 {
5390 struct group_reloc_table_entry *entry;
5391
5392 if ((*str)[0] == '#')
5393 (*str) += 2;
5394 else
5395 (*str)++;
5396
5397 /* Try to parse a group relocation. Anything else is an error. */
5398 if (find_group_reloc_table_entry (str, &entry) == FAIL)
5399 {
5400 inst.error = _("unknown group relocation");
5401 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5402 }
5403
5404 /* We now have the group relocation table entry corresponding to
5405 the name in the assembler source. Next, we parse the expression. */
5406 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
5407 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5408
5409 /* Record the relocation type (always the ALU variant here). */
5410 inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
5411 gas_assert (inst.reloc.type != 0);
5412
5413 return PARSE_OPERAND_SUCCESS;
5414 }
5415 else
5416 return parse_shifter_operand (str, i) == SUCCESS
5417 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5418
5419 /* Never reached. */
5420 }
5421
5422 /* Parse a Neon alignment expression. Information is written to
5423 inst.operands[i]. We assume the initial ':' has been skipped.
5424
5425 align .imm = align << 8, .immisalign=1, .preind=0 */
5426 static parse_operand_result
5427 parse_neon_alignment (char **str, int i)
5428 {
5429 char *p = *str;
5430 expressionS exp;
5431
5432 my_get_expression (&exp, &p, GE_NO_PREFIX);
5433
5434 if (exp.X_op != O_constant)
5435 {
5436 inst.error = _("alignment must be constant");
5437 return PARSE_OPERAND_FAIL;
5438 }
5439
5440 inst.operands[i].imm = exp.X_add_number << 8;
5441 inst.operands[i].immisalign = 1;
5442 /* Alignments are not pre-indexes. */
5443 inst.operands[i].preind = 0;
5444
5445 *str = p;
5446 return PARSE_OPERAND_SUCCESS;
5447 }
5448
5449 /* Parse all forms of an ARM address expression. Information is written
5450 to inst.operands[i] and/or inst.reloc.
5451
5452 Preindexed addressing (.preind=1):
5453
5454 [Rn, #offset] .reg=Rn .reloc.exp=offset
5455 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5456 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5457 .shift_kind=shift .reloc.exp=shift_imm
5458
5459 These three may have a trailing ! which causes .writeback to be set also.
5460
5461 Postindexed addressing (.postind=1, .writeback=1):
5462
5463 [Rn], #offset .reg=Rn .reloc.exp=offset
5464 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5465 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5466 .shift_kind=shift .reloc.exp=shift_imm
5467
5468 Unindexed addressing (.preind=0, .postind=0):
5469
5470 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5471
5472 Other:
5473
5474 [Rn]{!} shorthand for [Rn,#0]{!}
5475 =immediate .isreg=0 .reloc.exp=immediate
5476 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5477
5478 It is the caller's responsibility to check for addressing modes not
5479 supported by the instruction, and to set inst.reloc.type. */
5480
static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  /* No '[': either "=immediate" or a bare (label) address.  */
  if (skip_past_char (&p, '[') == FAIL)
    {
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.reloc.pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;

	  if (my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX_BIG))
	    return PARSE_OPERAND_FAIL;
	}
      else if (parse_big_immediate (&p, i, &inst.reloc.exp,
				    /*allow_symbol_p=*/TRUE))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* PR gas/14887: Allow for whitespace after the opening bracket.  */
  skip_whitespace (p);

  /* The base register is mandatory.  */
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  /* A comma after the base introduces a pre-indexed offset: register,
     Neon alignment, group relocation, or immediate.  */
  if (skip_past_comma (&p) == SUCCESS)
    {
      inst.operands[i].preind = 1;

      /* .negative is set speculatively here; it is undone below if the
	 operand turns out not to be a register.  */
      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here. This may be subject to
	     change.  */
	  parse_operand_result result = parse_neon_alignment (&p, i);

	  if (result != PARSE_OPERAND_SUCCESS)
	    return result;
	}
      else
	{
	  /* Not a register: back out the speculative sign consumption
	     so the expression parser sees it.  */
	  if (inst.operands[i].negative)
	    {
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		  case GROUP_LDR:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
		    break;

		  case GROUP_LDRS:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
		    break;

		  case GROUP_LDC:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
		    break;

		  default:
		    gas_assert (0);
		}

	      /* A zero code in the table means this relocation is not
		 defined for this class of instruction.  */
	      if (inst.reloc.type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    {
	      char *q = p;
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }
  else if (skip_past_char (&p, ':') == SUCCESS)
    {
      /* FIXME: '@' should be used here, but it's filtered out by generic code
	 before we get to see it here. This may be subject to change.  */
      parse_operand_result result = parse_neon_alignment (&p, i);

      if (result != PARSE_OPERAND_SUCCESS)
	return result;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  /* '!' after the bracket requests writeback.  */
  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  /* A comma after the bracket introduces a post-indexed offset or an
     unindexed {option} field.  */
  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  /* Speculative sign consumption, as in the pre-indexed case.  */
	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already.  If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      char *q = p;
	      if (inst.operands[i].negative)
		{
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
5732
5733 static int
5734 parse_address (char **str, int i)
5735 {
5736 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5737 ? SUCCESS : FAIL;
5738 }
5739
/* As parse_address_main, but with group relocations of class TYPE
   permitted in the operand.  */
static parse_operand_result
parse_address_group_reloc (char **str, int i, group_reloc_type type)
{
  return parse_address_main (str, i, 1, type);
}
5745
5746 /* Parse an operand for a MOVW or MOVT instruction. */
5747 static int
5748 parse_half (char **str)
5749 {
5750 char * p;
5751
5752 p = *str;
5753 skip_past_char (&p, '#');
5754 if (strncasecmp (p, ":lower16:", 9) == 0)
5755 inst.reloc.type = BFD_RELOC_ARM_MOVW;
5756 else if (strncasecmp (p, ":upper16:", 9) == 0)
5757 inst.reloc.type = BFD_RELOC_ARM_MOVT;
5758
5759 if (inst.reloc.type != BFD_RELOC_UNUSED)
5760 {
5761 p += 9;
5762 skip_whitespace (p);
5763 }
5764
5765 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5766 return FAIL;
5767
5768 if (inst.reloc.type == BFD_RELOC_UNUSED)
5769 {
5770 if (inst.reloc.exp.X_op != O_constant)
5771 {
5772 inst.error = _("constant expression expected");
5773 return FAIL;
5774 }
5775 if (inst.reloc.exp.X_add_number < 0
5776 || inst.reloc.exp.X_add_number > 0xffff)
5777 {
5778 inst.error = _("immediate value out of range");
5779 return FAIL;
5780 }
5781 }
5782 *str = p;
5783 return SUCCESS;
5784 }
5785
5786 /* Miscellaneous. */
5787
/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
   or a bitmask suitable to be or-ed into the ARM msr instruction.
   LHS is TRUE when parsing the destination of an MSR (a write), FALSE
   for an MRS source (a read); it affects the default mask bits used.  */
static int
parse_psr (char **str, bfd_boolean lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bfd_boolean is_apsr = FALSE;
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698:  If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    m_profile = FALSE;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */
      is_apsr = TRUE;
      psr_field = 0;
    }
  else if (m_profile)
    {
      /* M-profile: the register name is a free-form identifier looked up
	 in the v7m PSR hash table (e.g. "ipsr", "basepri", ...).  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* For the *psr family, stop scanning at the 'r' so that any
	 APSR-style bitfield suffix (e.g. "xpsr_nzcvq") is left for the
	 suffix parser below.  */
      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified.  Note that
	 APSR itself is handled above.  */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = TRUE;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  /* Skip over the 4-character register name matched above.  */
  p += 4;
check_suffix:
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  /* Each flag letter sets its own bit; a repeated letter sets the
	     0x20 (or 0x2 for 'g') sentinel, which the validity check below
	     rejects as a bad bitmask.  */
	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  /* All five of n, z, c, v, q present maps to the PSR_f mask.  */
	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  if (g_bit == 0x1)
	    {
	      /* The 'g' (GE bits) flag requires the DSP extension.  */
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject duplicated letters and partial nzcvq sets.  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  /* Non-APSR suffixes name whole fields (e.g. "_cxsf") and are
	     looked up in the generic PSR hash table.  */
	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						      p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;  /* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here).  */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
5984
5985 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5986 value suitable for splatting into the AIF field of the instruction. */
5987
5988 static int
5989 parse_cps_flags (char **str)
5990 {
5991 int val = 0;
5992 int saw_a_flag = 0;
5993 char *s = *str;
5994
5995 for (;;)
5996 switch (*s++)
5997 {
5998 case '\0': case ',':
5999 goto done;
6000
6001 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
6002 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
6003 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
6004
6005 default:
6006 inst.error = _("unrecognized CPS flag");
6007 return FAIL;
6008 }
6009
6010 done:
6011 if (saw_a_flag == 0)
6012 {
6013 inst.error = _("missing CPS flags");
6014 return FAIL;
6015 }
6016
6017 *str = s - 1;
6018 return val;
6019 }
6020
6021 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6022 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6023
6024 static int
6025 parse_endian_specifier (char **str)
6026 {
6027 int little_endian;
6028 char *s = *str;
6029
6030 if (strncasecmp (s, "BE", 2))
6031 little_endian = 0;
6032 else if (strncasecmp (s, "LE", 2))
6033 little_endian = 1;
6034 else
6035 {
6036 inst.error = _("valid endian specifiers are be or le");
6037 return FAIL;
6038 }
6039
6040 if (ISALNUM (s[2]) || s[2] == '_')
6041 {
6042 inst.error = _("valid endian specifiers are be or le");
6043 return FAIL;
6044 }
6045
6046 *str = s + 2;
6047 return little_endian;
6048 }
6049
/* Parse a rotation specifier: ROR #0, #8, #16, #24.  Returns a value
   suitable for poking into the rotate field of an sxt or sxta
   instruction, or FAIL on error.  */
6053
6054 static int
6055 parse_ror (char **str)
6056 {
6057 int rot;
6058 char *s = *str;
6059
6060 if (strncasecmp (s, "ROR", 3) == 0)
6061 s += 3;
6062 else
6063 {
6064 inst.error = _("missing rotation field after comma");
6065 return FAIL;
6066 }
6067
6068 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6069 return FAIL;
6070
6071 switch (rot)
6072 {
6073 case 0: *str = s; return 0x0;
6074 case 8: *str = s; return 0x1;
6075 case 16: *str = s; return 0x2;
6076 case 24: *str = s; return 0x3;
6077
6078 default:
6079 inst.error = _("rotation can only be 0, 8, 16, or 24");
6080 return FAIL;
6081 }
6082 }
6083
6084 /* Parse a conditional code (from conds[] below). The value returned is in the
6085 range 0 .. 14, or FAIL. */
6086 static int
6087 parse_cond (char **str)
6088 {
6089 char *q;
6090 const struct asm_cond *c;
6091 int n;
6092 /* Condition codes are always 2 characters, so matching up to
6093 3 characters is sufficient. */
6094 char cond[3];
6095
6096 q = *str;
6097 n = 0;
6098 while (ISALPHA (*q) && n < 3)
6099 {
6100 cond[n] = TOLOWER (*q);
6101 q++;
6102 n++;
6103 }
6104
6105 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6106 if (!c)
6107 {
6108 inst.error = _("condition required");
6109 return FAIL;
6110 }
6111
6112 *str = q;
6113 return c->value;
6114 }
6115
6116 /* Record a use of the given feature. */
6117 static void
6118 record_feature_use (const arm_feature_set *feature)
6119 {
6120 if (thumb_mode)
6121 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
6122 else
6123 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
6124 }
6125
6126 /* If the given feature available in the selected CPU, mark it as used.
6127 Returns TRUE iff feature is available. */
6128 static bfd_boolean
6129 mark_feature_used (const arm_feature_set *feature)
6130 {
6131 /* Ensure the option is valid on the current architecture. */
6132 if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
6133 return FALSE;
6134
6135 /* Add the appropriate architecture feature for the barrier option used.
6136 */
6137 record_feature_use (feature);
6138
6139 return TRUE;
6140 }
6141
6142 /* Parse an option for a barrier instruction. Returns the encoding for the
6143 option, or FAIL. */
6144 static int
6145 parse_barrier (char **str)
6146 {
6147 char *p, *q;
6148 const struct asm_barrier_opt *o;
6149
6150 p = q = *str;
6151 while (ISALPHA (*q))
6152 q++;
6153
6154 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6155 q - p);
6156 if (!o)
6157 return FAIL;
6158
6159 if (!mark_feature_used (&o->arch))
6160 return FAIL;
6161
6162 *str = q;
6163 return o->value;
6164 }
6165
6166 /* Parse the operands of a table branch instruction. Similar to a memory
6167 operand. */
6168 static int
6169 parse_tb (char **str)
6170 {
6171 char * p = *str;
6172 int reg;
6173
6174 if (skip_past_char (&p, '[') == FAIL)
6175 {
6176 inst.error = _("'[' expected");
6177 return FAIL;
6178 }
6179
6180 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6181 {
6182 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6183 return FAIL;
6184 }
6185 inst.operands[0].reg = reg;
6186
6187 if (skip_past_comma (&p) == FAIL)
6188 {
6189 inst.error = _("',' expected");
6190 return FAIL;
6191 }
6192
6193 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6194 {
6195 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6196 return FAIL;
6197 }
6198 inst.operands[0].imm = reg;
6199
6200 if (skip_past_comma (&p) == SUCCESS)
6201 {
6202 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6203 return FAIL;
6204 if (inst.reloc.exp.X_add_number != 1)
6205 {
6206 inst.error = _("invalid shift");
6207 return FAIL;
6208 }
6209 inst.operands[0].shifted = 1;
6210 }
6211
6212 if (skip_past_char (&p, ']') == FAIL)
6213 {
6214 inst.error = _("']' expected");
6215 return FAIL;
6216 }
6217 *str = p;
6218 return SUCCESS;
6219 }
6220
/* Parse the operands of a Neon VMOV instruction.  See do_neon_mov for more
   information on the types the operands can take and how they are encoded.
   Up to four operands may be read; this function handles setting the
   ".present" field for each read operand itself.
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   else returns FAIL.

   The local I is post-incremented after each completed operand so that it
   always indexes the next inst.operands[] slot to fill; on the error paths
   neither *STR nor *WHICH_OPERAND is updated.  */

static int
parse_neon_mov (char **str, int *which_operand)
{
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  First operand is an S, D or Q
	 register; what follows decides which case applies.  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* D register destination: a second core register follows
		 (case 5).  For an S register (case 13) we are done.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15: two core registers follow.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
	       == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7: first operand is a core register.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14: VMOV <Rd>, <Rn>, <Sm>, <Se> — a fourth (single)
		 register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13: VMOV <Rd>, <Sm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands.  Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
6443
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  The ARM matcher code occupies the low
   16 bits of the pattern entry and the Thumb code the high 16 bits;
   parse_operands picks the relevant half at run time (any entry >= 1<<16
   is split there).  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))
6448
/* Matcher codes for parse_operands.

   NOTE: the enumerator order is load-bearing.  parse_operands treats
   every code >= OP_FIRST_OPTIONAL as an optional operand (enabling
   backtracking), so all OP_o* codes must stay together at the end, and
   the MIX_ARM_THUMB_OPERANDS combinations rely on each plain code
   fitting in 16 bits.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,	/* VFP single or double-precision register list (& quad) */
  OP_NRDLST,	/* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,	/* Neon element/structure list */

  OP_RNDQ_I0,	/* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0,	/* VFP S or D reg, or floating point immediate zero.  */
  OP_RR_RNSC,	/* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC,	/* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,	/* Neon D reg, or Neon scalar.  */
  OP_VMOV,	/* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  OP_RNDQ_I63b,	/* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z,	/* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,	/* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,	/*		   0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,	/* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,	/* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,	 /*				1 .. 32 */
  OP_oI32z,	 /*				0 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,	 /* Optional Neon double precision register */
  OP_oRNQ,	 /* Optional Neon quad precision register */
  OP_oRNDQ,	 /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  OP_FIRST_OPTIONAL = OP_oI7b
};
6579
6580 /* Generic instruction operand parser. This does no encoding and no
6581 semantic validation; it merely squirrels values away in the inst
6582 structure. Returns SUCCESS or FAIL depending on whether the
6583 specified grammar matched. */
6584 static int
6585 parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
6586 {
6587 unsigned const int *upat = pattern;
6588 char *backtrack_pos = 0;
6589 const char *backtrack_error = 0;
6590 int i, val = 0, backtrack_index = 0;
6591 enum arm_reg_type rtype;
6592 parse_operand_result result;
6593 unsigned int op_parse_code;
6594
6595 #define po_char_or_fail(chr) \
6596 do \
6597 { \
6598 if (skip_past_char (&str, chr) == FAIL) \
6599 goto bad_args; \
6600 } \
6601 while (0)
6602
6603 #define po_reg_or_fail(regtype) \
6604 do \
6605 { \
6606 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6607 & inst.operands[i].vectype); \
6608 if (val == FAIL) \
6609 { \
6610 first_error (_(reg_expected_msgs[regtype])); \
6611 goto failure; \
6612 } \
6613 inst.operands[i].reg = val; \
6614 inst.operands[i].isreg = 1; \
6615 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6616 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6617 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6618 || rtype == REG_TYPE_VFD \
6619 || rtype == REG_TYPE_NQ); \
6620 } \
6621 while (0)
6622
6623 #define po_reg_or_goto(regtype, label) \
6624 do \
6625 { \
6626 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6627 & inst.operands[i].vectype); \
6628 if (val == FAIL) \
6629 goto label; \
6630 \
6631 inst.operands[i].reg = val; \
6632 inst.operands[i].isreg = 1; \
6633 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6634 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6635 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6636 || rtype == REG_TYPE_VFD \
6637 || rtype == REG_TYPE_NQ); \
6638 } \
6639 while (0)
6640
6641 #define po_imm_or_fail(min, max, popt) \
6642 do \
6643 { \
6644 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6645 goto failure; \
6646 inst.operands[i].imm = val; \
6647 } \
6648 while (0)
6649
6650 #define po_scalar_or_goto(elsz, label) \
6651 do \
6652 { \
6653 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6654 if (val == FAIL) \
6655 goto label; \
6656 inst.operands[i].reg = val; \
6657 inst.operands[i].isscalar = 1; \
6658 } \
6659 while (0)
6660
6661 #define po_misc_or_fail(expr) \
6662 do \
6663 { \
6664 if (expr) \
6665 goto failure; \
6666 } \
6667 while (0)
6668
6669 #define po_misc_or_fail_no_backtrack(expr) \
6670 do \
6671 { \
6672 result = expr; \
6673 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6674 backtrack_pos = 0; \
6675 if (result != PARSE_OPERAND_SUCCESS) \
6676 goto failure; \
6677 } \
6678 while (0)
6679
6680 #define po_barrier_or_imm(str) \
6681 do \
6682 { \
6683 val = parse_barrier (&str); \
6684 if (val == FAIL && ! ISALPHA (*str)) \
6685 goto immediate; \
6686 if (val == FAIL \
6687 /* ISB can only take SY as an option. */ \
6688 || ((inst.instruction & 0xf0) == 0x60 \
6689 && val != 0xf)) \
6690 { \
6691 inst.error = _("invalid barrier type"); \
6692 backtrack_pos = 0; \
6693 goto failure; \
6694 } \
6695 } \
6696 while (0)
6697
6698 skip_whitespace (str);
6699
6700 for (i = 0; upat[i] != OP_stop; i++)
6701 {
6702 op_parse_code = upat[i];
6703 if (op_parse_code >= 1<<16)
6704 op_parse_code = thumb ? (op_parse_code >> 16)
6705 : (op_parse_code & ((1<<16)-1));
6706
6707 if (op_parse_code >= OP_FIRST_OPTIONAL)
6708 {
6709 /* Remember where we are in case we need to backtrack. */
6710 gas_assert (!backtrack_pos);
6711 backtrack_pos = str;
6712 backtrack_error = inst.error;
6713 backtrack_index = i;
6714 }
6715
6716 if (i > 0 && (i > 1 || inst.operands[0].present))
6717 po_char_or_fail (',');
6718
6719 switch (op_parse_code)
6720 {
6721 /* Registers */
6722 case OP_oRRnpc:
6723 case OP_oRRnpcsp:
6724 case OP_RRnpc:
6725 case OP_RRnpcsp:
6726 case OP_oRR:
6727 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
6728 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
6729 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
6730 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
6731 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
6732 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
6733 case OP_oRND:
6734 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
6735 case OP_RVC:
6736 po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
6737 break;
6738 /* Also accept generic coprocessor regs for unknown registers. */
6739 coproc_reg:
6740 po_reg_or_fail (REG_TYPE_CN);
6741 break;
6742 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
6743 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
6744 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
6745 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
6746 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
6747 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
6748 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
6749 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
6750 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
6751 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
6752 case OP_oRNQ:
6753 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
6754 case OP_oRNDQ:
6755 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
6756 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
6757 case OP_oRNSDQ:
6758 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
6759
6760 /* Neon scalar. Using an element size of 8 means that some invalid
6761 scalars are accepted here, so deal with those in later code. */
6762 case OP_RNSC: po_scalar_or_goto (8, failure); break;
6763
6764 case OP_RNDQ_I0:
6765 {
6766 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
6767 break;
6768 try_imm0:
6769 po_imm_or_fail (0, 0, TRUE);
6770 }
6771 break;
6772
6773 case OP_RVSD_I0:
6774 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
6775 break;
6776
6777 case OP_RSVD_FI0:
6778 {
6779 po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
6780 break;
6781 try_ifimm0:
6782 if (parse_ifimm_zero (&str))
6783 inst.operands[i].imm = 0;
6784 else
6785 {
6786 inst.error
6787 = _("only floating point zero is allowed as immediate value");
6788 goto failure;
6789 }
6790 }
6791 break;
6792
6793 case OP_RR_RNSC:
6794 {
6795 po_scalar_or_goto (8, try_rr);
6796 break;
6797 try_rr:
6798 po_reg_or_fail (REG_TYPE_RN);
6799 }
6800 break;
6801
6802 case OP_RNSDQ_RNSC:
6803 {
6804 po_scalar_or_goto (8, try_nsdq);
6805 break;
6806 try_nsdq:
6807 po_reg_or_fail (REG_TYPE_NSDQ);
6808 }
6809 break;
6810
6811 case OP_RNDQ_RNSC:
6812 {
6813 po_scalar_or_goto (8, try_ndq);
6814 break;
6815 try_ndq:
6816 po_reg_or_fail (REG_TYPE_NDQ);
6817 }
6818 break;
6819
6820 case OP_RND_RNSC:
6821 {
6822 po_scalar_or_goto (8, try_vfd);
6823 break;
6824 try_vfd:
6825 po_reg_or_fail (REG_TYPE_VFD);
6826 }
6827 break;
6828
6829 case OP_VMOV:
6830 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6831 not careful then bad things might happen. */
6832 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
6833 break;
6834
6835 case OP_RNDQ_Ibig:
6836 {
6837 po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
6838 break;
6839 try_immbig:
6840 /* There's a possibility of getting a 64-bit immediate here, so
6841 we need special handling. */
6842 if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
6843 == FAIL)
6844 {
6845 inst.error = _("immediate value is out of range");
6846 goto failure;
6847 }
6848 }
6849 break;
6850
6851 case OP_RNDQ_I63b:
6852 {
6853 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
6854 break;
6855 try_shimm:
6856 po_imm_or_fail (0, 63, TRUE);
6857 }
6858 break;
6859
6860 case OP_RRnpcb:
6861 po_char_or_fail ('[');
6862 po_reg_or_fail (REG_TYPE_RN);
6863 po_char_or_fail (']');
6864 break;
6865
6866 case OP_RRnpctw:
6867 case OP_RRw:
6868 case OP_oRRw:
6869 po_reg_or_fail (REG_TYPE_RN);
6870 if (skip_past_char (&str, '!') == SUCCESS)
6871 inst.operands[i].writeback = 1;
6872 break;
6873
6874 /* Immediates */
6875 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
6876 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
6877 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
6878 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
6879 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
6880 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
6881 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
6882 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
6883 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
6884 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
6885 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
6886 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
6887
6888 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
6889 case OP_oI7b:
6890 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
6891 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
6892 case OP_oI31b:
6893 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
6894 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
6895 case OP_oI32z: po_imm_or_fail ( 0, 32, TRUE); break;
6896 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
6897
6898 /* Immediate variants */
6899 case OP_oI255c:
6900 po_char_or_fail ('{');
6901 po_imm_or_fail (0, 255, TRUE);
6902 po_char_or_fail ('}');
6903 break;
6904
6905 case OP_I31w:
6906 /* The expression parser chokes on a trailing !, so we have
6907 to find it first and zap it. */
6908 {
6909 char *s = str;
6910 while (*s && *s != ',')
6911 s++;
6912 if (s[-1] == '!')
6913 {
6914 s[-1] = '\0';
6915 inst.operands[i].writeback = 1;
6916 }
6917 po_imm_or_fail (0, 31, TRUE);
6918 if (str == s - 1)
6919 str = s;
6920 }
6921 break;
6922
6923 /* Expressions */
6924 case OP_EXPi: EXPi:
6925 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6926 GE_OPT_PREFIX));
6927 break;
6928
6929 case OP_EXP:
6930 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6931 GE_NO_PREFIX));
6932 break;
6933
6934 case OP_EXPr: EXPr:
6935 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6936 GE_NO_PREFIX));
6937 if (inst.reloc.exp.X_op == O_symbol)
6938 {
6939 val = parse_reloc (&str);
6940 if (val == -1)
6941 {
6942 inst.error = _("unrecognized relocation suffix");
6943 goto failure;
6944 }
6945 else if (val != BFD_RELOC_UNUSED)
6946 {
6947 inst.operands[i].imm = val;
6948 inst.operands[i].hasreloc = 1;
6949 }
6950 }
6951 break;
6952
6953 /* Operand for MOVW or MOVT. */
6954 case OP_HALF:
6955 po_misc_or_fail (parse_half (&str));
6956 break;
6957
6958 /* Register or expression. */
6959 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
6960 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
6961
6962 /* Register or immediate. */
6963 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
6964 I0: po_imm_or_fail (0, 0, FALSE); break;
6965
6966 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
6967 IF:
6968 if (!is_immediate_prefix (*str))
6969 goto bad_args;
6970 str++;
6971 val = parse_fpa_immediate (&str);
6972 if (val == FAIL)
6973 goto failure;
6974 /* FPA immediates are encoded as registers 8-15.
6975 parse_fpa_immediate has already applied the offset. */
6976 inst.operands[i].reg = val;
6977 inst.operands[i].isreg = 1;
6978 break;
6979
6980 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
6981 I32z: po_imm_or_fail (0, 32, FALSE); break;
6982
6983 /* Two kinds of register. */
6984 case OP_RIWR_RIWC:
6985 {
6986 struct reg_entry *rege = arm_reg_parse_multi (&str);
6987 if (!rege
6988 || (rege->type != REG_TYPE_MMXWR
6989 && rege->type != REG_TYPE_MMXWC
6990 && rege->type != REG_TYPE_MMXWCG))
6991 {
6992 inst.error = _("iWMMXt data or control register expected");
6993 goto failure;
6994 }
6995 inst.operands[i].reg = rege->number;
6996 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
6997 }
6998 break;
6999
7000 case OP_RIWC_RIWG:
7001 {
7002 struct reg_entry *rege = arm_reg_parse_multi (&str);
7003 if (!rege
7004 || (rege->type != REG_TYPE_MMXWC
7005 && rege->type != REG_TYPE_MMXWCG))
7006 {
7007 inst.error = _("iWMMXt control register expected");
7008 goto failure;
7009 }
7010 inst.operands[i].reg = rege->number;
7011 inst.operands[i].isreg = 1;
7012 }
7013 break;
7014
7015 /* Misc */
7016 case OP_CPSF: val = parse_cps_flags (&str); break;
7017 case OP_ENDI: val = parse_endian_specifier (&str); break;
7018 case OP_oROR: val = parse_ror (&str); break;
7019 case OP_COND: val = parse_cond (&str); break;
7020 case OP_oBARRIER_I15:
7021 po_barrier_or_imm (str); break;
7022 immediate:
7023 if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
7024 goto failure;
7025 break;
7026
7027 case OP_wPSR:
7028 case OP_rPSR:
7029 po_reg_or_goto (REG_TYPE_RNB, try_psr);
7030 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
7031 {
7032 inst.error = _("Banked registers are not available with this "
7033 "architecture.");
7034 goto failure;
7035 }
7036 break;
7037 try_psr:
7038 val = parse_psr (&str, op_parse_code == OP_wPSR);
7039 break;
7040
7041 case OP_APSR_RR:
7042 po_reg_or_goto (REG_TYPE_RN, try_apsr);
7043 break;
7044 try_apsr:
7045 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7046 instruction). */
7047 if (strncasecmp (str, "APSR_", 5) == 0)
7048 {
7049 unsigned found = 0;
7050 str += 5;
7051 while (found < 15)
7052 switch (*str++)
7053 {
7054 case 'c': found = (found & 1) ? 16 : found | 1; break;
7055 case 'n': found = (found & 2) ? 16 : found | 2; break;
7056 case 'z': found = (found & 4) ? 16 : found | 4; break;
7057 case 'v': found = (found & 8) ? 16 : found | 8; break;
7058 default: found = 16;
7059 }
7060 if (found != 15)
7061 goto failure;
7062 inst.operands[i].isvec = 1;
7063 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7064 inst.operands[i].reg = REG_PC;
7065 }
7066 else
7067 goto failure;
7068 break;
7069
7070 case OP_TB:
7071 po_misc_or_fail (parse_tb (&str));
7072 break;
7073
7074 /* Register lists. */
7075 case OP_REGLST:
7076 val = parse_reg_list (&str);
7077 if (*str == '^')
7078 {
7079 inst.operands[i].writeback = 1;
7080 str++;
7081 }
7082 break;
7083
7084 case OP_VRSLST:
7085 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
7086 break;
7087
7088 case OP_VRDLST:
7089 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
7090 break;
7091
7092 case OP_VRSDLST:
7093 /* Allow Q registers too. */
7094 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7095 REGLIST_NEON_D);
7096 if (val == FAIL)
7097 {
7098 inst.error = NULL;
7099 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7100 REGLIST_VFP_S);
7101 inst.operands[i].issingle = 1;
7102 }
7103 break;
7104
7105 case OP_NRDLST:
7106 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7107 REGLIST_NEON_D);
7108 break;
7109
7110 case OP_NSTRLST:
7111 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7112 &inst.operands[i].vectype);
7113 break;
7114
7115 /* Addressing modes */
7116 case OP_ADDR:
7117 po_misc_or_fail (parse_address (&str, i));
7118 break;
7119
7120 case OP_ADDRGLDR:
7121 po_misc_or_fail_no_backtrack (
7122 parse_address_group_reloc (&str, i, GROUP_LDR));
7123 break;
7124
7125 case OP_ADDRGLDRS:
7126 po_misc_or_fail_no_backtrack (
7127 parse_address_group_reloc (&str, i, GROUP_LDRS));
7128 break;
7129
7130 case OP_ADDRGLDC:
7131 po_misc_or_fail_no_backtrack (
7132 parse_address_group_reloc (&str, i, GROUP_LDC));
7133 break;
7134
7135 case OP_SH:
7136 po_misc_or_fail (parse_shifter_operand (&str, i));
7137 break;
7138
7139 case OP_SHG:
7140 po_misc_or_fail_no_backtrack (
7141 parse_shifter_operand_group_reloc (&str, i));
7142 break;
7143
7144 case OP_oSHll:
7145 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
7146 break;
7147
7148 case OP_oSHar:
7149 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
7150 break;
7151
7152 case OP_oSHllar:
7153 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
7154 break;
7155
7156 default:
7157 as_fatal (_("unhandled operand code %d"), op_parse_code);
7158 }
7159
7160 /* Various value-based sanity checks and shared operations. We
7161 do not signal immediate failures for the register constraints;
7162 this allows a syntax error to take precedence. */
7163 switch (op_parse_code)
7164 {
7165 case OP_oRRnpc:
7166 case OP_RRnpc:
7167 case OP_RRnpcb:
7168 case OP_RRw:
7169 case OP_oRRw:
7170 case OP_RRnpc_I0:
7171 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
7172 inst.error = BAD_PC;
7173 break;
7174
7175 case OP_oRRnpcsp:
7176 case OP_RRnpcsp:
7177 if (inst.operands[i].isreg)
7178 {
7179 if (inst.operands[i].reg == REG_PC)
7180 inst.error = BAD_PC;
7181 else if (inst.operands[i].reg == REG_SP)
7182 inst.error = BAD_SP;
7183 }
7184 break;
7185
7186 case OP_RRnpctw:
7187 if (inst.operands[i].isreg
7188 && inst.operands[i].reg == REG_PC
7189 && (inst.operands[i].writeback || thumb))
7190 inst.error = BAD_PC;
7191 break;
7192
7193 case OP_CPSF:
7194 case OP_ENDI:
7195 case OP_oROR:
7196 case OP_wPSR:
7197 case OP_rPSR:
7198 case OP_COND:
7199 case OP_oBARRIER_I15:
7200 case OP_REGLST:
7201 case OP_VRSLST:
7202 case OP_VRDLST:
7203 case OP_VRSDLST:
7204 case OP_NRDLST:
7205 case OP_NSTRLST:
7206 if (val == FAIL)
7207 goto failure;
7208 inst.operands[i].imm = val;
7209 break;
7210
7211 default:
7212 break;
7213 }
7214
7215 /* If we get here, this operand was successfully parsed. */
7216 inst.operands[i].present = 1;
7217 continue;
7218
7219 bad_args:
7220 inst.error = BAD_ARGS;
7221
7222 failure:
7223 if (!backtrack_pos)
7224 {
7225 /* The parse routine should already have set inst.error, but set a
7226 default here just in case. */
7227 if (!inst.error)
7228 inst.error = _("syntax error");
7229 return FAIL;
7230 }
7231
7232 /* Do not backtrack over a trailing optional argument that
7233 absorbed some text. We will only fail again, with the
7234 'garbage following instruction' error message, which is
7235 probably less helpful than the current one. */
7236 if (backtrack_index == i && backtrack_pos != str
7237 && upat[i+1] == OP_stop)
7238 {
7239 if (!inst.error)
7240 inst.error = _("syntax error");
7241 return FAIL;
7242 }
7243
7244 /* Try again, skipping the optional argument at backtrack_pos. */
7245 str = backtrack_pos;
7246 inst.error = backtrack_error;
7247 inst.operands[backtrack_index].present = 0;
7248 i = backtrack_index;
7249 backtrack_pos = 0;
7250 }
7251
7252 /* Check that we have parsed all the arguments. */
7253 if (*str != '\0' && !inst.error)
7254 inst.error = _("garbage following instruction");
7255
7256 return inst.error ? FAIL : SUCCESS;
7257 }
7258
7259 #undef po_char_or_fail
7260 #undef po_reg_or_fail
7261 #undef po_reg_or_goto
7262 #undef po_imm_or_fail
7263 #undef po_scalar_or_fail
7264 #undef po_barrier_or_imm
7265
/* Shorthand macro for instruction encoding functions issuing errors.
   If EXPR is true, record ERR as the current instruction's error
   message and return from the enclosing function.  Note the hidden
   'return': this macro may only be used inside functions returning
   void.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
7277
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.
   Sets inst.error and returns from the enclosing (void) function
   when REG is r13 (SP) or r15 (PC).  */
#define reject_bad_reg(reg)				\
  do							\
   if (reg == REG_SP || reg == REG_PC)			\
     {							\
       inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC;	\
       return;						\
     }							\
  while (0)
7289
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  The warning is suppressed when deprecation warnings
   are disabled (warn_on_deprecated is false).  */
#define warn_deprecated_sp(reg)			\
  do						\
    if (warn_on_deprecated && reg == REG_SP)	\
       as_tsktsk (_("use of r13 is deprecated"));	\
  while (0)
7297
7298 /* Functions for operand encoding. ARM, then Thumb. */
7299
/* Rotate the 32-bit value V left by N bits.  Both arguments are fully
   parenthesized so that expression arguments expand correctly, and the
   second shift count is masked so that N == 0 does not produce an
   (undefined) shift by 32.  */
#define rotate_left(v, n) (((v) << ((n) & 31)) | ((v) >> ((32 - (n)) & 31)))
7301
/* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.

   The only binary encoding difference is the Coprocessor number.  Coprocessor
   9 is used for half-precision calculations or conversions.  The format of the
   instruction is the same as the equivalent Coprocessor 10 instruction that
   exists for Single-Precision operation.  */

static void
do_scalar_fp16_v82_encode (void)
{
  /* Warn (rather than error) on a condition code: behaviour is
     UNPREDICTABLE but the encoding is still representable.  */
  if (inst.cond != COND_ALWAYS)
    as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
	       " the behaviour is UNPREDICTABLE"));
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
	      _(BAD_FP16));

  /* Replace bits [11:8] (the coprocessor number field) with 9,
     selecting the half-precision form.  */
  inst.instruction = (inst.instruction & 0xfffff0ff) | 0x900;
  mark_feature_used (&arm_ext_fp16);
}
7321
7322 /* If VAL can be encoded in the immediate field of an ARM instruction,
7323 return the encoded form. Otherwise, return FAIL. */
7324
7325 static unsigned int
7326 encode_arm_immediate (unsigned int val)
7327 {
7328 unsigned int a, i;
7329
7330 if (val <= 0xff)
7331 return val;
7332
7333 for (i = 2; i < 32; i += 2)
7334 if ((a = rotate_left (val, i)) <= 0xff)
7335 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
7336
7337 return FAIL;
7338 }
7339
7340 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7341 return the encoded form. Otherwise, return FAIL. */
7342 static unsigned int
7343 encode_thumb32_immediate (unsigned int val)
7344 {
7345 unsigned int a, i;
7346
7347 if (val <= 0xff)
7348 return val;
7349
7350 for (i = 1; i <= 24; i++)
7351 {
7352 a = val >> i;
7353 if ((val & ~(0xff << i)) == 0)
7354 return ((val >> i) & 0x7f) | ((32 - i) << 7);
7355 }
7356
7357 a = val & 0xff;
7358 if (val == ((a << 16) | a))
7359 return 0x100 | a;
7360 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
7361 return 0x300 | a;
7362
7363 a = val & 0xff00;
7364 if (val == ((a << 16) | a))
7365 return 0x200 | (a >> 8);
7366
7367 return FAIL;
7368 }
/* Encode a VFP SP or DP register number into inst.instruction.

   A register number occupies 5 bits, split across a 4-bit field and a
   single extra bit; POS selects which of the six Sd/Sn/Sm/Dd/Dn/Dm
   field positions to fill.  */

static void
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
{
  /* D16-D31 need the D32 extension: record its use, or report an
     error if the selected VFP version lacks it.  */
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      && reg > 15)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	{
	  first_error (_("D register out of range for selected VFP version"));
	  return;
	}
    }

  switch (pos)
    {
    case VFP_REG_Sd:	/* bits [15:12] = reg[4:1], bit 22 = reg[0].  */
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      break;

    case VFP_REG_Sn:	/* bits [19:16] = reg[4:1], bit 7 = reg[0].  */
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      break;

    case VFP_REG_Sm:	/* bits [3:0] = reg[4:1], bit 5 = reg[0].  */
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      break;

    case VFP_REG_Dd:	/* bits [15:12] = reg[3:0], bit 22 = reg[4].  */
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      break;

    case VFP_REG_Dn:	/* bits [19:16] = reg[3:0], bit 7 = reg[4].  */
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      break;

    case VFP_REG_Dm:	/* bits [3:0] = reg[3:0], bit 5 = reg[4].  */
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
      break;

    default:
      abort ();
    }
}
7423
7424 /* Encode a <shift> in an ARM-format instruction. The immediate,
7425 if any, is handled by md_apply_fix. */
7426 static void
7427 encode_arm_shift (int i)
7428 {
7429 if (inst.operands[i].shift_kind == SHIFT_RRX)
7430 inst.instruction |= SHIFT_ROR << 5;
7431 else
7432 {
7433 inst.instruction |= inst.operands[i].shift_kind << 5;
7434 if (inst.operands[i].immisreg)
7435 {
7436 inst.instruction |= SHIFT_BY_REG;
7437 inst.instruction |= inst.operands[i].imm << 8;
7438 }
7439 else
7440 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7441 }
7442 }
7443
7444 static void
7445 encode_arm_shifter_operand (int i)
7446 {
7447 if (inst.operands[i].isreg)
7448 {
7449 inst.instruction |= inst.operands[i].reg;
7450 encode_arm_shift (i);
7451 }
7452 else
7453 {
7454 inst.instruction |= INST_IMMEDIATE;
7455 if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
7456 inst.instruction |= inst.operands[i].imm;
7457 }
7458 }
7459
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encodes the base register and the P (pre-index) / W (write-back)
   bits, and issues the diagnostics common to both addressing modes.
   IS_T is true for the T-suffixed (user-mode) load/store forms,
   which only permit post-indexed addressing.  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* PR 14260:
     Generate an error if the operand is not a register.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  /* Base register goes in bits [19:16].  */
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      gas_assert (inst.operands[i].writeback);
      /* For the T forms post-indexing is implied; the W bit instead
	 selects the user-mode access.  */
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Warn when the transfer register (bits [15:12]) is the same as a
     base register (bits [19:16]) that will be written back, i.e.
     post-indexed or pre-indexed with the W bit set.  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
7502
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register-offset form: [Rn, +/-Rm {, shift}].  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      /* Shift type in bits [6:5]; the shift amount is filled
		 in via the relocation by md_apply_fix.  */
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.reloc */
    {
      if (is_pc && !inst.reloc.pc_rel)
	{
	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_tsktsk (_("use of PC in this instruction is deprecated"));
	}

      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;
	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
	}
    }
}
7562
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no shifted-register offset form.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register-offset form: [Rn, +/-Rm].  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_t && inst.operands[i].reg == REG_PC)),
		  BAD_PC_ADDRESSING);
      constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
		  BAD_PC_WRITEBACK);
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.reloc */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      inst.instruction |= HWOFFSET_IMM;
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;

	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
	}
    }
}
7606
7607 /* Write immediate bits [7:0] to the following locations:
7608
7609 |28/24|23 19|18 16|15 4|3 0|
7610 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7611
7612 This function is used by VMOV/VMVN/VORR/VBIC. */
7613
7614 static void
7615 neon_write_immbits (unsigned immbits)
7616 {
7617 inst.instruction |= immbits & 0xf;
7618 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
7619 inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
7620 }
7621
/* Invert low-order SIZE bits of XHI:XLO.  Either pointer may be NULL,
   in which case the corresponding half is treated as zero and not
   written back.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned lo = xlo ? *xlo : 0;
  unsigned hi = xhi ? *xhi : 0;

  switch (size)
    {
    case 8:
      lo = ~lo & 0xff;
      break;

    case 16:
      lo = ~lo & 0xffff;
      break;

    case 64:
      hi = ~hi & 0xffffffff;
      /* fall through.  */

    case 32:
      lo = ~lo & 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = lo;

  if (xhi)
    *xhi = hi;
}
7658
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D, i.e. every byte is either 0x00 or 0xff.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  unsigned shift;

  for (shift = 0; shift < 32; shift += 8)
    {
      unsigned byte = (imm >> shift) & 0xff;

      if (byte != 0 && byte != 0xff)
	return 0;
    }

  return 1;
}
7670
/* For immediate of above form, return 0bABCD: gather the low bit of
   each byte of IMM into a 4-bit value.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned result = 0;
  unsigned byte;

  for (byte = 0; byte < 4; byte++)
    result |= ((imm >> (byte * 8)) & 0x01) << byte;

  return result;
}
7679
/* Compress quarter-float representation to 0b...000 abcdefgh: the
   sign bit moves from bit 31 to bit 7, and the low exponent/high
   fraction bits [25:19] become bits [6:0].  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned sign_bit = (imm >> 24) & 0x80;
  unsigned exp_frac = (imm >> 19) & 0x7f;

  return sign_bit | exp_frac;
}
7687
/* Returns CMODE.  IMMBITS [7:0] is set to bits suitable for inserting into
   the instruction.  *OP is passed as the initial value of the op field, and
   may be set to a different value depending on the constant (i.e.
   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
   MVN).  If the immediate looks like a repeated pattern then also
   try smaller element sizes.  */

static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* F32 immediate in quarter-float form.  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* I64 form: every byte of the 64-bit value must be 0x00 or 0xff;
	 the eight immediate bits select one byte each.  Note OP is
	 forced to 1 here even though this is a move, not an MVN.  */
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* Otherwise a 64-bit value is only encodable when both halves
	 are equal; fall through to the 32-bit forms with IMMLO.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* I32 forms: a single non-zero byte in any of the four
	 positions.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      /* "Ones" variants: a byte followed by trailing 0xff / 0xffff.  */
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* Try a 16-bit element only if the value is two identical
	 halfwords.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* I16 forms: a single non-zero byte in either position.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* Try an 8-bit element only if both bytes are identical.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
7797
7798 #if defined BFD_HOST_64_BIT
7799 /* Returns TRUE if double precision value V may be cast
7800 to single precision without loss of accuracy. */
7801
7802 static bfd_boolean
7803 is_double_a_single (bfd_int64_t v)
7804 {
7805 int exp = (int)((v >> 52) & 0x7FF);
7806 bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
7807
7808 return (exp == 0 || exp == 0x7FF
7809 || (exp >= 1023 - 126 && exp <= 1023 + 127))
7810 && (mantissa & 0x1FFFFFFFl) == 0;
7811 }
7812
/* Returns a double precision value casted to single precision
   (ignoring the least significant bits in exponent and mantissa).

   No rounding is performed - the low 29 mantissa bits are simply
   truncated - so callers are expected to have validated V with
   is_double_a_single first.  */

static int
double_to_single (bfd_int64_t v)
{
  int sign = (int) ((v >> 63) & 1l);
  int exp = (int) ((v >> 52) & 0x7FF);
  bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);

  if (exp == 0x7FF)
    /* Infinity / NaN: keep the all-ones exponent.  */
    exp = 0xFF;
  else
    {
      /* Re-bias the exponent from double (1023) to single (127).  */
      exp = exp - 1023 + 127;
      if (exp >= 0xFF)
	{
	  /* Infinity.  NOTE(review): 0x7F is not the single-precision
	     infinity exponent (that would be 0xFF); this branch is
	     unreachable for inputs that passed is_double_a_single -
	     confirm before relying on it for unguarded callers.  */
	  exp = 0x7F;
	  mantissa = 0;
	}
      else if (exp < 0)
	{
	  /* No denormalized numbers.  */
	  exp = 0;
	  mantissa = 0;
	}
    }
  /* Drop the 29 low-order mantissa bits and pack the fields.  */
  mantissa >>= 29;
  return (sign << 31) | (exp << 23) | mantissa;
}
7844 #endif /* BFD_HOST_64_BIT */
7845
/* The kind of "=expr" literal load being processed; selects which
   move-immediate substitutions move_or_literal_pool may attempt.  */
enum lit_type
{
  CONST_THUMB,	/* Thumb load pseudo (mov/mov.w/mvn/movw candidates).  */
  CONST_ARM,	/* ARM load pseudo (mov/mvn candidates).  */
  CONST_VEC	/* Vector load pseudo (vmov/fconst candidates).  */
};
7852
7853 static void do_vfp_nsyn_opcode (const char *);
7854
/* inst.reloc.exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return TRUE; if it can't, convert inst.instruction to a literal-pool
   load and return FALSE.  If this is not a valid thing to do in the
   current context, set inst.error and return TRUE.

   inst.operands[i] describes the destination register.  */

static bfd_boolean
move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
{
  unsigned long tbit;
  bfd_boolean thumb_p = (t == CONST_THUMB);
  bfd_boolean arm_p = (t == CONST_ARM);

  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  /* The "=expr" pseudo is only meaningful on loads.  */
  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }

  if (inst.reloc.exp.X_op != O_constant
      && inst.reloc.exp.X_op != O_symbol
      && inst.reloc.exp.X_op != O_big)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }

  /* For a known constant, try to replace the load with a move; a
     symbol always goes through the literal pool below.  */
  if (inst.reloc.exp.X_op == O_constant
      || inst.reloc.exp.X_op == O_big)
    {
#if defined BFD_HOST_64_BIT
      bfd_int64_t v;
#else
      offsetT v;
#endif
      if (inst.reloc.exp.X_op == O_big)
	{
	  LITTLENUM_TYPE w[X_PRECISION];
	  LITTLENUM_TYPE * l;

	  if (inst.reloc.exp.X_add_number == -1)
	    {
	      /* Floating-point bignum: convert to littlenum words.  */
	      gen_to_words (w, X_PRECISION, E_PRECISION);
	      l = w;
	      /* FIXME: Should we check words w[2..5] ?  */
	    }
	  else
	    l = generic_bignum;

	  /* Reassemble the littlenums into a host integer.  */
#if defined BFD_HOST_64_BIT
	  v =
	    ((((((((bfd_int64_t) l[3] & LITTLENUM_MASK)
		  << LITTLENUM_NUMBER_OF_BITS)
		 | ((bfd_int64_t) l[2] & LITTLENUM_MASK))
		<< LITTLENUM_NUMBER_OF_BITS)
	       | ((bfd_int64_t) l[1] & LITTLENUM_MASK))
	      << LITTLENUM_NUMBER_OF_BITS)
	     | ((bfd_int64_t) l[0] & LITTLENUM_MASK));
#else
	  v = ((l[1] & LITTLENUM_MASK) << LITTLENUM_NUMBER_OF_BITS)
	    | (l[0] & LITTLENUM_MASK);
#endif
	}
      else
	v = inst.reloc.exp.X_add_number;

      if (!inst.operands[i].issingle)
	{
	  if (thumb_p)
	    {
	      /* This can be encoded only for a low register.  */
	      if ((v & ~0xFF) == 0 && (inst.operands[i].reg < 8))
		{
		  /* This can be done with a mov(1) instruction.  */
		  inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
		  inst.instruction |= v;
		  return TRUE;
		}

	      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
		  || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		{
		  /* Check if on thumb2 it can be done with a mov.w, mvn or
		     movw instruction.  */
		  unsigned int newimm;
		  bfd_boolean isNegated;

		  /* Try the value itself, then its bitwise inverse
		     (encodable as MVN).  */
		  newimm = encode_thumb32_immediate (v);
		  if (newimm != (unsigned int) FAIL)
		    isNegated = FALSE;
		  else
		    {
		      newimm = encode_thumb32_immediate (~v);
		      if (newimm != (unsigned int) FAIL)
			isNegated = TRUE;
		    }

		  /* The number can be loaded with a mov.w or mvn
		     instruction.  */
		  if (newimm != (unsigned int) FAIL
		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		    {
		      inst.instruction = (0xf04f0000 /* MOV.W.  */
					  | (inst.operands[i].reg << 8));
		      /* Change to MOVN.  */
		      inst.instruction |= (isNegated ? 0x200000 : 0);
		      /* Scatter the i:imm3:imm8 immediate fields.  */
		      inst.instruction |= (newimm & 0x800) << 15;
		      inst.instruction |= (newimm & 0x700) << 4;
		      inst.instruction |= (newimm & 0x0ff);
		      return TRUE;
		    }
		  /* The number can be loaded with a movw instruction.  */
		  else if ((v & ~0xFFFF) == 0
			   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		    {
		      int imm = v & 0xFFFF;

		      inst.instruction = 0xf2400000;  /* MOVW.  */
		      inst.instruction |= (inst.operands[i].reg << 8);
		      /* Scatter the imm4:i:imm3:imm8 immediate fields.  */
		      inst.instruction |= (imm & 0xf000) << 4;
		      inst.instruction |= (imm & 0x0800) << 15;
		      inst.instruction |= (imm & 0x0700) << 4;
		      inst.instruction |= (imm & 0x00ff);
		      return TRUE;
		    }
		}
	    }
	  else if (arm_p)
	    {
	      int value = encode_arm_immediate (v);

	      if (value != FAIL)
		{
		  /* This can be done with a mov instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}

	      value = encode_arm_immediate (~ v);
	      if (value != FAIL)
		{
		  /* This can be done with a mvn instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}
	    }
	  else if (t == CONST_VEC && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	    {
	      int op = 0;
	      unsigned immbits = 0;
	      unsigned immlo = inst.operands[1].imm;
	      /* High half: explicit if regisimm, zero for an unsigned
		 expression, otherwise the sign-extension of IMMLO.  */
	      unsigned immhi = inst.operands[1].regisimm
		? inst.operands[1].reg
		: inst.reloc.exp.X_unsigned
		? 0
		: ((bfd_int64_t)((int) immlo)) >> 32;
	      int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);

	      if (cmode == FAIL)
		{
		  /* Retry with the bitwise-inverted constant, flipping
		     the op (VMOV <-> VMVN) accordingly.  */
		  neon_invert_size (&immlo, &immhi, 64);
		  op = !op;
		  cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);
		}

	      if (cmode != FAIL)
		{
		  inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
		    | (1 << 23)
		    | (cmode << 8)
		    | (op << 5)
		    | (1 << 4);

		  /* Fill other bits in vmov encoding for both thumb and arm.  */
		  if (thumb_mode)
		    inst.instruction |= (0x7U << 29) | (0xF << 24);
		  else
		    inst.instruction |= (0xFU << 28) | (0x1 << 25);
		  neon_write_immbits (immbits);
		  return TRUE;
		}
	    }
	}

      if (t == CONST_VEC)
	{
	  /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant.  */
	  if (inst.operands[i].issingle
	      && is_quarter_float (inst.operands[1].imm)
	      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
	    {
	      inst.operands[1].imm =
		neon_qfloat_bits (v);
	      do_vfp_nsyn_opcode ("fconsts");
	      return TRUE;
	    }

	  /* If our host does not support a 64-bit type then we cannot perform
	     the following optimization.  This means that there will be a
	     discrepancy between the output produced by an assembler built for
	     a 32-bit-only host and the output produced from a 64-bit host, but
	     this cannot be helped.  */
#if defined BFD_HOST_64_BIT
	  else if (!inst.operands[1].issingle
		   && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	    {
	      /* Double-precision constant that is losslessly castable
		 to a quarter-float single: use fconstd.  */
	      if (is_double_a_single (v)
		  && is_quarter_float (double_to_single (v)))
		{
		  inst.operands[1].imm =
		    neon_qfloat_bits (double_to_single (v));
		  do_vfp_nsyn_opcode ("fconstd");
		  return TRUE;
		}
	    }
#endif
	}
    }

  /* No move substitution applied: queue the constant in the literal
     pool and rewrite the operand as a PC-relative load.  */
  if (add_to_lit_pool ((!inst.operands[i].isvec
			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
    return TRUE;

  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
8103
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format instruction.  Reject all forms which cannot be encoded
   into a coprocessor load/store instruction.  If wb_ok is false,
   reject use of writeback; if unind_ok is false, reject use of
   unindexed addressing.  If reloc_override is not 0, use it instead
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   (in which case it is preserved).  */

static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  if (!inst.operands[i].isreg)
    {
      /* PR 18256 */
      if (! inst.operands[0].isvec)
	{
	  inst.error = _("invalid co-processor operand");
	  return FAIL;
	}
      /* "=constant" form: may be satisfied directly by a vmov immediate.
	 Otherwise move_or_literal_pool rewrites operand 1 as a PC-relative
	 literal-pool reference and we fall through to encode that.  */
      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
	return SUCCESS;
    }

  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* Unindexed form: the option value occupies the offset field and
	 the U bit is always set.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  /* Group relocations (ALU_PC_G0_NC .. LDC_SB_G2) and LDR_PC_G0 are
     preserved; any other reloc becomes the generic coprocessor-offset
     reloc for the current instruction set.  */
  if (reloc_override)
    inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	    && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
8180
8181 /* Functions for instruction encoding, sorted by sub-architecture.
8182 First some generics; their names are taken from the conventional
8183 bit positions for register arguments in ARM format instructions. */
8184
/* Instructions with no operands need no encoding beyond the base
   opcode taken from the insns[] table.  */
static void
do_noargs (void)
{
}
8189
8190 static void
8191 do_rd (void)
8192 {
8193 inst.instruction |= inst.operands[0].reg << 12;
8194 }
8195
8196 static void
8197 do_rn (void)
8198 {
8199 inst.instruction |= inst.operands[0].reg << 16;
8200 }
8201
8202 static void
8203 do_rd_rm (void)
8204 {
8205 inst.instruction |= inst.operands[0].reg << 12;
8206 inst.instruction |= inst.operands[1].reg;
8207 }
8208
8209 static void
8210 do_rm_rn (void)
8211 {
8212 inst.instruction |= inst.operands[0].reg;
8213 inst.instruction |= inst.operands[1].reg << 16;
8214 }
8215
8216 static void
8217 do_rd_rn (void)
8218 {
8219 inst.instruction |= inst.operands[0].reg << 12;
8220 inst.instruction |= inst.operands[1].reg << 16;
8221 }
8222
8223 static void
8224 do_rn_rd (void)
8225 {
8226 inst.instruction |= inst.operands[0].reg << 16;
8227 inst.instruction |= inst.operands[1].reg << 12;
8228 }
8229
8230 static void
8231 do_tt (void)
8232 {
8233 inst.instruction |= inst.operands[0].reg << 8;
8234 inst.instruction |= inst.operands[1].reg << 16;
8235 }
8236
8237 static bfd_boolean
8238 check_obsolete (const arm_feature_set *feature, const char *msg)
8239 {
8240 if (ARM_CPU_IS_ANY (cpu_variant))
8241 {
8242 as_tsktsk ("%s", msg);
8243 return TRUE;
8244 }
8245 else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
8246 {
8247 as_bad ("%s", msg);
8248 return TRUE;
8249 }
8250
8251 return FALSE;
8252 }
8253
8254 static void
8255 do_rd_rm_rn (void)
8256 {
8257 unsigned Rn = inst.operands[2].reg;
8258 /* Enforce restrictions on SWP instruction. */
8259 if ((inst.instruction & 0x0fbfffff) == 0x01000090)
8260 {
8261 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
8262 _("Rn must not overlap other operands"));
8263
8264 /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
8265 */
8266 if (!check_obsolete (&arm_ext_v8,
8267 _("swp{b} use is obsoleted for ARMv8 and later"))
8268 && warn_on_deprecated
8269 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
8270 as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
8271 }
8272
8273 inst.instruction |= inst.operands[0].reg << 12;
8274 inst.instruction |= inst.operands[1].reg;
8275 inst.instruction |= Rn << 16;
8276 }
8277
8278 static void
8279 do_rd_rn_rm (void)
8280 {
8281 inst.instruction |= inst.operands[0].reg << 12;
8282 inst.instruction |= inst.operands[1].reg << 16;
8283 inst.instruction |= inst.operands[2].reg;
8284 }
8285
8286 static void
8287 do_rm_rd_rn (void)
8288 {
8289 constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
8290 constraint (((inst.reloc.exp.X_op != O_constant
8291 && inst.reloc.exp.X_op != O_illegal)
8292 || inst.reloc.exp.X_add_number != 0),
8293 BAD_ADDR_MODE);
8294 inst.instruction |= inst.operands[0].reg;
8295 inst.instruction |= inst.operands[1].reg << 12;
8296 inst.instruction |= inst.operands[2].reg << 16;
8297 }
8298
8299 static void
8300 do_imm0 (void)
8301 {
8302 inst.instruction |= inst.operands[0].imm;
8303 }
8304
8305 static void
8306 do_rd_cpaddr (void)
8307 {
8308 inst.instruction |= inst.operands[0].reg << 12;
8309 encode_arm_cp_address (1, TRUE, TRUE, 0);
8310 }
8311
8312 /* ARM instructions, in alphabetical order by function name (except
8313 that wrapper functions appear immediately after the function they
8314 wrap). */
8315
8316 /* This is a pseudo-op of the form "adr rd, label" to be converted
8317 into a relative address of the form "add rd, pc, #label-.-8". */
8318
8319 static void
8320 do_adr (void)
8321 {
8322 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
8323
8324 /* Frag hacking will turn this into a sub instruction if the offset turns
8325 out to be negative. */
8326 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
8327 inst.reloc.pc_rel = 1;
8328 inst.reloc.exp.X_add_number -= 8;
8329 }
8330
8331 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8332 into a relative address of the form:
8333 add rd, pc, #low(label-.-8)"
8334 add rd, rd, #high(label-.-8)" */
8335
8336 static void
8337 do_adrl (void)
8338 {
8339 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
8340
8341 /* Frag hacking will turn this into a sub instruction if the offset turns
8342 out to be negative. */
8343 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
8344 inst.reloc.pc_rel = 1;
8345 inst.size = INSN_SIZE * 2;
8346 inst.reloc.exp.X_add_number -= 8;
8347 }
8348
8349 static void
8350 do_arit (void)
8351 {
8352 constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
8353 && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
8354 THUMB1_RELOC_ONLY);
8355 if (!inst.operands[1].present)
8356 inst.operands[1].reg = inst.operands[0].reg;
8357 inst.instruction |= inst.operands[0].reg << 12;
8358 inst.instruction |= inst.operands[1].reg << 16;
8359 encode_arm_shifter_operand (2);
8360 }
8361
8362 static void
8363 do_barrier (void)
8364 {
8365 if (inst.operands[0].present)
8366 inst.instruction |= inst.operands[0].imm;
8367 else
8368 inst.instruction |= 0xf;
8369 }
8370
8371 static void
8372 do_bfc (void)
8373 {
8374 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8375 constraint (msb > 32, _("bit-field extends past end of register"));
8376 /* The instruction encoding stores the LSB and MSB,
8377 not the LSB and width. */
8378 inst.instruction |= inst.operands[0].reg << 12;
8379 inst.instruction |= inst.operands[1].imm << 7;
8380 inst.instruction |= (msb - 1) << 16;
8381 }
8382
8383 static void
8384 do_bfi (void)
8385 {
8386 unsigned int msb;
8387
8388 /* #0 in second position is alternative syntax for bfc, which is
8389 the same instruction but with REG_PC in the Rm field. */
8390 if (!inst.operands[1].isreg)
8391 inst.operands[1].reg = REG_PC;
8392
8393 msb = inst.operands[2].imm + inst.operands[3].imm;
8394 constraint (msb > 32, _("bit-field extends past end of register"));
8395 /* The instruction encoding stores the LSB and MSB,
8396 not the LSB and width. */
8397 inst.instruction |= inst.operands[0].reg << 12;
8398 inst.instruction |= inst.operands[1].reg;
8399 inst.instruction |= inst.operands[2].imm << 7;
8400 inst.instruction |= (msb - 1) << 16;
8401 }
8402
8403 static void
8404 do_bfx (void)
8405 {
8406 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8407 _("bit-field extends past end of register"));
8408 inst.instruction |= inst.operands[0].reg << 12;
8409 inst.instruction |= inst.operands[1].reg;
8410 inst.instruction |= inst.operands[2].imm << 7;
8411 inst.instruction |= (inst.operands[3].imm - 1) << 16;
8412 }
8413
8414 /* ARM V5 breakpoint instruction (argument parse)
8415 BKPT <16 bit unsigned immediate>
8416 Instruction is not conditional.
8417 The bit pattern given in insns[] has the COND_ALWAYS condition,
8418 and it is an error if the caller tried to override that. */
8419
8420 static void
8421 do_bkpt (void)
8422 {
8423 /* Top 12 of 16 bits to bits 19:8. */
8424 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
8425
8426 /* Bottom 4 of 16 bits to bits 3:0. */
8427 inst.instruction |= inst.operands[0].imm & 0xf;
8428 }
8429
/* Set up inst.reloc for a branch destination, using DEFAULT_RELOC
   unless the operand carried an explicit reloc suffix.  */
static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      /* Only the (plt) and (tlscall) suffixes are meaningful on a
	 branch target.  */
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
      /* (tlscall) maps to a Thumb-specific reloc in Thumb state.  */
      inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
	? BFD_RELOC_ARM_PLT32
	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
    }
  else
    inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
  inst.reloc.pc_rel = 1;
}
8446
static void
do_branch (void)
{
#ifdef OBJ_ELF
  /* EABI v4 and later uses the PCREL_JUMP reloc; older objects get the
     plain branch reloc.  */
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8457
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      /* Unconditional BL gets the CALL reloc; a conditional BL uses the
	 JUMP reloc instead (presumably because it cannot be rewritten
	 to BLX, which has no condition field — verify).  */
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8473
/* ARM V5 branch-link-exchange instruction (argument parse)
     BLX <target_addr>		ie BLX(1)
     BLX{<condition>} <Rm>	ie BLX(2)
   Unfortunately, there are two different opcodes for this mnemonic.
   So, the insns[].value is not used, and the code here zaps values
	into inst.instruction.
   Also, the <target_addr> can be 25 bits, hence has its own reloc.  */

static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      /* Immediate-form BLX base opcode.  */
      inst.instruction = 0xfa000000;
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
8505
static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
    want_reloc = TRUE;

#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    /* Pre-v4 EABI objects (and all non-ELF objects) never get the
       V4BX reloc.  */
    want_reloc = FALSE;

  if (want_reloc)
    inst.reloc.type = BFD_RELOC_ARM_V4BX;
}
8529
8530
8531 /* ARM v5TEJ. Jump to Jazelle code. */
8532
8533 static void
8534 do_bxj (void)
8535 {
8536 if (inst.operands[0].reg == REG_PC)
8537 as_tsktsk (_("use of r15 in bxj is not really useful"));
8538
8539 inst.instruction |= inst.operands[0].reg;
8540 }
8541
8542 /* Co-processor data operation:
8543 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8544 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8545 static void
8546 do_cdp (void)
8547 {
8548 inst.instruction |= inst.operands[0].reg << 8;
8549 inst.instruction |= inst.operands[1].imm << 20;
8550 inst.instruction |= inst.operands[2].reg << 12;
8551 inst.instruction |= inst.operands[3].reg << 16;
8552 inst.instruction |= inst.operands[4].reg;
8553 inst.instruction |= inst.operands[5].imm << 5;
8554 }
8555
8556 static void
8557 do_cmp (void)
8558 {
8559 inst.instruction |= inst.operands[0].reg << 16;
8560 encode_arm_shifter_operand (1);
8561 }
8562
8563 /* Transfer between coprocessor and ARM registers.
8564 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8565 MRC2
8566 MCR{cond}
8567 MCR2
8568
8569 No special properties. */
8570
/* Description of one coprocessor register whose access is deprecated
   or obsoleted from some architecture version onwards: the instruction
   fields identifying the register, the architectures involved, and the
   diagnostics to issue.  */
struct deprecated_coproc_regs_s
{
  unsigned cp;			/* Coprocessor number.  */
  int opc1;			/* opc1 field.  */
  unsigned crn;			/* CRn field.  */
  unsigned crm;			/* CRm field.  */
  int opc2;			/* opc2 field.  */
  arm_feature_set deprecated;	/* Architectures where deprecated.  */
  arm_feature_set obsoleted;	/* Architectures where obsoleted.  */
  const char *dep_msg;		/* Deprecation diagnostic.  */
  const char *obs_msg;		/* Obsoletion diagnostic.  */
};

#define DEPR_ACCESS_V8 \
  N_("This coprocessor register access is deprecated in ARMv8")

/* Table of all deprecated coprocessor registers.  */
static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
{
    {15, 0, 7, 10, 5,					/* CP15DMB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 10, 4,					/* CP15DSB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7,  5, 4,					/* CP15ISB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 1,  0, 0,					/* TEEHBR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 0,  0, 0,					/* TEECR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
};

#undef DEPR_ACCESS_V8

/* Number of entries in the table above.  */
static const size_t deprecated_coproc_reg_count =
  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
8611
/* Encode MRC/MRC2/MCR/MCR2, checking register legality per mode and
   warning about deprecated coprocessor register accesses.  */
static void
do_co_reg (void)
{
  unsigned Rd;
  size_t i;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else
	/* MRC, MRC2  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

  /* Warn when the access matches an entry in the deprecated-register
     table, unless the target is -mcpu=all (architecture unknown).  */
  for (i = 0; i < deprecated_coproc_reg_count; ++i)
    {
      const struct deprecated_coproc_regs_s *r =
	deprecated_coproc_regs + i;

      if (inst.operands[0].reg == r->cp
	  && inst.operands[1].imm == r->opc1
	  && inst.operands[3].reg == r->crn
	  && inst.operands[4].reg == r->crm
	  && inst.operands[5].imm == r->opc2)
	{
	  if (! ARM_CPU_IS_ANY (cpu_variant)
	      && warn_on_deprecated
	      && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	    as_tsktsk ("%s", r->dep_msg);
	}
    }

  /* coproc -> 8, opc1 -> 21, Rd -> 12, CRn -> 16, CRm -> 0, opc2 -> 5.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
8661
8662 /* Transfer between coprocessor register and pair of ARM registers.
8663 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8664 MCRR2
8665 MRRC{cond}
8666 MRRC2
8667
8668 Two XScale instructions are special cases of these:
8669
8670 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8671 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8672
8673 Result unpredictable if Rd or Rn is R15. */
8674
static void
do_co_reg2c (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;

  /* In ARM state PC is simply unpredictable; in Thumb state SP is also
     rejected.  */
  if (thumb_mode)
    {
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
    }
  else
    {
      constraint (Rd == REG_PC, BAD_PC);
      constraint (Rn == REG_PC, BAD_PC);
    }

  /* Only check the MRRC{2} variants.  */
  if ((inst.instruction & 0x0FF00000) == 0x0C500000)
    {
       /* If Rd == Rn, error that the operation is
	  unpredictable (example MRRC p3,#1,r1,r1,c4).  */
       constraint (Rd == Rn, BAD_OVERLAP);
    }

  /* coproc -> 8, opcode -> 4, Rd -> 12, Rn -> 16, CRm -> 0.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 4;
  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;
  inst.instruction |= inst.operands[4].reg;
}
8708
8709 static void
8710 do_cpsi (void)
8711 {
8712 inst.instruction |= inst.operands[0].imm << 6;
8713 if (inst.operands[1].present)
8714 {
8715 inst.instruction |= CPSI_MMOD;
8716 inst.instruction |= inst.operands[1].imm;
8717 }
8718 }
8719
8720 static void
8721 do_dbg (void)
8722 {
8723 inst.instruction |= inst.operands[0].imm;
8724 }
8725
8726 static void
8727 do_div (void)
8728 {
8729 unsigned Rd, Rn, Rm;
8730
8731 Rd = inst.operands[0].reg;
8732 Rn = (inst.operands[1].present
8733 ? inst.operands[1].reg : Rd);
8734 Rm = inst.operands[2].reg;
8735
8736 constraint ((Rd == REG_PC), BAD_PC);
8737 constraint ((Rn == REG_PC), BAD_PC);
8738 constraint ((Rm == REG_PC), BAD_PC);
8739
8740 inst.instruction |= Rd << 16;
8741 inst.instruction |= Rn << 0;
8742 inst.instruction |= Rm << 8;
8743 }
8744
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  inst.size = 0;	/* Emits no bytes in ARM mode.  */
  if (unified_syntax)
    {
      set_it_insn_type (IT_INSN);
      /* Record mask and condition so following instructions can be
	 validated against the IT block.  */
      now_it.mask = (inst.instruction & 0xf) | 0x10;
      now_it.cc = inst.operands[0].imm;
    }
}
8761
/* If there is only one register in the register list,
   then return its register number.  Otherwise return -1.  */
static int
only_one_reg_in_list (int range)
{
  int i = ffs (range) - 1;

  /* ffs (0) returns 0, making I negative; guard it here, since
     "1 << -1" below would be undefined behaviour.  An empty list
     trivially has no single register.  */
  if (i < 0 || i > 15)
    return -1;

  return range == (1 << i) ? i : -1;
}
8770
/* Encode an LDM/STM (or PUSH/POP) instruction: base register, register
   list, writeback bits, and the single-register PUSH/POP (A2)
   optimization.  Diagnoses UNPREDICTABLE writeback combinations.  */
static void
encode_ldmstm(int from_push_pop_mnem)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* Writeback on the register list operand selects the LDM(2)/LDM(3)/
     STM(2) forms (presumably set by the parser for a "^" suffix —
     verify against parse_ldmstm).  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding.  */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
8821
8822 static void
8823 do_ldmstm (void)
8824 {
8825 encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
8826 }
8827
/* ARMv5TE load-consecutive (argument parse)
   Mode is like LDRH.

     LDRccD R, mode
     STRccD R, mode.  */

static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  /* The second register defaults to Rt + 1.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  /* V4_STR_BIT clear presumably identifies the load form (LDRD) —
     verify against the insns[] table.  */
  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	  && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
8869
static void
do_ldrex (void)
{
  /* The address must be a plain [Rn]: no writeback, shift, index
     register, offset or negative form.  */
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  /* NOTE(review): unreachable — a PC base register was already rejected
     (with BAD_ADDR_MODE) by the first constraint above.  */
  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  /* The offset is implicitly zero, so no fixup is needed.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
}
8901
static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  /* Rt into 12..15; the base register (operand 2) into 16..19.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
8917
8918 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
8919 which is not a multiple of four is UNPREDICTABLE. */
8920 static void
8921 check_ldr_r15_aligned (void)
8922 {
8923 constraint (!(inst.operands[1].immisreg)
8924 && (inst.operands[0].reg == REG_PC
8925 && inst.operands[1].reg == REG_PC
8926 && (inst.reloc.exp.X_add_number & 0x3)),
8927 _("ldr to register 15 must be 4-byte alligned"));
8928 }
8929
8930 static void
8931 do_ldst (void)
8932 {
8933 inst.instruction |= inst.operands[0].reg << 12;
8934 if (!inst.operands[1].isreg)
8935 if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
8936 return;
8937 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
8938 check_ldr_r15_aligned ();
8939 }
8940
8941 static void
8942 do_ldstt (void)
8943 {
8944 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
8945 reject [Rn,...]. */
8946 if (inst.operands[1].preind)
8947 {
8948 constraint (inst.reloc.exp.X_op != O_constant
8949 || inst.reloc.exp.X_add_number != 0,
8950 _("this instruction requires a post-indexed address"));
8951
8952 inst.operands[1].preind = 0;
8953 inst.operands[1].postind = 1;
8954 inst.operands[1].writeback = 1;
8955 }
8956 inst.instruction |= inst.operands[0].reg << 12;
8957 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
8958 }
8959
8960 /* Halfword and signed-byte load/store operations. */
8961
8962 static void
8963 do_ldstv4 (void)
8964 {
8965 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
8966 inst.instruction |= inst.operands[0].reg << 12;
8967 if (!inst.operands[1].isreg)
8968 if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
8969 return;
8970 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
8971 }
8972
8973 static void
8974 do_ldsttv4 (void)
8975 {
8976 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
8977 reject [Rn,...]. */
8978 if (inst.operands[1].preind)
8979 {
8980 constraint (inst.reloc.exp.X_op != O_constant
8981 || inst.reloc.exp.X_add_number != 0,
8982 _("this instruction requires a post-indexed address"));
8983
8984 inst.operands[1].preind = 0;
8985 inst.operands[1].postind = 1;
8986 inst.operands[1].writeback = 1;
8987 }
8988 inst.instruction |= inst.operands[0].reg << 12;
8989 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
8990 }
8991
8992 /* Co-processor register load/store.
8993 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
8994 static void
8995 do_lstc (void)
8996 {
8997 inst.instruction |= inst.operands[0].reg << 8;
8998 inst.instruction |= inst.operands[1].reg << 12;
8999 encode_arm_cp_address (2, TRUE, TRUE, 0);
9000 }
9001
9002 static void
9003 do_mlas (void)
9004 {
9005 /* This restriction does not apply to mls (nor to mla in v6 or later). */
9006 if (inst.operands[0].reg == inst.operands[1].reg
9007 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
9008 && !(inst.instruction & 0x00400000))
9009 as_tsktsk (_("Rd and Rm should be different in mla"));
9010
9011 inst.instruction |= inst.operands[0].reg << 16;
9012 inst.instruction |= inst.operands[1].reg;
9013 inst.instruction |= inst.operands[2].reg << 8;
9014 inst.instruction |= inst.operands[3].reg << 12;
9015 }
9016
9017 static void
9018 do_mov (void)
9019 {
9020 constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
9021 && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
9022 THUMB1_RELOC_ONLY);
9023 inst.instruction |= inst.operands[0].reg << 12;
9024 encode_arm_shifter_operand (1);
9025 }
9026
9027 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
9028 static void
9029 do_mov16 (void)
9030 {
9031 bfd_vma imm;
9032 bfd_boolean top;
9033
9034 top = (inst.instruction & 0x00400000) != 0;
9035 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
9036 _(":lower16: not allowed this instruction"));
9037 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
9038 _(":upper16: not allowed instruction"));
9039 inst.instruction |= inst.operands[0].reg << 12;
9040 if (inst.reloc.type == BFD_RELOC_UNUSED)
9041 {
9042 imm = inst.reloc.exp.X_add_number;
9043 /* The value is in two pieces: 0:11, 16:19. */
9044 inst.instruction |= (imm & 0x00000fff);
9045 inst.instruction |= (imm & 0x0000f000) << 4;
9046 }
9047 }
9048
9049 static int
9050 do_vfp_nsyn_mrs (void)
9051 {
9052 if (inst.operands[0].isvec)
9053 {
9054 if (inst.operands[1].reg != 1)
9055 first_error (_("operand 1 must be FPSCR"));
9056 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
9057 memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
9058 do_vfp_nsyn_opcode ("fmstat");
9059 }
9060 else if (inst.operands[1].isvec)
9061 do_vfp_nsyn_opcode ("fmrx");
9062 else
9063 return FAIL;
9064
9065 return SUCCESS;
9066 }
9067
9068 static int
9069 do_vfp_nsyn_msr (void)
9070 {
9071 if (inst.operands[0].isvec)
9072 do_vfp_nsyn_opcode ("fmxr");
9073 else
9074 return FAIL;
9075
9076 return SUCCESS;
9077 }
9078
9079 static void
9080 do_vmrs (void)
9081 {
9082 unsigned Rt = inst.operands[0].reg;
9083
9084 if (thumb_mode && Rt == REG_SP)
9085 {
9086 inst.error = BAD_SP;
9087 return;
9088 }
9089
9090 /* APSR_ sets isvec. All other refs to PC are illegal. */
9091 if (!inst.operands[0].isvec && Rt == REG_PC)
9092 {
9093 inst.error = BAD_PC;
9094 return;
9095 }
9096
9097 /* If we get through parsing the register name, we just insert the number
9098 generated into the instruction without further validation. */
9099 inst.instruction |= (inst.operands[1].reg << 16);
9100 inst.instruction |= (Rt << 12);
9101 }
9102
9103 static void
9104 do_vmsr (void)
9105 {
9106 unsigned Rt = inst.operands[1].reg;
9107
9108 if (thumb_mode)
9109 reject_bad_reg (Rt);
9110 else if (Rt == REG_PC)
9111 {
9112 inst.error = BAD_PC;
9113 return;
9114 }
9115
9116 /* If we get through parsing the register name, we just insert the number
9117 generated into the instruction without further validation. */
9118 inst.instruction |= (inst.operands[0].reg << 16);
9119 inst.instruction |= (Rt << 12);
9120 }
9121
static void
do_mrs (void)
{
  unsigned br;

  /* VFP-syntax forms (FMSTAT/FMRX) are handled separately.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;

  if (inst.operands[1].isreg)
    {
      /* Presumably a banked-register operand, with bit 9 (0x200)
	 marking the banked encoding — verify against the register
	 parser.  NOTE(review): "(br & 0xf0000) != 0xf000" can never
	 be false (the masks do not overlap), so only the 0x200 test
	 is effective here.  */
      br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf000))
	as_bad (_("bad register for mrs"));
    }
  else
    {
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
		  != (PSR_c|PSR_f),
		  _("'APSR', 'CPSR' or 'SPSR' expected"));
      /* SPSR_BIT selects SPSR; 15<<16 presumably encodes the "all
	 fields" mask — verify.  */
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
    }

  inst.instruction |= br;
}
9150
9151 /* Two possible forms:
9152 "{C|S}PSR_<field>, Rm",
9153 "{C|S}PSR_f, #expression". */
9154
/* Encode MSR.  Two possible forms:
   "{C|S}PSR_<field>, Rm"          - register source,
   "{C|S}PSR_f, #expression"       - immediate source (left to a reloc).
   Defers to the VFP "fmxr" pseudo-form first.  */
static void
do_msr (void)
{
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  /* Field mask and SPSR bit come pre-encoded from the parser.  */
  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      /* Immediate form: let fixup processing encode the rotated value.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
      inst.reloc.pc_rel = 0;
    }
}
9171
/* Encode MUL{S} Rd, Rm {, Rs}.  With two operands, Rs defaults to Rd
   (MUL Rd, Rm == MUL Rd, Rm, Rd).  Warns about Rd == Rm on pre-v6
   cores, where the result is unpredictable.  */
static void
do_mul (void)
{
  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 16;	/* Rd.  */
  inst.instruction |= inst.operands[1].reg;		/* Rm.  */
  inst.instruction |= inst.operands[2].reg << 8;	/* Rs.  */

  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("Rd and Rm should be different in mul"));
}
9187
9188 /* Long Multiply Parser
9189 UMULL RdLo, RdHi, Rm, Rs
9190 SMULL RdLo, RdHi, Rm, Rs
9191 UMLAL RdLo, RdHi, Rm, Rs
9192 SMLAL RdLo, RdHi, Rm, Rs. */
9193
/* Encode the long-multiply family (UMULL/SMULL/UMLAL/SMLAL)
   RdLo, RdHi, Rm, Rs.  Warns (does not error) on the register
   combinations the architecture leaves unpredictable.  */
static void
do_mull (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* RdLo.  */
  inst.instruction |= inst.operands[1].reg << 16;	/* RdHi.  */
  inst.instruction |= inst.operands[2].reg;		/* Rm.  */
  inst.instruction |= inst.operands[3].reg << 8;	/* Rs.  */

  /* rdhi and rdlo must be different.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));

  /* rdhi, rdlo and rm must all be different before armv6.  */
  if ((inst.operands[0].reg == inst.operands[2].reg
       || inst.operands[1].reg == inst.operands[2].reg)
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
}
9212
9213 static void
9214 do_nop (void)
9215 {
9216 if (inst.operands[0].present
9217 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
9218 {
9219 /* Architectural NOP hints are CPSR sets with no bits selected. */
9220 inst.instruction &= 0xf0000000;
9221 inst.instruction |= 0x0320f000;
9222 if (inst.operands[0].present)
9223 inst.instruction |= inst.operands[0].imm;
9224 }
9225 }
9226
9227 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9228 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9229 Condition defaults to COND_ALWAYS.
9230 Error if Rd, Rn or Rm are R15. */
9231
/* Encode PKHBT Rd, Rn, Rm {, LSL #imm} (see the comment above for the
   full syntax).  The optional shift operand, when present, is handled
   by the generic shift encoder.  */
static void
do_pkhbt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* Rd.  */
  inst.instruction |= inst.operands[1].reg << 16;	/* Rn.  */
  inst.instruction |= inst.operands[2].reg;		/* Rm.  */
  if (inst.operands[3].present)
    encode_arm_shift (3);
}
9241
9242 /* ARM V6 PKHTB (Argument Parse). */
9243
9244 static void
9245 do_pkhtb (void)
9246 {
9247 if (!inst.operands[3].present)
9248 {
9249 /* If the shift specifier is omitted, turn the instruction
9250 into pkhbt rd, rm, rn. */
9251 inst.instruction &= 0xfff00010;
9252 inst.instruction |= inst.operands[0].reg << 12;
9253 inst.instruction |= inst.operands[1].reg;
9254 inst.instruction |= inst.operands[2].reg << 16;
9255 }
9256 else
9257 {
9258 inst.instruction |= inst.operands[0].reg << 12;
9259 inst.instruction |= inst.operands[1].reg << 16;
9260 inst.instruction |= inst.operands[2].reg;
9261 encode_arm_shift (3);
9262 }
9263 }
9264
9265 /* ARMv5TE: Preload-Cache
9266 MP Extensions: Preload for write
9267
9268 PLD(W) <addr_mode>
9269
9270 Syntactically, like LDR with B=1, W=0, L=1. */
9271
/* Encode PLD/PLDW <addr_mode> (see the comment above).  Only a plain
   pre-indexed address without writeback is architecturally valid.  */
static void
do_pld (void)
{
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
9285
9286 /* ARMv7: PLI <addr_mode> */
/* ARMv7: PLI <addr_mode>.  Same addressing restrictions as PLD, but
   the PLI encoding keeps the P bit clear, so strip PRE_INDEX after
   the common address encoder has run.  */
static void
do_pli (void)
{
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  inst.instruction &= ~PRE_INDEX;
}
9301
/* Encode PUSH/POP {reglist} by rewriting the operands into the
   equivalent LDM/STM SP!, {reglist} form and reusing the LDM/STM
   encoder.  */
static void
do_push_pop (void)
{
  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  /* Shift the register list to operand 1 and synthesize SP! as the
     base register in operand 0.  */
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
}
9314
9315 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9316 word at the specified address and the following word
9317 respectively.
9318 Unconditionally executed.
9319 Error if Rn is R15. */
9320
/* Encode RFE{addr_mode} Rn{!} (see the comment above): base register
   into bits 16-19, plus optional writeback.  */
static void
do_rfe (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
}
9328
9329 /* ARM V6 ssat (argument parse). */
9330
/* Encode SSAT Rd, #sat, Rm {, shift}.  The signed saturate position is
   encoded as (imm - 1), i.e. #1..#32 maps to field value 0..31.  */
static void
do_ssat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= (inst.operands[1].imm - 1) << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}
9341
9342 /* ARM V6 usat (argument parse). */
9343
/* Encode USAT Rd, #sat, Rm {, shift}.  Unlike SSAT, the unsigned
   saturate position #0..#31 is encoded directly, with no -1 bias.  */
static void
do_usat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}
9354
9355 /* ARM V6 ssat16 (argument parse). */
9356
/* Encode SSAT16 Rd, #sat, Rm.  As with SSAT, the saturate position is
   biased by -1 in the encoding.  */
static void
do_ssat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= ((inst.operands[1].imm - 1) << 16);
  inst.instruction |= inst.operands[2].reg;
}
9364
/* Encode USAT16 Rd, #sat, Rm.  The unsigned saturate position is
   encoded unbiased.  */
static void
do_usat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;
}
9372
9373 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9374 preserving the other bits.
9375
9376 setend <endian_specifier>, where <endian_specifier> is either
9377 BE or LE. */
9378
/* Encode SETEND (see the comment above).  Operand 0's imm is nonzero
   for BE; that selects the E bit (bit 9) in the encoding.  Deprecated
   from ARMv8 onwards, so warn there.  */
static void
do_setend (void)
{
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
      as_tsktsk (_("setend use is deprecated for ARMv8"));

  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}
9389
9390 static void
9391 do_shift (void)
9392 {
9393 unsigned int Rm = (inst.operands[1].present
9394 ? inst.operands[1].reg
9395 : inst.operands[0].reg);
9396
9397 inst.instruction |= inst.operands[0].reg << 12;
9398 inst.instruction |= Rm;
9399 if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
9400 {
9401 inst.instruction |= inst.operands[2].reg << 8;
9402 inst.instruction |= SHIFT_BY_REG;
9403 /* PR 12854: Error on extraneous shifts. */
9404 constraint (inst.operands[2].shifted,
9405 _("extraneous shift as part of operand to shift insn"));
9406 }
9407 else
9408 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
9409 }
9410
/* Encode SMC #imm: the immediate is deferred to the SMC relocation.  */
static void
do_smc (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SMC;
  inst.reloc.pc_rel = 0;
}
9417
/* Encode HVC #imm: the immediate is deferred to the HVC relocation.  */
static void
do_hvc (void)
{
  inst.reloc.type = BFD_RELOC_ARM_HVC;
  inst.reloc.pc_rel = 0;
}
9424
/* Encode SWI/SVC #imm: the immediate is deferred to the SWI relocation.  */
static void
do_swi (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SWI;
  inst.reloc.pc_rel = 0;
}
9431
/* Encode ARM-state SETPAN #imm: the single-bit immediate goes in
   bit 9.  Requires the PAN extension.  */
static void
do_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= ((inst.operands[0].imm & 1) << 9);
}
9440
/* Encode Thumb-state SETPAN #imm: the immediate goes in bit 3.
   Requires the PAN extension.  */
static void
do_t_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= (inst.operands[0].imm << 3);
}
9449
9450 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9451 SMLAxy{cond} Rd,Rm,Rs,Rn
9452 SMLAWy{cond} Rd,Rm,Rs,Rn
9453 Error if any register is R15. */
9454
/* Encode SMLAxy/SMLAWy Rd, Rm, Rs, Rn (see the comment above):
   Rd in bits 16-19, Rm in 0-3, Rs in 8-11, Rn (accumulator) in 12-15.  */
static void
do_smla (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}
9463
9464 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9465 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9466 Error if any register is R15.
9467 Warning if Rdlo == Rdhi. */
9468
/* Encode SMLALxy Rdlo, Rdhi, Rm, Rs (see the comment above); warn
   when Rdlo == Rdhi, which the architecture leaves unpredictable.  */
static void
do_smlal (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* RdLo.  */
  inst.instruction |= inst.operands[1].reg << 16;	/* RdHi.  */
  inst.instruction |= inst.operands[2].reg;		/* Rm.  */
  inst.instruction |= inst.operands[3].reg << 8;	/* Rs.  */

  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));
}
9480
9481 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9482 SMULxy{cond} Rd,Rm,Rs
9483 Error if any register is R15. */
9484
/* Encode SMULxy Rd, Rm, Rs (see the comment above): Rd in bits 16-19,
   Rm in 0-3, Rs in 8-11.  */
static void
do_smul (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
}
9492
9493 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9494 the same for both ARM and Thumb-2. */
9495
9496 static void
9497 do_srs (void)
9498 {
9499 int reg;
9500
9501 if (inst.operands[0].present)
9502 {
9503 reg = inst.operands[0].reg;
9504 constraint (reg != REG_SP, _("SRS base register must be r13"));
9505 }
9506 else
9507 reg = REG_SP;
9508
9509 inst.instruction |= reg << 16;
9510 inst.instruction |= inst.operands[1].imm;
9511 if (inst.operands[0].writeback || inst.operands[1].writeback)
9512 inst.instruction |= WRITE_BACK;
9513 }
9514
9515 /* ARM V6 strex (argument parse). */
9516
/* Encode STREX Rd, Rt, [Rn]: store-exclusive.  Only a plain register
   address with zero offset is legal, Rd must not overlap Rt or Rn, and
   the parsed (zero) offset reloc is discarded.  */
static void
do_strex (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;	/* Rd (status).  */
  inst.instruction |= inst.operands[1].reg;		/* Rt.  */
  inst.instruction |= inst.operands[2].reg << 16;	/* Rn.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
}
9540
/* Encode Thumb STREXB/STREXH Rd, Rt, [Rn]: validate the addressing
   mode and overlap rules, then use the common Rm/Rd/Rn field layout.  */
static void
do_t_strexbh (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9555
/* Encode STREXD Rd, Rt, Rt2, [Rn]: Rt must be an even register other
   than r14, Rt2 (if written) must be Rt+1, and Rd must not overlap the
   source pair or the base.  */
static void
do_strexd (void)
{
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;	/* Rd (status).  */
  inst.instruction |= inst.operands[1].reg;		/* Rt.  */
  inst.instruction |= inst.operands[3].reg << 16;	/* Rn.  */
}
9577
9578 /* ARM V8 STRL. */
/* ARM V8 STLEX (store-release exclusive, ARM encoding): reject status
   register overlapping source or base, then use the Rd/Rm/Rn layout.  */
static void
do_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rd_rm_rn ();
}
9587
/* ARM V8 STLEX, Thumb encoding: same overlap rules as the ARM form but
   with the Thumb Rm/Rd/Rn field layout.  */
static void
do_t_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9596
9597 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9598 extends it to 32-bits, and adds the result to a value in another
9599 register. You can specify a rotation by 0, 8, 16, or 24 bits
9600 before extracting the 16-bit value.
9601 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9602 Condition defaults to COND_ALWAYS.
9603 Error if any register uses R15. */
9604
/* Encode SXTAH Rd, Rn, Rm {, rotation} (see the comment above).  The
   rotation operand (0/8/16/24, pre-scaled by the parser) lands in the
   two-bit field at bits 10-11.  */
static void
do_sxtah (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* Rd.  */
  inst.instruction |= inst.operands[1].reg << 16;	/* Rn.  */
  inst.instruction |= inst.operands[2].reg;		/* Rm.  */
  inst.instruction |= inst.operands[3].imm << 10;	/* Rotation.  */
}
9613
9614 /* ARM V6 SXTH.
9615
9616 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9617 Condition defaults to COND_ALWAYS.
9618 Error if any register uses R15. */
9619
/* Encode SXTH Rd, Rm {, rotation} (see the comment above): like SXTAH
   but with no accumulate register.  */
static void
do_sxth (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* Rd.  */
  inst.instruction |= inst.operands[1].reg;		/* Rm.  */
  inst.instruction |= inst.operands[2].imm << 10;	/* Rotation.  */
}
9627 \f
9628 /* VFP instructions. In a logical order: SP variant first, monad
9629 before dyad, arithmetic then move then load/store. */
9630
/* VFP single-precision one-operand form: Sd, Sm.  */
static void
do_vfp_sp_monadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
9637
/* VFP single-precision two-operand arithmetic form: Sd, Sn, Sm.  */
static void
do_vfp_sp_dyadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
9645
/* VFP single-precision compare-with-zero: only Sd is encoded.  */
static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}
9651
/* VFP conversion with double destination, single source: Dd, Sm.  */
static void
do_vfp_dp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
9658
/* VFP conversion with single destination, double source: Sd, Dm.  */
static void
do_vfp_sp_dp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
9665
/* Move single-precision register to core register (e.g. fmrs):
   core Rd in bits 12-15, source in the Sn field.  */
static void
do_vfp_reg_from_sp (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}
9672
/* Move a pair of consecutive single-precision registers to two core
   registers (e.g. fmrrs): operand 2's imm carries the list length.  */
static void
do_vfp_reg2_from_sp2 (void)
{
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;	/* Rt.  */
  inst.instruction |= inst.operands[1].reg << 16;	/* Rt2.  */
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
9682
/* Move core register to single-precision register (e.g. fmsr):
   destination in the Sn field, core Rt in bits 12-15.  */
static void
do_vfp_sp_from_reg (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}
9689
/* Move two core registers to a pair of consecutive single-precision
   registers (e.g. fmsrr): operand 0's imm carries the list length.  */
static void
do_vfp_sp2_from_reg2 (void)
{
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;	/* Rt.  */
  inst.instruction |= inst.operands[2].reg << 16;	/* Rt2.  */
}
9699
/* Single-precision load/store (flds/fsts): Sd plus a coprocessor
   address formed from operand 1.  */
static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
9706
/* Double-precision load/store (fldd/fstd): Dd plus a coprocessor
   address formed from operand 1.  */
static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
9713
9714
/* Common encoder for single-precision load/store multiple:
   base register (with optional writeback), first Sd, and the register
   count from operand 1's imm.  Non-IA modes require writeback.  */
static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;
}
9727
9728 static void
9729 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
9730 {
9731 int count;
9732
9733 if (inst.operands[0].writeback)
9734 inst.instruction |= WRITE_BACK;
9735 else
9736 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
9737 _("this addressing mode requires base-register writeback"));
9738
9739 inst.instruction |= inst.operands[0].reg << 16;
9740 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
9741
9742 count = inst.operands[1].imm << 1;
9743 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
9744 count += 1;
9745
9746 inst.instruction |= count;
9747 }
9748
/* FLDMIAS/FSTMIAS: single-precision load/store multiple, increment-after.  */
static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}
9754
/* FLDMDBS/FSTMDBS: single-precision load/store multiple, decrement-before.  */
static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}
9760
/* FLDMIAD/FSTMIAD: double-precision load/store multiple, increment-after.  */
static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}
9766
/* FLDMDBD/FSTMDBD: double-precision load/store multiple, decrement-before.  */
static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}
9772
/* FLDMIAX/FSTMIAX: extended-precision variant (odd word count).  */
static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}
9778
/* FLDMDBX/FSTMDBX: extended-precision variant, decrement-before.  */
static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
9784
/* Double-precision form with operands in the Dd, Dm fields.  */
static void
do_vfp_dp_rd_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
9791
/* Double-precision form with operands in the Dn, Dd fields.  */
static void
do_vfp_dp_rn_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}
9798
/* Double-precision form with operands in the Dd, Dn fields.  */
static void
do_vfp_dp_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}
9805
/* Double-precision three-operand form: Dd, Dn, Dm.  */
static void
do_vfp_dp_rd_rn_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}
9813
/* Double-precision form with a single operand in the Dd field.  */
static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}
9819
/* Double-precision three-operand form with operands in Dm, Dd, Dn order.  */
static void
do_vfp_dp_rm_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
9827
9828 /* VFPv3 instructions. */
/* VFPv3 single-precision immediate (vmov.f32 Sd, #imm): split the
   8-bit encoded constant into the high nibble (bits 16-19) and low
   nibble (bits 0-3).  */
static void
do_vfp_sp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
9836
/* VFPv3 double-precision immediate (vmov.f64 Dd, #imm): same nibble
   split as the single-precision form.  */
static void
do_vfp_dp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
9844
/* Encode the fraction-bits field of the VFPv3 fixed-point conversion
   instructions.  SRCSIZE is the fixed-point operand width (16 or 32);
   operand 1's imm is the requested number of fraction bits.  The field
   actually encoded is (srcsize - imm), split across bit 5 (lsb) and
   bits 0-3.  */
static void
vfp_conv (int srcsize)
{
  int immbits = srcsize - inst.operands[1].imm;

  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
    {
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
	 i.e. immbits must be in range 0 - 16.  */
      inst.error = _("immediate value out of range, expected range [0, 16]");
      return;
    }
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
    {
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
	 i.e. immbits must be in range 0 - 31.  */
      inst.error = _("immediate value out of range, expected range [1, 32]");
      return;
    }

  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}
9868
/* Single-precision fixed-point conversion, 16-bit operand.  */
static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}
9875
/* Double-precision fixed-point conversion, 16-bit operand.  */
static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}
9882
/* Single-precision fixed-point conversion, 32-bit operand.  */
static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}
9889
/* Double-precision fixed-point conversion, 32-bit operand.  */
static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
9896 \f
9897 /* FPA instructions. Also in a logical order. */
9898
/* FPA compare: first operand in bits 16-19, second in bits 0-3.  */
static void
do_fpa_cmp (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
9905
/* Encode FPA LFM/SFM: Fd plus a 1-4 register count packed into the
   CP_T_X/CP_T_Y bits.  The "ea"/"fd" stack forms are emulated by
   faking an offset of 12 bytes per register and flipping the
   index/writeback operand as needed.  */
static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* Register count 1-4 is encoded in two scattered bits (4 -> 00).  */
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X;	  break;
    case 2: inst.instruction |= CP_T_Y;	  break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: 				  break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Each FPA register occupies 12 bytes in memory.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;

      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  /* Convert [Rn]! into post-indexed form for the emulation.  */
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
9944 \f
9945 /* iWMMXt instructions: strictly in alphabetical order. */
9946
/* iWMMXt TANDC/TORC: the destination is architecturally fixed to r15.  */
static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}
9952
/* iWMMXt TEXTRC: destination register in bits 12-15, lane immediate
   in bits 0-3.  */
static void
do_iwmmxt_textrc (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm;
}
9959
/* iWMMXt TEXTRM: core destination in bits 12-15, wR source in bits
   16-19, lane immediate in bits 0-3.  */
static void
do_iwmmxt_textrm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].imm;
}
9967
/* iWMMXt TINSR: wR destination in bits 16-19, core source in bits
   12-15, lane immediate in bits 0-3.  */
static void
do_iwmmxt_tinsr (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].imm;
}
9975
/* iWMMXt TMIA: accumulator in bits 5-7, Rm in bits 0-3, Rs in bits
   12-15.  */
static void
do_iwmmxt_tmia (void)
{
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}
9983
/* iWMMXt WALIGNI: wRd in bits 12-15, wRn in bits 16-19, wRm in bits
   0-3, alignment immediate in bits 20-22.  */
static void
do_iwmmxt_waligni (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 20;
}
9992
/* iWMMXt WMERGE: like WALIGNI but the immediate occupies bits 21-23.  */
static void
do_iwmmxt_wmerge (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 21;
}
10001
/* iWMMXt WMOV: encoded as WOR with the source duplicated.  */
static void
do_iwmmxt_wmov (void)
{
  /* WMOV rD, rN is an alias for WOR rD, rN, rN.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
10010
10011 static void
10012 do_iwmmxt_wldstbh (void)
10013 {
10014 int reloc;
10015 inst.instruction |= inst.operands[0].reg << 12;
10016 if (thumb_mode)
10017 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
10018 else
10019 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
10020 encode_arm_cp_address (1, TRUE, FALSE, reloc);
10021 }
10022
/* iWMMXt WLDRW/WSTRW: control-register forms are unconditional, so
   force the 0xf condition field for them.  */
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
10036
/* iWMMXt WLDRD/WSTRD.  iWMMXt2 adds a register-offset form which uses
   a different (unconditional) encoding built by hand here; otherwise
   fall back to the normal coprocessor address encoder.  */
static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      /* Rewrite into the iWMMXt2 register-offset encoding: clear the
	 immediate-form fields, then rebuild P/U/W, Rn, the scaled
	 offset (bits 4-7) and the index register.  */
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xfU << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
10059
/* iWMMXt WSHUFH: the 8-bit shuffle immediate is split, high nibble
   into bits 20-23 and low nibble into bits 0-3.  */
static void
do_iwmmxt_wshufh (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
  inst.instruction |= (inst.operands[2].imm & 0x0f);
}
10068
/* iWMMXt WZERO: encoded as WANDN of a register with itself.  */
static void
do_iwmmxt_wzero (void)
{
  /* WZERO reg is an alias for WANDN reg, reg, reg.  */
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[0].reg << 16;
}
10077
/* iWMMXt shift instructions accepting either a wR shift count or
   (on iWMMXt2) a 5-bit immediate.  A #0 shift is not directly
   encodable, so it is rewritten into an equivalent rotate-by-width
   (or plain WOR for doubleword operations) by patching the opcode
   field at bits 20-23.  */
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* Bits 20-23 identify the operation width/kind.  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  */
    inst.operands[2].imm &= 0x1f;
    /* Immediate form is unconditional: bit 8 of the count goes to
       bit 8 of the insn, the low nibble to bits 0-3.  */
    inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
10127 \f
10128 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10129 operations first, then control, shift, and load/store. */
10130
10131 /* Insns like "foo X,Y,Z". */
10132
/* Maverick three-register form "foo X,Y,Z": X in bits 16-19,
   Y in bits 0-3, Z in bits 12-15.  */
static void
do_mav_triple (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}
10140
10141 /* Insns like "foo W,X,Y,Z".
10142 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10143
/* Maverick four-register form (see the comment above): W in bits 5-7,
   X in bits 12-15, Y in bits 16-19, Z in bits 0-3.  */
static void
do_mav_quad (void)
{
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.instruction |= inst.operands[3].reg;
}
10152
10153 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
/* cfmvsc32<cond> DSPSC,MVDX[15:0]: only the source register is
   variable; it goes in bits 12-15.  */
static void
do_mav_dspsc (void)
{
  inst.instruction |= inst.operands[1].reg << 12;
}
10159
10160 /* Maverick shift immediate instructions.
10161 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10162 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
10163
/* Maverick shift-immediate (see the comment above): the 7-bit shift
   amount is split around the reserved bit 4.  */
static void
do_mav_shift (void)
{
  int imm = inst.operands[2].imm;

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;

  /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
     Bits 5-7 of the insn should have bits 4-6 of the immediate.
     Bit 4 should be 0.  */
  imm = (imm & 0xf) | ((imm & 0x70) << 1);

  inst.instruction |= imm;
}
10179 \f
10180 /* XScale instructions. Also sorted arithmetic before move. */
10181
10182 /* Xscale multiply-accumulate (argument parse)
10183 MIAcc acc0,Rm,Rs
10184 MIAPHcc acc0,Rm,Rs
10185 MIAxycc acc0,Rm,Rs. */
10186
/* XScale MIA family (see the comment above): the accumulator operand
   is fixed (acc0), so only Rm (bits 0-3) and Rs (bits 12-15) are
   encoded.  */
static void
do_xsc_mia (void)
{
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}
10193
10194 /* Xscale move-accumulator-register (argument parse)
10195
10196 MARcc acc0,RdLo,RdHi. */
10197
/* XScale MAR acc0, RdLo, RdHi: RdLo in bits 12-15, RdHi in bits 16-19.  */
static void
do_xsc_mar (void)
{
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
10204
10205 /* Xscale move-register-accumulator (argument parse)
10206
10207 MRAcc RdLo,RdHi,acc0. */
10208
/* XScale MRA RdLo, RdHi, acc0: RdLo in bits 12-15, RdHi in bits 16-19;
   the two destinations must differ.  */
static void
do_xsc_mra (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
}
10216 \f
10217 /* Encoding functions relevant only to Thumb. */
10218
10219 /* inst.operands[i] is a shifted-register operand; encode
10220 it into inst.instruction in the format used by Thumb32. */
10221
/* inst.operands[i] is a shifted-register operand; encode it into
   inst.instruction in the Thumb32 format: Rm in bits 0-3, shift type
   in bits 4-5, and the 5-bit shift amount split between bits 12-14
   (imm3) and bits 6-7 (imm2).  Only immediate shift counts are legal
   in this format.  */
static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR #0.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      if (value == 0)
	/* A zero count degenerates to LSL #0, i.e. no shift.  */
	shift = SHIFT_LSL;
      else if (value == 32)
	/* LSR/ASR #32 are encoded with a zero count field.  */
	value = 0;

      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
10253
10254
10255 /* inst.operands[i] was set up by parse_address. Encode it into a
10256 Thumb32 format load or store instruction. Reject forms that cannot
10257 be used with such instructions. If is_t is true, reject forms that
10258 cannot be used with a T instruction; if is_d is true, reject forms
10259 that cannot be used with a D instruction. If it is a store insn,
10260 reject PC in Rn. */
10261
static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  /* An "=N" literal-pool address never reaches here as a bare
     immediate; it must already be a register-based form.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* Register-offset form: [Rn, Rm {, LSL #shift}].  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  constraint (inst.reloc.exp.X_op != O_constant,
		      _("expression too complex"));
	  /* Only LSL #0..#3 is encodable (2-bit field at bits 5-4).  */
	  constraint (inst.reloc.exp.X_add_number < 0
		      || inst.reloc.exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
	}
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* Pre-indexed immediate form: [Rn, #imm]{!}.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      /* PC-relative addressing is only legal for loads (literal form).  */
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      /* NOTE(review): the magic constants below set the P/U/W-style
	 control bits of the relevant T32 encodings; exact bit meaning
	 differs between the D (dual) and single forms — see the ARM ARM.  */
      if (is_d)
	{
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* Post-indexed form: [Rn], #imm — writeback is implied.  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
10333
10334 /* Table of Thumb instructions which exist in both 16- and 32-bit
10335 encodings (the latter only in post-V6T2 cores). The index is the
10336 value used in the insns table below. When there is more than one
10337 possible 16-bit encoding for the instruction, this table always
10338 holds variant (1).
10339 Also contains several pseudo-instructions used during relaxation. */
/* Each X(mnemonic, 16-bit-opcode, 32-bit-opcode) entry pairs a Thumb
   mnemonic with its hexadecimal 16-bit and 32-bit encodings;
   ffffffff marks "no 32-bit form exists".  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* Table of 16-bit opcodes, indexed by T_MNEM code minus
   (T16_32_OFFSET + 1).  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* Table of 32-bit opcodes, same indexing.  In the 32-bit forms used
   here bit 20 is the S (flag-setting) bit, hence THUMB_SETS_FLAGS.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
10439
10440 /* Thumb instruction encoders, in alphabetical order. */
10441
10442 /* ADDW or SUBW. */
10443
10444 static void
10445 do_t_add_sub_w (void)
10446 {
10447 int Rd, Rn;
10448
10449 Rd = inst.operands[0].reg;
10450 Rn = inst.operands[1].reg;
10451
10452 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10453 is the SP-{plus,minus}-immediate form of the instruction. */
10454 if (Rn == REG_SP)
10455 constraint (Rd == REG_PC, BAD_PC);
10456 else
10457 reject_bad_reg (Rd);
10458
10459 inst.instruction |= (Rn << 16) | (Rd << 8);
10460 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
10461 }
10462
/* Parse an add or subtract instruction.  We get here with inst.instruction
   equalling any of THUMB_OPCODE_add, adds, sub, or subs.  Chooses between
   the 16-bit and 32-bit encodings (setting up relaxation where the size
   is not forced) and handles the SP/PC special cases.  */

static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  if (Rd == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      /* Flag-setting forms are narrow outside an IT block; the
	 non-flag-setting forms are narrow inside one.  */
      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      if (flags)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (!inst.operands[2].isreg)
	{
	  /* Immediate operand.  */
	  int add;

	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  /* The Thumb-1 ALU_ABS group relocs keep their own
		     reloc type; anything else either gets a THUMB_ADD
		     reloc (size forced to 2) or is left for the
		     relaxation machinery.  */
		  if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		      || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		    {
		      if (inst.size_req == 2)
			inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
		      else
			inst.relax = opcode;
		    }
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      /* 32-bit encoding required (or chosen by default).  */
	      constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
			  && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
			  THUMB1_RELOC_ONLY);
	      if (Rd == REG_PC)
		{
		  /* Only the exception-return idiom SUBS PC, LR, #const
		     may write the PC here.  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.reloc.exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.reloc.exp.X_add_number < 0
			      || inst.reloc.exp.X_add_number > 0xff,
			      _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.reloc.exp.X_add_number;
		  inst.reloc.type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  /* Register operand.  */
	  unsigned int value = inst.reloc.exp.X_add_number;
	  unsigned int shift = inst.operands[2].shift_kind;

	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      /* Normalise so the non-destination source ends up
			 in Rn for the ADD (HI) encoding.  */
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
		      _("shift value over 3 not allowed in thumb mode"));
	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
		      _("only LSL shift allowed in thumb mode"));
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified (old Thumb) syntax: only 16-bit encodings.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg)	/* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
10681
10682 static void
10683 do_t_adr (void)
10684 {
10685 unsigned Rd;
10686
10687 Rd = inst.operands[0].reg;
10688 reject_bad_reg (Rd);
10689
10690 if (unified_syntax && inst.size_req == 0 && Rd <= 7)
10691 {
10692 /* Defer to section relaxation. */
10693 inst.relax = inst.instruction;
10694 inst.instruction = THUMB_OP16 (inst.instruction);
10695 inst.instruction |= Rd << 4;
10696 }
10697 else if (unified_syntax && inst.size_req != 2)
10698 {
10699 /* Generate a 32-bit opcode. */
10700 inst.instruction = THUMB_OP32 (inst.instruction);
10701 inst.instruction |= Rd << 8;
10702 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
10703 inst.reloc.pc_rel = 1;
10704 }
10705 else
10706 {
10707 /* Generate a 16-bit opcode. */
10708 inst.instruction = THUMB_OP16 (inst.instruction);
10709 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
10710 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
10711 inst.reloc.pc_rel = 1;
10712
10713 inst.instruction |= Rd << 4;
10714 }
10715 }
10716
/* Arithmetic instructions for which there is just one 16-bit
   instruction encoding, and it allows only two low registers.
   For maximal compatibility with ARM syntax, we allow three register
   operands even when Thumb-32 instructions are not available, as long
   as the first two are identical.  For instance, both "sbc r0,r1" and
   "sbc r0,r0,r1" are allowed.  */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.
	     Narrow is only possible when the flag-setting behaviour
	     matches the IT context, all registers are low, the operand
	     is unshifted and no 32-bit size was requested.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  /* Non-commutative: the 16-bit form requires Rd == Rs.  */
	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
10805
/* Similarly, but for instructions where the arithmetic operation is
   commutative, so we can allow either of them to be different from
   the destination operand in a 16-bit instruction.  For instance, all
   three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
   accepted.  */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow)
	    {
	      /* Commutative, so the destination may match either
		 source; put the other source in the Rn slot.  */
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
10906
10907 static void
10908 do_t_bfc (void)
10909 {
10910 unsigned Rd;
10911 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
10912 constraint (msb > 32, _("bit-field extends past end of register"));
10913 /* The instruction encoding stores the LSB and MSB,
10914 not the LSB and width. */
10915 Rd = inst.operands[0].reg;
10916 reject_bad_reg (Rd);
10917 inst.instruction |= Rd << 8;
10918 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
10919 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
10920 inst.instruction |= msb - 1;
10921 }
10922
10923 static void
10924 do_t_bfi (void)
10925 {
10926 int Rd, Rn;
10927 unsigned int msb;
10928
10929 Rd = inst.operands[0].reg;
10930 reject_bad_reg (Rd);
10931
10932 /* #0 in second position is alternative syntax for bfc, which is
10933 the same instruction but with REG_PC in the Rm field. */
10934 if (!inst.operands[1].isreg)
10935 Rn = REG_PC;
10936 else
10937 {
10938 Rn = inst.operands[1].reg;
10939 reject_bad_reg (Rn);
10940 }
10941
10942 msb = inst.operands[2].imm + inst.operands[3].imm;
10943 constraint (msb > 32, _("bit-field extends past end of register"));
10944 /* The instruction encoding stores the LSB and MSB,
10945 not the LSB and width. */
10946 inst.instruction |= Rd << 8;
10947 inst.instruction |= Rn << 16;
10948 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10949 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10950 inst.instruction |= msb - 1;
10951 }
10952
10953 static void
10954 do_t_bfx (void)
10955 {
10956 unsigned Rd, Rn;
10957
10958 Rd = inst.operands[0].reg;
10959 Rn = inst.operands[1].reg;
10960
10961 reject_bad_reg (Rd);
10962 reject_bad_reg (Rn);
10963
10964 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
10965 _("bit-field extends past end of register"));
10966 inst.instruction |= Rd << 8;
10967 inst.instruction |= Rn << 16;
10968 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10969 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10970 inst.instruction |= inst.operands[3].imm - 1;
10971 }
10972
10973 /* ARM V5 Thumb BLX (argument parse)
10974 BLX <target_addr> which is BLX(1)
10975 BLX <Rm> which is BLX(2)
10976 Unfortunately, there are two different opcodes for this mnemonic.
10977 So, the insns[].value is not used, and the code here zaps values
10978 into inst.instruction.
10979
10980 ??? How to take advantage of the additional two bits of displacement
10981 available in Thumb32 mode? Need new relocation? */
10982
10983 static void
10984 do_t_blx (void)
10985 {
10986 set_it_insn_type_last ();
10987
10988 if (inst.operands[0].isreg)
10989 {
10990 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
10991 /* We have a register, so this is BLX(2). */
10992 inst.instruction |= inst.operands[0].reg << 3;
10993 }
10994 else
10995 {
10996 /* No register. This must be BLX(1). */
10997 inst.instruction = 0xf000e800;
10998 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
10999 }
11000 }
11001
/* Encode a Thumb branch (B / Bcond), choosing between the 16-bit and
   32-bit encodings and setting the matching PC-relative relocation.  */
static void
do_t_branch (void)
{
  int opcode;
  int cond;
  bfd_reloc_code_real_type reloc;

  /* NOTE(review): cond is read again after set_it_insn_type below —
     presumably because IT-state handling can update inst.cond; confirm
     before simplifying away the first assignment.  */
  cond = inst.cond;
  set_it_insn_type (IF_INSIDE_IT_LAST_INSN);

  if (in_it_block ())
    {
      /* Conditional branches inside IT blocks are encoded as unconditional
	 branches.  */
      cond = COND_ALWAYS;
    }
  else
    cond = inst.cond;

  if (cond != COND_ALWAYS)
    opcode = T_MNEM_bcond;
  else
    opcode = inst.instruction;

  if (unified_syntax
      && (inst.size_req == 4
	  || (inst.size_req != 2
	      && (inst.operands[0].hasreloc
		  || inst.reloc.exp.X_op == O_constant))))
    {
      /* 32-bit encoding: BRANCH25 for unconditional, BRANCH20 for
	 conditional (which needs v6T2 or later).  */
      inst.instruction = THUMB_OP32(opcode);
      if (cond == COND_ALWAYS)
	reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
      else
	{
	  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
		      _("selected architecture does not support "
			"wide conditional branch instruction"));

	  gas_assert (cond != 0xF);
	  inst.instruction |= cond << 22;
	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
    }
  else
    {
      /* 16-bit encoding: BRANCH12 for unconditional, BRANCH9 with the
	 condition in bits 8-11 for conditional.  */
      inst.instruction = THUMB_OP16(opcode);
      if (cond == COND_ALWAYS)
	reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
      else
	{
	  inst.instruction |= cond << 8;
	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
	}
      /* Allow section relaxation.  */
      if (unified_syntax && inst.size_req != 2)
	inst.relax = opcode;
    }
  inst.reloc.type = reloc;
  inst.reloc.pc_rel = 1;
}
11063
11064 /* Actually do the work for Thumb state bkpt and hlt. The only difference
11065 between the two is the maximum immediate allowed - which is passed in
11066 RANGE. */
11067 static void
11068 do_t_bkpt_hlt1 (int range)
11069 {
11070 constraint (inst.cond != COND_ALWAYS,
11071 _("instruction is always unconditional"));
11072 if (inst.operands[0].present)
11073 {
11074 constraint (inst.operands[0].imm > range,
11075 _("immediate value out of range"));
11076 inst.instruction |= inst.operands[0].imm;
11077 }
11078
11079 set_it_insn_type (NEUTRAL_IT_INSN);
11080 }
11081
/* Thumb HLT: immediate limited to 0-63.  */
static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (63);
}
11087
/* Thumb BKPT: immediate limited to 0-255.  */
static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (255);
}
11093
/* Encode Thumb BL with a 23-bit (BRANCH23) PC-relative relocation.  */
static void
do_t_branch23 (void)
{
  set_it_insn_type_last ();
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);

  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
     this file.  We used to simply ignore the PLT reloc type here --
     the branch encoding is now needed to deal with TLSCALL relocs.
     So if we see a PLT reloc now, put it back to how it used to be to
     keep the preexisting behaviour.  */
  if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
    inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);
#endif
}
11121
/* Encode Thumb BX: the target register goes in bits 3-6.  */
static void
do_t_bx (void)
{
  set_it_insn_type_last ();
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
     should cause the alignment to be checked once it is known.  This is
     because BX PC only works if the instruction is word aligned.  */
}
11131
11132 static void
11133 do_t_bxj (void)
11134 {
11135 int Rm;
11136
11137 set_it_insn_type_last ();
11138 Rm = inst.operands[0].reg;
11139 reject_bad_reg (Rm);
11140 inst.instruction |= Rm << 16;
11141 }
11142
11143 static void
11144 do_t_clz (void)
11145 {
11146 unsigned Rd;
11147 unsigned Rm;
11148
11149 Rd = inst.operands[0].reg;
11150 Rm = inst.operands[1].reg;
11151
11152 reject_bad_reg (Rd);
11153 reject_bad_reg (Rm);
11154
11155 inst.instruction |= Rd << 8;
11156 inst.instruction |= Rm << 16;
11157 inst.instruction |= Rm;
11158 }
11159
/* Encode Thumb CPS (change processor state); not permitted inside an
   IT block.  */
static void
do_t_cps (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction |= inst.operands[0].imm;
}
11166
/* Encode Thumb CPSIE/CPSID.  The two-operand (mode-changing) variant
   requires the 32-bit encoding and a v6 (non-M) core; otherwise the
   16-bit encoding is used.  */
static void
do_t_cpsi (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* Carry the IE/ID disable bits over from the 16-bit opcode into
	 the imod field of the 32-bit encoding.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
11194
11195 /* THUMB CPY instruction (argument parse). */
11196
11197 static void
11198 do_t_cpy (void)
11199 {
11200 if (inst.size_req == 4)
11201 {
11202 inst.instruction = THUMB_OP32 (T_MNEM_mov);
11203 inst.instruction |= inst.operands[0].reg << 8;
11204 inst.instruction |= inst.operands[1].reg;
11205 }
11206 else
11207 {
11208 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
11209 inst.instruction |= (inst.operands[0].reg & 0x7);
11210 inst.instruction |= inst.operands[1].reg << 3;
11211 }
11212 }
11213
11214 static void
11215 do_t_cbz (void)
11216 {
11217 set_it_insn_type (OUTSIDE_IT_INSN);
11218 constraint (inst.operands[0].reg > 7, BAD_HIREG);
11219 inst.instruction |= inst.operands[0].reg;
11220 inst.reloc.pc_rel = 1;
11221 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
11222 }
11223
/* Encode Thumb DBG: the 4-bit hint option occupies the low bits.  */
static void
do_t_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
11229
11230 static void
11231 do_t_div (void)
11232 {
11233 unsigned Rd, Rn, Rm;
11234
11235 Rd = inst.operands[0].reg;
11236 Rn = (inst.operands[1].present
11237 ? inst.operands[1].reg : Rd);
11238 Rm = inst.operands[2].reg;
11239
11240 reject_bad_reg (Rd);
11241 reject_bad_reg (Rn);
11242 reject_bad_reg (Rm);
11243
11244 inst.instruction |= Rd << 8;
11245 inst.instruction |= Rn << 16;
11246 inst.instruction |= Rm;
11247 }
11248
11249 static void
11250 do_t_hint (void)
11251 {
11252 if (unified_syntax && inst.size_req == 4)
11253 inst.instruction = THUMB_OP32 (inst.instruction);
11254 else
11255 inst.instruction = THUMB_OP16 (inst.instruction);
11256 }
11257
/* Encode a Thumb IT instruction and record its shape in now_it so the
   following instructions can be validated against the IT block.  */
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_it_insn_type (IT_INSN);
  /* Remember the raw mask with a guard bit, plus the base condition.  */
  now_it.mask = (inst.instruction & 0xf) | 0x10;
  now_it.cc = cond;
  now_it.warn_deprecated = FALSE;

  /* If the condition is a negative condition, invert the mask.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      /* The position of the terminating 1 bit in the mask gives the
	 block length; the bits above it select then/else and must be
	 flipped when the written condition has its low bit clear.  */
      if ((mask & 0x7) == 0)
	{
	  /* No conversion needed.  */
	  now_it.block_length = 1;
	}
      else if ((mask & 0x3) == 0)
	{
	  mask ^= 0x8;
	  now_it.block_length = 2;
	}
      else if ((mask & 0x1) == 0)
	{
	  mask ^= 0xC;
	  now_it.block_length = 3;
	}
      else
	{
	  mask ^= 0xE;
	  now_it.block_length = 4;
	}

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
11300
/* Helper function used for both push/pop and ldm/stm.  Encode a
   Thumb-2 load/store multiple with base register BASE, register-list
   MASK and write-back flag WRITEBACK into inst.instruction,
   diagnosing forbidden and UNPREDICTABLE register-list combinations.
   A single-register list is rewritten as an equivalent LDR/STR.  */
static void
encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
{
  bfd_boolean load;

  /* Bit 20 of the T32 opcode distinguishes load from store.  */
  load = (inst.instruction & (1 << 20)) != 0;

  if (mask & (1 << 13))
    inst.error = _("SP not allowed in register list");

  if ((mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
		   "using write back is UNPREDICTABLE");

  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    /* Loading PC is a branch, so it must close any IT block.  */
	    set_it_insn_type_last ();
	}
    }
  else
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");
    }

  if ((mask & (mask - 1)) == 0)
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      /* Base of the T32 single-register load/store encoding.  */
      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* Put the (only) register number into the Rt field, bits 15:12.  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  inst.instruction |= base << 16;
}
11364
/* Parse/encode Thumb LDM/STM.  Operand 0 is the base register (with
   its writeback flag), operand 1 holds the register list in .imm.
   In unified syntax this tries hard to produce a 16-bit encoding,
   falling back to the wide form via encode_thumb2_ldmstm.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  /* Bit for the base register within the register list.  */
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7)
	    {
	      /* 16-bit STMIA requires writeback; 16-bit LDMIA implies
		 writeback exactly when the base is not in the list.  */
	      if (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask)))
		{
		  if (inst.instruction == T_MNEM_stmia
		      && (inst.operands[1].imm & mask)
		      && (inst.operands[1].imm & (mask - 1)))
		    as_warn (_("value stored for r%d is UNKNOWN"),
			     inst.operands[0].reg);

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= inst.operands[0].reg << 8;
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* This means 1 register in reg list one of 3 situations:
		     1. Instruction is stmia, but without writeback.
		     2. ldmia without writeback, but with Rn not in
			reglist.
		     3. ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     case 1 and 2 which can be converted into a 16-bit
		     str or ldr.  The SP cases are handled below.  */
		  unsigned long opcode;
		  /* First, record an error for Case 3.  */
		  if (inst.operands[1].imm & mask
		      && inst.operands[0].writeback)
		    inst.error =
			_("having the base register in the register list when "
			  "using write back is UNPREDICTABLE");

		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
			    : T_MNEM_ldr);
		  inst.instruction = THUMB_OP16 (opcode);
		  inst.instruction |= inst.operands[0].reg << 3;
		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
		  narrow = TRUE;
		}
	    }
	  else if (inst.operands[0] .reg == REG_SP)
	    {
	      /* SP-based forms map onto PUSH/POP (with writeback) or
		 SP-relative STR/LDR (single register, no writeback).  */
	      if (inst.operands[0].writeback)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_push : T_MNEM_pop);
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
		  narrow = TRUE;
		}
	    }
	}

      if (!narrow)
	{
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
				inst.operands[0].writeback);
	}
    }
  else
    {
      /* Pre-unified syntax: only 16-bit LDMIA/STMIA with low
	 registers is available.  */
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNKNOWN"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
11492
11493 static void
11494 do_t_ldrex (void)
11495 {
11496 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
11497 || inst.operands[1].postind || inst.operands[1].writeback
11498 || inst.operands[1].immisreg || inst.operands[1].shifted
11499 || inst.operands[1].negative,
11500 BAD_ADDR_MODE);
11501
11502 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
11503
11504 inst.instruction |= inst.operands[0].reg << 12;
11505 inst.instruction |= inst.operands[1].reg << 16;
11506 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
11507 }
11508
11509 static void
11510 do_t_ldrexd (void)
11511 {
11512 if (!inst.operands[1].present)
11513 {
11514 constraint (inst.operands[0].reg == REG_LR,
11515 _("r14 not allowed as first register "
11516 "when second register is omitted"));
11517 inst.operands[1].reg = inst.operands[0].reg + 1;
11518 }
11519 constraint (inst.operands[0].reg == inst.operands[1].reg,
11520 BAD_OVERLAP);
11521
11522 inst.instruction |= inst.operands[0].reg << 12;
11523 inst.instruction |= inst.operands[1].reg << 8;
11524 inst.instruction |= inst.operands[2].reg << 16;
11525 }
11526
/* Parse/encode the Thumb single-register load/store instructions
   (LDR/STR and the byte/halfword/signed variants).  Chooses a 16-bit
   encoding where one exists, otherwise the 32-bit form; may also
   defer the decision to relaxation.  */
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  /* A load that writes PC is a branch: it must be the last
     instruction in an IT block.  */
  if (inst.operands[0].isreg
      && !inst.operands[0].preind
      && inst.operands[0].reg == REG_PC)
    set_it_insn_type_last ();

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  /* Constant/symbolic operand: try to synthesize a literal-pool
	     load (or an equivalent move) instead.  */
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Rik] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
		reject_bad_reg (inst.operands[1].imm);
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* PC- and SP-relative forms have dedicated opcodes.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.reloc.pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		/* No explicit size: let relaxation widen this later if
		   the offset does not fit the 16-bit encoding.  */
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */

      /* Warning for Erratum 752419.  */
      if (opcode == T_MNEM_ldr
	  && inst.operands[0].reg == REG_SP
	  && inst.operands[1].writeback == 1
	  && !inst.operands[1].immisreg)
	{
	  if (no_cpu_selected ()
	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
	    as_warn (_("This instruction may be unpredictable "
		       "if executed on M-profile cores "
		       "with interrupts enabled."));
	}

      /* Do some validations regarding addressing modes.  */
      if (inst.operands[1].immisreg)
	reject_bad_reg (inst.operands[1].imm);

      constraint (inst.operands[1].writeback == 1
		  && inst.operands[0].reg == inst.operands[1].reg,
		  BAD_OVERLAP);

      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      check_ldr_r15_aligned ();
      return;
    }

  /* Pre-unified (divided) syntax: only classic Thumb-1 encodings.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

 op16:
  /* Convert the immediate-offset opcode to its register-offset
     counterpart; ldrsb/ldrsh only exist in register-offset form.  */
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
11713
11714 static void
11715 do_t_ldstd (void)
11716 {
11717 if (!inst.operands[1].present)
11718 {
11719 inst.operands[1].reg = inst.operands[0].reg + 1;
11720 constraint (inst.operands[0].reg == REG_LR,
11721 _("r14 not allowed here"));
11722 constraint (inst.operands[0].reg == REG_R12,
11723 _("r12 not allowed here"));
11724 }
11725
11726 if (inst.operands[2].writeback
11727 && (inst.operands[0].reg == inst.operands[2].reg
11728 || inst.operands[1].reg == inst.operands[2].reg))
11729 as_warn (_("base register written back, and overlaps "
11730 "one of transfer registers"));
11731
11732 inst.instruction |= inst.operands[0].reg << 12;
11733 inst.instruction |= inst.operands[1].reg << 8;
11734 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
11735 }
11736
11737 static void
11738 do_t_ldstt (void)
11739 {
11740 inst.instruction |= inst.operands[0].reg << 12;
11741 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
11742 }
11743
11744 static void
11745 do_t_mla (void)
11746 {
11747 unsigned Rd, Rn, Rm, Ra;
11748
11749 Rd = inst.operands[0].reg;
11750 Rn = inst.operands[1].reg;
11751 Rm = inst.operands[2].reg;
11752 Ra = inst.operands[3].reg;
11753
11754 reject_bad_reg (Rd);
11755 reject_bad_reg (Rn);
11756 reject_bad_reg (Rm);
11757 reject_bad_reg (Ra);
11758
11759 inst.instruction |= Rd << 8;
11760 inst.instruction |= Rn << 16;
11761 inst.instruction |= Rm;
11762 inst.instruction |= Ra << 12;
11763 }
11764
11765 static void
11766 do_t_mlal (void)
11767 {
11768 unsigned RdLo, RdHi, Rn, Rm;
11769
11770 RdLo = inst.operands[0].reg;
11771 RdHi = inst.operands[1].reg;
11772 Rn = inst.operands[2].reg;
11773 Rm = inst.operands[3].reg;
11774
11775 reject_bad_reg (RdLo);
11776 reject_bad_reg (RdHi);
11777 reject_bad_reg (Rn);
11778 reject_bad_reg (Rm);
11779
11780 inst.instruction |= RdLo << 12;
11781 inst.instruction |= RdHi << 8;
11782 inst.instruction |= Rn << 16;
11783 inst.instruction |= Rm;
11784 }
11785
11786 static void
11787 do_t_mov_cmp (void)
11788 {
11789 unsigned Rn, Rm;
11790
11791 Rn = inst.operands[0].reg;
11792 Rm = inst.operands[1].reg;
11793
11794 if (Rn == REG_PC)
11795 set_it_insn_type_last ();
11796
11797 if (unified_syntax)
11798 {
11799 int r0off = (inst.instruction == T_MNEM_mov
11800 || inst.instruction == T_MNEM_movs) ? 8 : 16;
11801 unsigned long opcode;
11802 bfd_boolean narrow;
11803 bfd_boolean low_regs;
11804
11805 low_regs = (Rn <= 7 && Rm <= 7);
11806 opcode = inst.instruction;
11807 if (in_it_block ())
11808 narrow = opcode != T_MNEM_movs;
11809 else
11810 narrow = opcode != T_MNEM_movs || low_regs;
11811 if (inst.size_req == 4
11812 || inst.operands[1].shifted)
11813 narrow = FALSE;
11814
11815 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
11816 if (opcode == T_MNEM_movs && inst.operands[1].isreg
11817 && !inst.operands[1].shifted
11818 && Rn == REG_PC
11819 && Rm == REG_LR)
11820 {
11821 inst.instruction = T2_SUBS_PC_LR;
11822 return;
11823 }
11824
11825 if (opcode == T_MNEM_cmp)
11826 {
11827 constraint (Rn == REG_PC, BAD_PC);
11828 if (narrow)
11829 {
11830 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
11831 but valid. */
11832 warn_deprecated_sp (Rm);
11833 /* R15 was documented as a valid choice for Rm in ARMv6,
11834 but as UNPREDICTABLE in ARMv7. ARM's proprietary
11835 tools reject R15, so we do too. */
11836 constraint (Rm == REG_PC, BAD_PC);
11837 }
11838 else
11839 reject_bad_reg (Rm);
11840 }
11841 else if (opcode == T_MNEM_mov
11842 || opcode == T_MNEM_movs)
11843 {
11844 if (inst.operands[1].isreg)
11845 {
11846 if (opcode == T_MNEM_movs)
11847 {
11848 reject_bad_reg (Rn);
11849 reject_bad_reg (Rm);
11850 }
11851 else if (narrow)
11852 {
11853 /* This is mov.n. */
11854 if ((Rn == REG_SP || Rn == REG_PC)
11855 && (Rm == REG_SP || Rm == REG_PC))
11856 {
11857 as_tsktsk (_("Use of r%u as a source register is "
11858 "deprecated when r%u is the destination "
11859 "register."), Rm, Rn);
11860 }
11861 }
11862 else
11863 {
11864 /* This is mov.w. */
11865 constraint (Rn == REG_PC, BAD_PC);
11866 constraint (Rm == REG_PC, BAD_PC);
11867 constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
11868 }
11869 }
11870 else
11871 reject_bad_reg (Rn);
11872 }
11873
11874 if (!inst.operands[1].isreg)
11875 {
11876 /* Immediate operand. */
11877 if (!in_it_block () && opcode == T_MNEM_mov)
11878 narrow = 0;
11879 if (low_regs && narrow)
11880 {
11881 inst.instruction = THUMB_OP16 (opcode);
11882 inst.instruction |= Rn << 8;
11883 if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
11884 || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
11885 {
11886 if (inst.size_req == 2)
11887 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
11888 else
11889 inst.relax = opcode;
11890 }
11891 }
11892 else
11893 {
11894 constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
11895 && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
11896 THUMB1_RELOC_ONLY);
11897
11898 inst.instruction = THUMB_OP32 (inst.instruction);
11899 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11900 inst.instruction |= Rn << r0off;
11901 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
11902 }
11903 }
11904 else if (inst.operands[1].shifted && inst.operands[1].immisreg
11905 && (inst.instruction == T_MNEM_mov
11906 || inst.instruction == T_MNEM_movs))
11907 {
11908 /* Register shifts are encoded as separate shift instructions. */
11909 bfd_boolean flags = (inst.instruction == T_MNEM_movs);
11910
11911 if (in_it_block ())
11912 narrow = !flags;
11913 else
11914 narrow = flags;
11915
11916 if (inst.size_req == 4)
11917 narrow = FALSE;
11918
11919 if (!low_regs || inst.operands[1].imm > 7)
11920 narrow = FALSE;
11921
11922 if (Rn != Rm)
11923 narrow = FALSE;
11924
11925 switch (inst.operands[1].shift_kind)
11926 {
11927 case SHIFT_LSL:
11928 opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
11929 break;
11930 case SHIFT_ASR:
11931 opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
11932 break;
11933 case SHIFT_LSR:
11934 opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
11935 break;
11936 case SHIFT_ROR:
11937 opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
11938 break;
11939 default:
11940 abort ();
11941 }
11942
11943 inst.instruction = opcode;
11944 if (narrow)
11945 {
11946 inst.instruction |= Rn;
11947 inst.instruction |= inst.operands[1].imm << 3;
11948 }
11949 else
11950 {
11951 if (flags)
11952 inst.instruction |= CONDS_BIT;
11953
11954 inst.instruction |= Rn << 8;
11955 inst.instruction |= Rm << 16;
11956 inst.instruction |= inst.operands[1].imm;
11957 }
11958 }
11959 else if (!narrow)
11960 {
11961 /* Some mov with immediate shift have narrow variants.
11962 Register shifts are handled above. */
11963 if (low_regs && inst.operands[1].shifted
11964 && (inst.instruction == T_MNEM_mov
11965 || inst.instruction == T_MNEM_movs))
11966 {
11967 if (in_it_block ())
11968 narrow = (inst.instruction == T_MNEM_mov);
11969 else
11970 narrow = (inst.instruction == T_MNEM_movs);
11971 }
11972
11973 if (narrow)
11974 {
11975 switch (inst.operands[1].shift_kind)
11976 {
11977 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
11978 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
11979 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
11980 default: narrow = FALSE; break;
11981 }
11982 }
11983
11984 if (narrow)
11985 {
11986 inst.instruction |= Rn;
11987 inst.instruction |= Rm << 3;
11988 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
11989 }
11990 else
11991 {
11992 inst.instruction = THUMB_OP32 (inst.instruction);
11993 inst.instruction |= Rn << r0off;
11994 encode_thumb32_shifted_operand (1);
11995 }
11996 }
11997 else
11998 switch (inst.instruction)
11999 {
12000 case T_MNEM_mov:
12001 /* In v4t or v5t a move of two lowregs produces unpredictable
12002 results. Don't allow this. */
12003 if (low_regs)
12004 {
12005 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
12006 "MOV Rd, Rs with two low registers is not "
12007 "permitted on this architecture");
12008 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
12009 arm_ext_v6);
12010 }
12011
12012 inst.instruction = T_OPCODE_MOV_HR;
12013 inst.instruction |= (Rn & 0x8) << 4;
12014 inst.instruction |= (Rn & 0x7);
12015 inst.instruction |= Rm << 3;
12016 break;
12017
12018 case T_MNEM_movs:
12019 /* We know we have low registers at this point.
12020 Generate LSLS Rd, Rs, #0. */
12021 inst.instruction = T_OPCODE_LSL_I;
12022 inst.instruction |= Rn;
12023 inst.instruction |= Rm << 3;
12024 break;
12025
12026 case T_MNEM_cmp:
12027 if (low_regs)
12028 {
12029 inst.instruction = T_OPCODE_CMP_LR;
12030 inst.instruction |= Rn;
12031 inst.instruction |= Rm << 3;
12032 }
12033 else
12034 {
12035 inst.instruction = T_OPCODE_CMP_HR;
12036 inst.instruction |= (Rn & 0x8) << 4;
12037 inst.instruction |= (Rn & 0x7);
12038 inst.instruction |= Rm << 3;
12039 }
12040 break;
12041 }
12042 return;
12043 }
12044
12045 inst.instruction = THUMB_OP16 (inst.instruction);
12046
12047 /* PR 10443: Do not silently ignore shifted operands. */
12048 constraint (inst.operands[1].shifted,
12049 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
12050
12051 if (inst.operands[1].isreg)
12052 {
12053 if (Rn < 8 && Rm < 8)
12054 {
12055 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
12056 since a MOV instruction produces unpredictable results. */
12057 if (inst.instruction == T_OPCODE_MOV_I8)
12058 inst.instruction = T_OPCODE_ADD_I3;
12059 else
12060 inst.instruction = T_OPCODE_CMP_LR;
12061
12062 inst.instruction |= Rn;
12063 inst.instruction |= Rm << 3;
12064 }
12065 else
12066 {
12067 if (inst.instruction == T_OPCODE_MOV_I8)
12068 inst.instruction = T_OPCODE_MOV_HR;
12069 else
12070 inst.instruction = T_OPCODE_CMP_HR;
12071 do_t_cpy ();
12072 }
12073 }
12074 else
12075 {
12076 constraint (Rn > 7,
12077 _("only lo regs allowed with immediate"));
12078 inst.instruction |= Rn << 8;
12079 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
12080 }
12081 }
12082
12083 static void
12084 do_t_mov16 (void)
12085 {
12086 unsigned Rd;
12087 bfd_vma imm;
12088 bfd_boolean top;
12089
12090 top = (inst.instruction & 0x00800000) != 0;
12091 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
12092 {
12093 constraint (top, _(":lower16: not allowed this instruction"));
12094 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
12095 }
12096 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
12097 {
12098 constraint (!top, _(":upper16: not allowed this instruction"));
12099 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
12100 }
12101
12102 Rd = inst.operands[0].reg;
12103 reject_bad_reg (Rd);
12104
12105 inst.instruction |= Rd << 8;
12106 if (inst.reloc.type == BFD_RELOC_UNUSED)
12107 {
12108 imm = inst.reloc.exp.X_add_number;
12109 inst.instruction |= (imm & 0xf000) << 4;
12110 inst.instruction |= (imm & 0x0800) << 15;
12111 inst.instruction |= (imm & 0x0700) << 4;
12112 inst.instruction |= (imm & 0x00ff);
12113 }
12114 }
12115
/* Parse/encode Thumb MVN/MVNS/TST/CMN: one register plus a flexible
   second operand (register, shifted register or immediate).  */
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  /* NOTE(review): Rm is read even when operand 1 is an immediate; in
     that case its value appears unused except by the reject_bad_reg
     call below — verify the parser leaves .reg benign then.  */
  Rm = inst.operands[1].reg;

  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      /* MVN/MVNS put the register in bits 11:8; TST/CMN in 19:16.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn
	       || inst.instruction == T_MNEM_tst)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Divided syntax: only the 16-bit, low-register, unshifted
	 register form exists.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
12195
/* Parse/encode Thumb MRS.  Operand 1 is either a banked/special
   register (isreg set, encoding packed into .reg by the parser) or a
   PSR-flags immediate.  */
static void
do_t_mrs (void)
{
  unsigned Rd;

  /* VMRS may be written with MRS-style syntax; try that first.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      /* Banked register form; .reg holds the packed SYSm/R value.
	 NOTE(review): the 0x200/0xf000 test mirrors how the parser
	 packs banked registers — confirm against parse_operands.  */
      unsigned br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698:  The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bfd_boolean m_profile =
	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
	  constraint ((flags != 0) && m_profile, _("selected processor does "
						   "not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      inst.instruction |= 0xf0000;
    }
}
12243
/* Parse/encode Thumb MSR.  Operand 0 is the special-register / PSR
   flags specification, operand 1 the source register.  */
static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  /* VMSR may be written with MSR-style syntax; try that first.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  /* Banked-register destinations arrive in .reg, plain PSR masks
     in .imm; either way the bits are packed the same.  */
  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698:  The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bfd_boolean m_profile =
	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		    && (bits & ~(PSR_s | PSR_f)) != 0)
		   || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		       && bits != PSR_f)) && m_profile,
		  _("selected processor does not support requested special "
		    "purpose register"));
    }
  else
    constraint ((flags & 0xff) != 0, _("selected processor does not support "
				       "requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
12290
12291 static void
12292 do_t_mul (void)
12293 {
12294 bfd_boolean narrow;
12295 unsigned Rd, Rn, Rm;
12296
12297 if (!inst.operands[2].present)
12298 inst.operands[2].reg = inst.operands[0].reg;
12299
12300 Rd = inst.operands[0].reg;
12301 Rn = inst.operands[1].reg;
12302 Rm = inst.operands[2].reg;
12303
12304 if (unified_syntax)
12305 {
12306 if (inst.size_req == 4
12307 || (Rd != Rn
12308 && Rd != Rm)
12309 || Rn > 7
12310 || Rm > 7)
12311 narrow = FALSE;
12312 else if (inst.instruction == T_MNEM_muls)
12313 narrow = !in_it_block ();
12314 else
12315 narrow = in_it_block ();
12316 }
12317 else
12318 {
12319 constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
12320 constraint (Rn > 7 || Rm > 7,
12321 BAD_HIREG);
12322 narrow = TRUE;
12323 }
12324
12325 if (narrow)
12326 {
12327 /* 16-bit MULS/Conditional MUL. */
12328 inst.instruction = THUMB_OP16 (inst.instruction);
12329 inst.instruction |= Rd;
12330
12331 if (Rd == Rn)
12332 inst.instruction |= Rm << 3;
12333 else if (Rd == Rm)
12334 inst.instruction |= Rn << 3;
12335 else
12336 constraint (1, _("dest must overlap one source register"));
12337 }
12338 else
12339 {
12340 constraint (inst.instruction != T_MNEM_mul,
12341 _("Thumb-2 MUL must not set flags"));
12342 /* 32-bit MUL. */
12343 inst.instruction = THUMB_OP32 (inst.instruction);
12344 inst.instruction |= Rd << 8;
12345 inst.instruction |= Rn << 16;
12346 inst.instruction |= Rm << 0;
12347
12348 reject_bad_reg (Rd);
12349 reject_bad_reg (Rn);
12350 reject_bad_reg (Rm);
12351 }
12352 }
12353
12354 static void
12355 do_t_mull (void)
12356 {
12357 unsigned RdLo, RdHi, Rn, Rm;
12358
12359 RdLo = inst.operands[0].reg;
12360 RdHi = inst.operands[1].reg;
12361 Rn = inst.operands[2].reg;
12362 Rm = inst.operands[3].reg;
12363
12364 reject_bad_reg (RdLo);
12365 reject_bad_reg (RdHi);
12366 reject_bad_reg (Rn);
12367 reject_bad_reg (Rm);
12368
12369 inst.instruction |= RdLo << 12;
12370 inst.instruction |= RdHi << 8;
12371 inst.instruction |= Rn << 16;
12372 inst.instruction |= Rm;
12373
12374 if (RdLo == RdHi)
12375 as_tsktsk (_("rdhi and rdlo must be different"));
12376 }
12377
/* Encode NOP and the other hint instructions; the hint number arrives
   in operands[0].imm.  */
static void
do_t_nop (void)
{
  set_it_insn_type (NEUTRAL_IT_INSN);

  if (unified_syntax)
    {
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
	{
	  /* Hints above 15 only exist in the 32-bit encoding.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].imm;
	}
      else
	{
	  /* PR9722: Check for Thumb2 availability before
	     generating a thumb2 nop instruction.  */
	  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= inst.operands[0].imm << 4;
	    }
	  else
	    /* Fall back to 0x46c0 ("mov r8, r8"), the traditional
	       Thumb-1 NOP.  */
	    inst.instruction = 0x46c0;
	}
    }
  else
    {
      constraint (inst.operands[0].present,
		  _("Thumb does not support NOP with hints"));
      inst.instruction = 0x46c0;
    }
}
12410
/* Encode NEG/NEGS in either Thumb width.  */
static void
do_t_neg (void)
{
  if (unified_syntax)
    {
      bfd_boolean narrow;

      /* The 16-bit encoding sets the flags, so the choice of width
	 follows the IT-block rules.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      /* High registers or an explicit .w suffix force 32 bits.  */
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      if (!narrow)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg << 8;
	  inst.instruction |= inst.operands[1].reg << 16;
	}
      else
	{
	  inst.instruction = THUMB_OP16 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
  else
    {
      /* Pre-UAL syntax: low registers and the flag-setting 16-bit
	 encoding only.  */
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
		  BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
}
12451
/* Encode Thumb-2 ORN (OR NOT); there is no 16-bit form.  */
static void
do_t_orn (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  /* "orn Rd, Rm" is shorthand for "orn Rd, Rd, Rm".  */
  Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;

  reject_bad_reg (Rd);
  /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN.  */
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;

  if (!inst.operands[2].isreg)
    {
      /* Immediate form: flip the opcode over to the modified-immediate
	 encoding and let the fixup encode the constant.  */
      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
    }
  else
    {
      unsigned Rm;

      Rm = inst.operands[2].reg;
      reject_bad_reg (Rm);

      /* Register-shifted register operands are not available here.  */
      constraint (inst.operands[2].shifted
		  && inst.operands[2].immisreg,
		  _("shift must be constant"));
      encode_thumb32_shifted_operand (2);
    }
}
12485
12486 static void
12487 do_t_pkhbt (void)
12488 {
12489 unsigned Rd, Rn, Rm;
12490
12491 Rd = inst.operands[0].reg;
12492 Rn = inst.operands[1].reg;
12493 Rm = inst.operands[2].reg;
12494
12495 reject_bad_reg (Rd);
12496 reject_bad_reg (Rn);
12497 reject_bad_reg (Rm);
12498
12499 inst.instruction |= Rd << 8;
12500 inst.instruction |= Rn << 16;
12501 inst.instruction |= Rm;
12502 if (inst.operands[3].present)
12503 {
12504 unsigned int val = inst.reloc.exp.X_add_number;
12505 constraint (inst.reloc.exp.X_op != O_constant,
12506 _("expression too complex"));
12507 inst.instruction |= (val & 0x1c) << 10;
12508 inst.instruction |= (val & 0x03) << 6;
12509 }
12510 }
12511
12512 static void
12513 do_t_pkhtb (void)
12514 {
12515 if (!inst.operands[3].present)
12516 {
12517 unsigned Rtmp;
12518
12519 inst.instruction &= ~0x00000020;
12520
12521 /* PR 10168. Swap the Rm and Rn registers. */
12522 Rtmp = inst.operands[1].reg;
12523 inst.operands[1].reg = inst.operands[2].reg;
12524 inst.operands[2].reg = Rtmp;
12525 }
12526 do_t_pkhbt ();
12527 }
12528
/* Encode Thumb-2 PLD-style preload operations.  */
static void
do_t_pld (void)
{
  /* A register offset must not be SP or PC.  */
  if (inst.operands[0].immisreg)
    reject_bad_reg (inst.operands[0].imm);

  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
12537
/* Encode PUSH/POP, preferring the 16-bit encodings (low registers,
   optionally plus LR for push or PC for pop) and falling back to the
   32-bit LDM/STM form under unified syntax.  */
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  if (inst.size_req != 4 && (mask & ~0xff) == 0)
    /* Low registers only: plain 16-bit encoding.  */
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if (inst.size_req != 4
	   && (mask & ~0xff) == (1U << (inst.instruction == T_MNEM_push
				       ? REG_LR : REG_PC)))
    {
      /* Low registers plus exactly LR (push) or PC (pop): still
	 16-bit, with the extra register selected by its own bit.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      /* Anything else becomes a 32-bit LDM/STM with SP (r13) as base.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_ldmstm (13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
12570
/* Encode Thumb-2 RBIT (reverse bits).  */
static void
do_t_rbit (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  /* The source register is deliberately encoded twice: the T32 RBIT
     encoding requires Rm in both the Rn and Rm fields.  */
  inst.instruction |= Rm << 16;
  inst.instruction |= Rm;
}
12586
/* Encode REV-family byte-reversal instructions in either Thumb width.  */
static void
do_t_rev (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (Rd <= 7 && Rm <= 7
      && inst.size_req != 4)
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      /* As with RBIT, the 32-bit encoding wants Rm in both source
	 register fields.  */
      inst.instruction |= Rm << 16;
      inst.instruction |= Rm;
    }
  else
    inst.error = BAD_HIREG;
}
12615
12616 static void
12617 do_t_rrx (void)
12618 {
12619 unsigned Rd, Rm;
12620
12621 Rd = inst.operands[0].reg;
12622 Rm = inst.operands[1].reg;
12623
12624 reject_bad_reg (Rd);
12625 reject_bad_reg (Rm);
12626
12627 inst.instruction |= Rd << 8;
12628 inst.instruction |= Rm;
12629 }
12630
/* Encode RSB/RSBS.  "rsb Rd, Rn, #0" can shrink to the 16-bit NEG
   encoding; every other form is 32-bit.  */
static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Bit 20 is the S bit; the 16-bit NEG encoding sets flags, so
	 it is only usable where the IT-block rules permit.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      if (inst.reloc.exp.X_op != O_constant
	  || inst.reloc.exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.reloc.type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  /* Switch to the modified-immediate encoding; the constant
	     itself is applied by the fixup.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
12685
/* Encode Thumb SETEND.  Deprecated from ARMv8 onwards.  */
static void
do_t_setend (void)
{
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
      as_tsktsk (_("setend use is deprecated for ARMv8"));

  /* SETEND is not permitted inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);
  /* Bit 3 selects big-endian data accesses (operand "BE").  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x8;
}
12697
/* Encode the immediate- and register-shift instructions
   (ASR/LSL/LSR/ROR and their flag-setting forms), choosing between the
   16-bit encodings and the 32-bit Thumb-2 forms.  A 32-bit shift by
   immediate is actually emitted as MOV/MOVS with a shifted operand.  */
static void
do_t_shift (void)
{
  /* Two-operand form: "lsl Rd, Rm" == "lsl Rd, Rd, Rm".  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* 16-bit shifts set the flags, so the IT-block rules decide the
	 initial width preference...  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      /* ...which high registers, ROR-by-immediate (no 16-bit form),
	 a non-overlapping register shift, or an explicit .w override.  */
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* Shift by immediate: emit as MOV/MOVS with a shifted
		 register operand.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.reloc.type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      /* The shift amount is applied by a fixup.  */
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Pre-UAL syntax: 16-bit encodings only.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;

	  /* PR 12854: Error on extraneous shifts.  */
	  constraint (inst.operands[2].shifted,
		      _("extraneous shift as part of operand to shift insn"));
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
12845
12846 static void
12847 do_t_simd (void)
12848 {
12849 unsigned Rd, Rn, Rm;
12850
12851 Rd = inst.operands[0].reg;
12852 Rn = inst.operands[1].reg;
12853 Rm = inst.operands[2].reg;
12854
12855 reject_bad_reg (Rd);
12856 reject_bad_reg (Rn);
12857 reject_bad_reg (Rm);
12858
12859 inst.instruction |= Rd << 8;
12860 inst.instruction |= Rn << 16;
12861 inst.instruction |= Rm;
12862 }
12863
/* Encode a three-register Thumb-2 SIMD instruction whose assembler
   operand order is Rd, Rm, Rn — i.e. the second and third operands
   are swapped relative to their encoding fields (contrast do_t_simd).  */
static void
do_t_simd2 (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
}
12881
/* Encode Thumb-2 SMC (secure monitor call) with a 16-bit immediate.  */
static void
do_t_smc (void)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
	      _("SMC is not permitted on this architecture"));
  constraint (inst.reloc.exp.X_op != O_constant,
	      _("expression too complex"));
  inst.reloc.type = BFD_RELOC_UNUSED;
  /* Scatter the immediate over the encoding's three fields.  */
  inst.instruction |= (value & 0xf000) >> 12;
  inst.instruction |= (value & 0x0ff0);
  inst.instruction |= (value & 0x000f) << 16;
  /* PR gas/15623: SMC instructions must be last in an IT block.  */
  set_it_insn_type_last ();
}
12897
12898 static void
12899 do_t_hvc (void)
12900 {
12901 unsigned int value = inst.reloc.exp.X_add_number;
12902
12903 inst.reloc.type = BFD_RELOC_UNUSED;
12904 inst.instruction |= (value & 0x0fff);
12905 inst.instruction |= (value & 0xf000) << 4;
12906 }
12907
/* Common encoder for Thumb-2 SSAT and USAT.  BIAS is subtracted from
   the saturation-position operand before encoding: 1 for SSAT (whose
   position field is off by one), 0 for USAT.  */
static void
do_t_ssat_usat (int bias)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  /* Optional shift of the source operand.  */
  if (inst.operands[3].present)
    {
      offsetT shift_amount = inst.reloc.exp.X_add_number;

      inst.reloc.type = BFD_RELOC_UNUSED;

      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit.  */

	  /* Split the amount into the imm3 and imm2 fields.  */
	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
12945
/* Encode Thumb-2 SSAT: its position field is encoded biased by one.  */
static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
12951
12952 static void
12953 do_t_ssat16 (void)
12954 {
12955 unsigned Rd, Rn;
12956
12957 Rd = inst.operands[0].reg;
12958 Rn = inst.operands[2].reg;
12959
12960 reject_bad_reg (Rd);
12961 reject_bad_reg (Rn);
12962
12963 inst.instruction |= Rd << 8;
12964 inst.instruction |= inst.operands[1].imm - 1;
12965 inst.instruction |= Rn << 16;
12966 }
12967
/* Encode Thumb-2 STREX.  */
static void
do_t_strex (void)
{
  /* Only a plain [Rn {, #imm}] addressing mode is accepted.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  /* The offset is an unsigned 8-bit value, resolved by fixup.  */
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
12984
12985 static void
12986 do_t_strexd (void)
12987 {
12988 if (!inst.operands[2].present)
12989 inst.operands[2].reg = inst.operands[1].reg + 1;
12990
12991 constraint (inst.operands[0].reg == inst.operands[1].reg
12992 || inst.operands[0].reg == inst.operands[2].reg
12993 || inst.operands[0].reg == inst.operands[3].reg,
12994 BAD_OVERLAP);
12995
12996 inst.instruction |= inst.operands[0].reg;
12997 inst.instruction |= inst.operands[1].reg << 12;
12998 inst.instruction |= inst.operands[2].reg << 8;
12999 inst.instruction |= inst.operands[3].reg << 16;
13000 }
13001
13002 static void
13003 do_t_sxtah (void)
13004 {
13005 unsigned Rd, Rn, Rm;
13006
13007 Rd = inst.operands[0].reg;
13008 Rn = inst.operands[1].reg;
13009 Rm = inst.operands[2].reg;
13010
13011 reject_bad_reg (Rd);
13012 reject_bad_reg (Rn);
13013 reject_bad_reg (Rm);
13014
13015 inst.instruction |= Rd << 8;
13016 inst.instruction |= Rn << 16;
13017 inst.instruction |= Rm;
13018 inst.instruction |= inst.operands[3].imm << 4;
13019 }
13020
/* Encode the extend instructions (SXTH-style) in either Thumb width.  */
static void
do_t_sxth (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  /* The 16-bit encoding needs low registers and no rotation.  */
  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      /* Optional rotation of the source operand.  */
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
13056
/* Encode Thumb SVC (formerly SWI).  */
static void
do_t_swi (void)
{
  /* We have to do the following check manually as ARM_EXT_OS only applies
     to ARM_EXT_V6M.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6m))
    {
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_os)
	  /* This only applies to the v6m, however, not later
	     architectures.  */
	  && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7))
	as_bad (_("SVC is not permitted on this architecture"));
      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, arm_ext_os);
    }

  /* The immediate is applied by the fixup.  */
  inst.reloc.type = BFD_RELOC_ARM_SWI;
}
13073
/* Encode Thumb-2 TBB/TBH (table branch).  */
static void
do_t_tb (void)
{
  unsigned Rn, Rm;
  int half;

  /* Bit 4 distinguishes TBH (halfword table) from TBB (byte table).  */
  half = (inst.instruction & 0x10) != 0;
  /* A table branch must be the last instruction of an IT block.  */
  set_it_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  /* Only TBH accepts a shifted index.  */
  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
13095
/* Encode UDF (permanently undefined) in either Thumb width.  */
static void
do_t_udf (void)
{
  /* The immediate is optional and defaults to zero.  */
  if (!inst.operands[0].present)
    inst.operands[0].imm = 0;

  if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
    {
      constraint (inst.size_req == 2,
                  _("immediate value out of range"));
      inst.instruction = THUMB_OP32 (inst.instruction);
      /* The 32-bit form splits the immediate into 4- and 12-bit fields.  */
      inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
      inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
    }
  else
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].imm;
    }

  set_it_insn_type (NEUTRAL_IT_INSN);
}
13118
13119
/* Encode Thumb-2 USAT; unlike SSAT its position field is not biased.  */
static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
13125
13126 static void
13127 do_t_usat16 (void)
13128 {
13129 unsigned Rd, Rn;
13130
13131 Rd = inst.operands[0].reg;
13132 Rn = inst.operands[2].reg;
13133
13134 reject_bad_reg (Rd);
13135 reject_bad_reg (Rn);
13136
13137 inst.instruction |= Rd << 8;
13138 inst.instruction |= inst.operands[1].imm;
13139 inst.instruction |= Rn << 16;
13140 }
13141
13142 /* Neon instruction encoder helpers. */
13143
13144 /* Encodings for the different types for various Neon opcodes. */
13145
/* An "invalid" code for the following tables.  */
#define N_INV -1u

/* One row of the Neon overload table: the alternative base encodings
   for a single mnemonic (N_INV where a variant does not exist).  */
struct neon_tab_entry
{
  unsigned integer;		/* Integer (or sole) variant.  */
  unsigned float_or_poly;	/* Float or polynomial variant.  */
  unsigned scalar_or_imm;	/* Scalar or immediate variant.  */
};
13155
/* Map overloaded Neon opcodes to their respective encodings.  Each X()
   entry lists up to three base opcodes for one mnemonic in the order
   (integer, float-or-poly, scalar-or-imm).  The list expands twice
   below: once to build the N_MNEM_* enumeration and once to build
   neon_enc_tab[], which the NEON_ENC_* accessors index.  */
#define NEON_ENC_TAB					\
  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */  	\
  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
  X(vfma,	N_INV, 0x0000c10, N_INV),		\
  X(vfms,	N_INV, 0x0200c10, N_INV),		\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
  X(vqrdmlah,   0x3000b10, N_INV,     0x0800e40),	\
  X(vqrdmlsh,   0x3000c10, N_INV,     0x0800f40),	\
  X(vshl,	0x0000400, N_INV,     0x0800510),	\
  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
  X(vand,	0x0000110, N_INV,     0x0800030),	\
  X(vbic,	0x0100110, N_INV,     0x0800030),	\
  X(veor,	0x1000110, N_INV,     N_INV),		\
  X(vorn,	0x0300110, N_INV,     0x0800010),	\
  X(vorr,	0x0200110, N_INV,     0x0800010),	\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),		\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
  X(vst2,	0x0000100, 0x0800100, N_INV),		\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
  X(vst3,	0x0000200, 0x0800200, N_INV),		\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
  X(vst4,	0x0000300, 0x0800300, N_INV),		\
  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
  X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
  X(vnmla,      0xe100a40, 0xe100b40, N_INV),		\
  X(vnmls,      0xe100a00, 0xe100b00, N_INV),		\
  X(vfnma,      0xe900a40, 0xe900b40, N_INV),		\
  X(vfnms,      0xe900a00, 0xe900b00, N_INV),		\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
  X(vcmpez,	0xeb50ac0, 0xeb50bc0, N_INV),		\
  X(vseleq,	0xe000a00, N_INV,     N_INV),		\
  X(vselvs,	0xe100a00, N_INV,     N_INV),		\
  X(vselge,	0xe200a00, N_INV,     N_INV),		\
  X(vselgt,	0xe300a00, N_INV,     N_INV),		\
  X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),		\
  X(vminnm,	0xe800a40, 0x3200f10, N_INV),		\
  X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),		\
  X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),		\
  X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),		\
  X(aes,	0x3b00300, N_INV,     N_INV),		\
  X(sha3op,	0x2000c00, N_INV,     N_INV),		\
  X(sha1h,	0x3b902c0, N_INV,     N_INV),		\
  X(sha2op,     0x3ba0380, N_INV,     N_INV)
13233
/* Enumeration of the overloaded Neon mnemonics, N_MNEM_<opc>,
   generated from NEON_ENC_TAB.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};

/* Encoding table indexed by the N_MNEM_* values above.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};
13247
/* Do not use these macros; instead, use NEON_ENCODE defined below.
   Each one selects a column of neon_enc_tab.  The low 28 bits of X
   index the table; the high bits are masked off (and, for the SINGLE/
   DOUBLE/FPV8 variants, merged back into the result).  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
#define NEON_ENC_FPV8_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))

/* Replace inst.instruction (currently an N_MNEM_* value) with the
   chosen variant's base encoding and mark the instruction as Neon.  */
#define NEON_ENCODE(type, inst)					\
  do								\
    {								\
      inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
      inst.is_neon = 1;						\
    }								\
  while (0)
13272
/* Diagnose a Neon type suffix (e.g. ".s32") applied to an instruction
   that was not encoded as Neon.  Expands to a statement that may
   return from the enclosing encoder function.  */
#define check_neon_suffixes						\
  do									\
    {									\
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
	{								\
	  as_bad (_("invalid neon suffix for non neon instruction"));	\
	  return;							\
	}								\
    }									\
  while (0)
13283
/* Define shapes for instruction operands.  The following mnemonic characters
   are used in this table:

   F - VFP S<n> register
   H - Half-precision (16-bit) register
   D - Neon D<n> register
   Q - Neon Q<n> register
   I - Immediate
   S - Scalar
   R - ARM register
   L - D<n> register list

   This table is used to generate various data:
   - enumerations of the form NS_DDR to be used as arguments to
     neon_select_shape.
   - a table classifying shapes into single, double, quad, mixed.
   - a table used to drive neon_select_shape.  */
13300
/* Each X(count, (elements), class) entry expands three ways below: into
   the NS_* enumeration, the shape-class table, and neon_shape_tab.  */
#define NEON_SHAPE_DEF			\
  X(3, (D, D, D), DOUBLE),		\
  X(3, (Q, Q, Q), QUAD),		\
  X(3, (D, D, I), DOUBLE),		\
  X(3, (Q, Q, I), QUAD),		\
  X(3, (D, D, S), DOUBLE),		\
  X(3, (Q, Q, S), QUAD),		\
  X(2, (D, D), DOUBLE),			\
  X(2, (Q, Q), QUAD),			\
  X(2, (D, S), DOUBLE),			\
  X(2, (Q, S), QUAD),			\
  X(2, (D, R), DOUBLE),			\
  X(2, (Q, R), QUAD),			\
  X(2, (D, I), DOUBLE),			\
  X(2, (Q, I), QUAD),			\
  X(3, (D, L, D), DOUBLE),		\
  X(2, (D, Q), MIXED),			\
  X(2, (Q, D), MIXED),			\
  X(3, (D, Q, I), MIXED),		\
  X(3, (Q, D, I), MIXED),		\
  X(3, (Q, D, D), MIXED),		\
  X(3, (D, Q, Q), MIXED),		\
  X(3, (Q, Q, D), MIXED),		\
  X(3, (Q, D, S), MIXED),		\
  X(3, (D, Q, S), MIXED),		\
  X(4, (D, D, D, I), DOUBLE),		\
  X(4, (Q, Q, Q, I), QUAD),		\
  X(2, (F, F), SINGLE),			\
  X(3, (F, F, F), SINGLE),		\
  X(2, (F, I), SINGLE),			\
  X(2, (F, D), MIXED),			\
  X(2, (D, F), MIXED),			\
  X(3, (F, F, I), MIXED),		\
  X(4, (R, R, F, F), SINGLE),		\
  X(4, (F, F, R, R), SINGLE),		\
  X(3, (D, R, R), DOUBLE),		\
  X(3, (R, R, D), DOUBLE),		\
  X(2, (S, R), SINGLE),			\
  X(2, (R, S), SINGLE),			\
  X(2, (F, R), SINGLE),			\
  X(2, (R, F), SINGLE),			\
/* Half float shape supported so far.  */\
  X (2, (H, D), MIXED),			\
  X (2, (D, H), MIXED),			\
  X (2, (H, F), MIXED),			\
  X (2, (F, H), MIXED),			\
  X (2, (H, H), HALF),			\
  X (2, (H, R), HALF),			\
  X (2, (R, H), HALF),			\
  X (2, (H, I), HALF),			\
  X (3, (H, H, H), HALF),		\
  X (3, (H, F, I), MIXED),		\
  X (3, (F, H, I), MIXED)
13354
/* Build the NS_* shape enumeration (e.g. NS_DDD) from NEON_SHAPE_DEF
   by pasting the element letters together.  */
#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D

#define X(N, L, C) S##N L

enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL	/* Sentinel: no shape matched.  */
};

#undef X
#undef S2
#undef S3
#undef S4
13371
/* Classification of each shape, indexed by the NS_* value.  */
enum neon_shape_class
{
  SC_HALF,
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};

#define X(N, L, C) SC_##C

static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X
13389
/* The individual element kinds a shape can contain, mirroring the
   mnemonic letters documented above NEON_SHAPE_DEF.  */
enum neon_shape_el
{
  SE_H,
  SE_F,
  SE_D,
  SE_Q,
  SE_I,
  SE_S,
  SE_R,
  SE_L
};

/* Register widths of above.  */
static unsigned neon_shape_el_size[] =
{
  16,	/* SE_H.  */
  32,	/* SE_F.  */
  64,	/* SE_D.  */
  128,	/* SE_Q.  */
  0,	/* SE_I: immediates have no register width.  */
  32,	/* SE_S.  */
  32,	/* SE_R.  */
  0	/* SE_L: register lists have no single width.  */
};
13414
/* Element count and element kinds for one shape.  */
struct neon_shape_info
{
  unsigned els;
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
};

/* Re-expand NEON_SHAPE_DEF into per-shape element lists.  */
#define S2(A,B)		{ SE_##A, SE_##B }
#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
13436
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.  */

enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_P64  = 0x0200000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
  /* The following deliberately reuse the low bit values above: they are
     only interpreted as modifiers when N_EQK is also set (see
     neon_modify_type_size), so they never clash with the type bits.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_P64
};
13481
/* All of the N_EQK modifier bits together.  */
#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

/* Convenience groupings of the type bits above, used as shorthand when
   listing the types an instruction accepts in neon_check_type calls.  */
#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_S_32     (N_S8 | N_S16 | N_S32)
#define N_F_16_32  (N_F16 | N_F32)
#define N_SUF_32   (N_SU_32 | N_F_16_32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
#define N_F_ALL    (N_F16 | N_F32 | N_F64)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
13497
/* Select a "shape" for the current instruction (describing register types or
   sizes) from a list of alternatives. Return NS_NULL if the current instruction
   doesn't fit. For non-polymorphic shapes, checking is usually done as a
   function of operand parsing, so this function doesn't need to be called.
   Shapes should be listed in order of decreasing length.

   NOTE: has the side effect of copying operand 0 into operand 1 when the
   latter is missing, and reports via first_error when nothing matches.  */

static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Try each candidate shape in turn until one matches all present
     operands exactly.  */
  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  switch (neon_shape_tab[shape].el[j])
	    {
	      /* If a  .f16,  .16,  .u16,  .s16 type specifier is given over
		 a VFP single precision register operand, it's essentially
		 means only half of the register is used.

		 If the type specifier is given after the mnemonics, the
		 information is stored in inst.vectype.  If the type specifier
		 is given after register operand, the information is stored
		 in inst.operands[].vectype.

		 When there is only one type specifier, and all the register
		 operands are the same type of hardware register, the type
		 specifier applies to all register operands.

		 If no type specifier is given, the shape is inferred from
		 operand information.

		 for example:
		 vadd.f16 s0, s1, s2:		NS_HHH
		 vabs.f16 s0, s1:		NS_HH
		 vmov.f16 s0, r1:		NS_HR
		 vmov.f16 r0, s1:		NS_RH
		 vcvt.f16 r0, s1:		NS_RH
		 vcvt.f16.s32	s2, s2, #29:	NS_HFI
		 vcvt.f16.s32	s2, s2:		NS_HF
	      */
	    case SE_H:
	      /* Half precision: a single-precision register used with an
		 explicit 16-bit type, given either on the mnemonic, on
		 this operand, or on a per-operand vectype.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1
			 && inst.vectype.el[0].size == 16)
			|| (inst.vectype.elems > 1
			    && inst.vectype.el[j].size == 16)
			|| (inst.vectype.elems == 0
			    && inst.operands[j].vectype.type != NT_invtype
			    && inst.operands[j].vectype.size == 16))))
		matches = 0;
	      break;

	    case SE_F:
	      /* Single precision: an S register with either a 32-bit type
		 or no type at all.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1 && inst.vectype.el[0].size == 32)
			|| (inst.vectype.elems > 1 && inst.vectype.el[j].size == 32)
			|| (inst.vectype.elems == 0
			    && (inst.operands[j].vectype.size == 32
				|| inst.operands[j].vectype.type == NT_invtype)))))
		matches = 0;
	      break;

	    case SE_D:
	      /* A D (double-word) vector register.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      /* An ARM core register.  */
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      /* A Q (quad-word) vector register.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      /* An immediate (neither register nor scalar).  */
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      /* A scalar (indexed vector element).  */
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_L:
	      /* Register lists always match here; they are validated
		 during operand parsing.  */
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
	/* We've matched all the entries in the shape table, and we don't
	   have any left over operands which have not been matched.  */
	break;
    }

  va_end (ap);

  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
13640
13641 /* True if SHAPE is predominantly a quadword operation (most of the time, this
13642 means the Q bit should be set). */
13643
13644 static int
13645 neon_quad (enum neon_shape shape)
13646 {
13647 return neon_shape_class[shape] == SC_QUAD;
13648 }
13649
13650 static void
13651 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
13652 unsigned *g_size)
13653 {
13654 /* Allow modification to be made to types which are constrained to be
13655 based on the key element, based on bits set alongside N_EQK. */
13656 if ((typebits & N_EQK) != 0)
13657 {
13658 if ((typebits & N_HLF) != 0)
13659 *g_size /= 2;
13660 else if ((typebits & N_DBL) != 0)
13661 *g_size *= 2;
13662 if ((typebits & N_SGN) != 0)
13663 *g_type = NT_signed;
13664 else if ((typebits & N_UNS) != 0)
13665 *g_type = NT_unsigned;
13666 else if ((typebits & N_INT) != 0)
13667 *g_type = NT_integer;
13668 else if ((typebits & N_FLT) != 0)
13669 *g_type = NT_float;
13670 else if ((typebits & N_SIZ) != 0)
13671 *g_type = NT_untyped;
13672 }
13673 }
13674
/* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
   operand type, i.e. the single type specified in a Neon instruction when it
   is the only one given.  */

static struct neon_type_el
neon_type_promote (struct neon_type_el *key, unsigned thisarg)
{
  struct neon_type_el dest = *key;

  /* Promotion only makes sense for operands tied to the key type.  */
  gas_assert ((thisarg & N_EQK) != 0);

  neon_modify_type_size (thisarg, &dest.type, &dest.size);

  return dest;
}
13690
13691 /* Convert Neon type and size into compact bitmask representation. */
13692
13693 static enum neon_type_mask
13694 type_chk_of_el_type (enum neon_el_type type, unsigned size)
13695 {
13696 switch (type)
13697 {
13698 case NT_untyped:
13699 switch (size)
13700 {
13701 case 8: return N_8;
13702 case 16: return N_16;
13703 case 32: return N_32;
13704 case 64: return N_64;
13705 default: ;
13706 }
13707 break;
13708
13709 case NT_integer:
13710 switch (size)
13711 {
13712 case 8: return N_I8;
13713 case 16: return N_I16;
13714 case 32: return N_I32;
13715 case 64: return N_I64;
13716 default: ;
13717 }
13718 break;
13719
13720 case NT_float:
13721 switch (size)
13722 {
13723 case 16: return N_F16;
13724 case 32: return N_F32;
13725 case 64: return N_F64;
13726 default: ;
13727 }
13728 break;
13729
13730 case NT_poly:
13731 switch (size)
13732 {
13733 case 8: return N_P8;
13734 case 16: return N_P16;
13735 case 64: return N_P64;
13736 default: ;
13737 }
13738 break;
13739
13740 case NT_signed:
13741 switch (size)
13742 {
13743 case 8: return N_S8;
13744 case 16: return N_S16;
13745 case 32: return N_S32;
13746 case 64: return N_S64;
13747 default: ;
13748 }
13749 break;
13750
13751 case NT_unsigned:
13752 switch (size)
13753 {
13754 case 8: return N_U8;
13755 case 16: return N_U16;
13756 case 32: return N_U32;
13757 case 64: return N_U64;
13758 default: ;
13759 }
13760 break;
13761
13762 default: ;
13763 }
13764
13765 return N_UTYP;
13766 }
13767
13768 /* Convert compact Neon bitmask type representation to a type and size. Only
13769 handles the case where a single bit is set in the mask. */
13770
13771 static int
13772 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
13773 enum neon_type_mask mask)
13774 {
13775 if ((mask & N_EQK) != 0)
13776 return FAIL;
13777
13778 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
13779 *size = 8;
13780 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
13781 *size = 16;
13782 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
13783 *size = 32;
13784 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
13785 *size = 64;
13786 else
13787 return FAIL;
13788
13789 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
13790 *type = NT_signed;
13791 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
13792 *type = NT_unsigned;
13793 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
13794 *type = NT_integer;
13795 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
13796 *type = NT_untyped;
13797 else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
13798 *type = NT_poly;
13799 else if ((mask & (N_F_ALL)) != 0)
13800 *type = NT_float;
13801 else
13802 return FAIL;
13803
13804 return SUCCESS;
13805 }
13806
13807 /* Modify a bitmask of allowed types. This is only needed for type
13808 relaxation. */
13809
13810 static unsigned
13811 modify_types_allowed (unsigned allowed, unsigned mods)
13812 {
13813 unsigned size;
13814 enum neon_el_type type;
13815 unsigned destmask;
13816 int i;
13817
13818 destmask = 0;
13819
13820 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
13821 {
13822 if (el_type_of_type_chk (&type, &size,
13823 (enum neon_type_mask) (allowed & i)) == SUCCESS)
13824 {
13825 neon_modify_type_size (mods, &type, &size);
13826 destmask |= type_chk_of_el_type (type, size);
13827 }
13828 }
13829
13830 return destmask;
13831 }
13832
/* Check type and return type classification.
   The manual states (paraphrase): If one datatype is given, it indicates the
   type given in:
    - the second operand, if there is one
    - the operand, if there is no second operand
    - the result, if there are no operands.
   This isn't quite good enough though, so we use a concept of a "key" datatype
   which is set on a per-instruction basis, which is the one which matters when
   only one data type is written.
   Note: this function has side-effects (e.g. filling in missing operands). All
   Neon instructions should call it before performing bit encoding.  */

static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  /* Returned (after reporting via first_error/inst.error) on any
     failure.  */
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      /* Remember which operand carries the key type.  */
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  /* Types may come from the mnemonic suffix or from the operands, but
     not both.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Pass 0 establishes the key type and size; pass 1 checks every operand
     against the key (and, for N_VFP shapes, against register widths).  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  /* On the second pass, N_EQK operands are checked against the set
	     of types derived from the key, with modifiers applied.  */
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if (((types_allowed & N_UNT) == 0)
	      && ((g_size == 8 && (types_allowed & N_8) != 0)
		  || (g_size == 16 && (types_allowed & N_16) != 0)
		  || (g_size == 32 && (types_allowed & N_32) != 0)
		  || (g_size == 64 && (types_allowed & N_64) != 0)))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;

		  /* Check architecture constraint on FP16 extension.  */
		  if (k_size == 16
		      && k_type == NT_float
		      && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
		    {
		      inst.error = _(BAD_FP16);
		      return badtype;
		    }
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape;
		  unsigned regwidth, match;

		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
		  if (ns == NS_NULL)
		    {
		      first_error (_("invalid instruction shape"));
		      return badtype;
		    }
		  regshape = neon_shape_tab[ns].el[i];
		  regwidth = neon_shape_el_size[regshape];

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  /* FP16 will use a single precision register.  */
		  if (regwidth == 32 && match == 16)
		    {
		      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
			match = regwidth;
		      else
			{
			  inst.error = _(BAD_FP16);
			  return badtype;
			}
		    }

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (_("bad type in Neon instruction"));
		      return badtype;
		    }
		}
	      else
		{
		  /* N_EQK operand: its (modified) type and size must agree
		     exactly with those derived from the key.  */
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}
14042
14043 /* Neon-style VFP instruction forwarding. */
14044
14045 /* Thumb VFP instructions have 0xE in the condition field. */
14046
14047 static void
14048 do_vfp_cond_or_thumb (void)
14049 {
14050 inst.is_neon = 1;
14051
14052 if (thumb_mode)
14053 inst.instruction |= 0xe0000000;
14054 else
14055 inst.instruction |= inst.cond << 28;
14056 }
14057
/* Look up and encode a simple mnemonic, for use as a helper function for the
   Neon-style VFP syntax.  This avoids duplication of bits of the insns table,
   etc. It is assumed that operand parsing has already been done, and that the
   operands are in the form expected by the given opcode (this isn't necessarily
   the same as the form in which they were parsed, hence some massaging must
   take place before this function is called).
   Checks current arch version against that in the looked-up opcode.  */

static void
do_vfp_nsyn_opcode (const char *opname)
{
  const struct asm_opcode *opcode;

  opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);

  /* OPNAME is always one of our own hard-coded mnemonic strings, so a
     failed lookup indicates an internal inconsistency.  */
  if (!opcode)
    abort ();

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
		thumb_mode ? *opcode->tvariant : *opcode->avariant),
	      _(BAD_FPU));

  inst.is_neon = 1;

  if (thumb_mode)
    {
      inst.instruction = opcode->tvalue;
      opcode->tencode ();
    }
  else
    {
      /* ARM encodings carry the condition code in bits 28-31.  */
      inst.instruction = (inst.cond << 28) | opcode->avalue;
      opcode->aencode ();
    }
}
14093
14094 static void
14095 do_vfp_nsyn_add_sub (enum neon_shape rs)
14096 {
14097 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
14098
14099 if (rs == NS_FFF || rs == NS_HHH)
14100 {
14101 if (is_add)
14102 do_vfp_nsyn_opcode ("fadds");
14103 else
14104 do_vfp_nsyn_opcode ("fsubs");
14105
14106 /* ARMv8.2 fp16 instruction. */
14107 if (rs == NS_HHH)
14108 do_scalar_fp16_v82_encode ();
14109 }
14110 else
14111 {
14112 if (is_add)
14113 do_vfp_nsyn_opcode ("faddd");
14114 else
14115 do_vfp_nsyn_opcode ("fsubd");
14116 }
14117 }
14118
/* Check operand types to see if this is a VFP instruction, and if so call
   PFN ().  ARGS is the operand count (2 or 3).  Returns SUCCESS when the
   operands matched a VFP shape and PFN was invoked, FAIL otherwise.  */

static int
try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
{
  enum neon_shape rs;
  struct neon_type_el et;

  switch (args)
    {
    case 2:
      rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
      et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
      break;

    case 3:
      rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
      et = neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
			    N_F_ALL | N_KEY | N_VFP);
      break;

    default:
      abort ();
    }

  /* A valid key type means the operands fitted a VFP shape.  */
  if (et.type != NT_invtype)
    {
      pfn (rs);
      return SUCCESS;
    }

  /* Not VFP; clear the error from the failed check so that Neon handling
     can be attempted instead.  */
  inst.error = NULL;
  return FAIL;
}
14154
14155 static void
14156 do_vfp_nsyn_mla_mls (enum neon_shape rs)
14157 {
14158 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
14159
14160 if (rs == NS_FFF || rs == NS_HHH)
14161 {
14162 if (is_mla)
14163 do_vfp_nsyn_opcode ("fmacs");
14164 else
14165 do_vfp_nsyn_opcode ("fnmacs");
14166
14167 /* ARMv8.2 fp16 instruction. */
14168 if (rs == NS_HHH)
14169 do_scalar_fp16_v82_encode ();
14170 }
14171 else
14172 {
14173 if (is_mla)
14174 do_vfp_nsyn_opcode ("fmacd");
14175 else
14176 do_vfp_nsyn_opcode ("fnmacd");
14177 }
14178 }
14179
14180 static void
14181 do_vfp_nsyn_fma_fms (enum neon_shape rs)
14182 {
14183 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
14184
14185 if (rs == NS_FFF || rs == NS_HHH)
14186 {
14187 if (is_fma)
14188 do_vfp_nsyn_opcode ("ffmas");
14189 else
14190 do_vfp_nsyn_opcode ("ffnmas");
14191
14192 /* ARMv8.2 fp16 instruction. */
14193 if (rs == NS_HHH)
14194 do_scalar_fp16_v82_encode ();
14195 }
14196 else
14197 {
14198 if (is_fma)
14199 do_vfp_nsyn_opcode ("ffmad");
14200 else
14201 do_vfp_nsyn_opcode ("ffnmad");
14202 }
14203 }
14204
14205 static void
14206 do_vfp_nsyn_mul (enum neon_shape rs)
14207 {
14208 if (rs == NS_FFF || rs == NS_HHH)
14209 {
14210 do_vfp_nsyn_opcode ("fmuls");
14211
14212 /* ARMv8.2 fp16 instruction. */
14213 if (rs == NS_HHH)
14214 do_scalar_fp16_v82_encode ();
14215 }
14216 else
14217 do_vfp_nsyn_opcode ("fmuld");
14218 }
14219
14220 static void
14221 do_vfp_nsyn_abs_neg (enum neon_shape rs)
14222 {
14223 int is_neg = (inst.instruction & 0x80) != 0;
14224 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_VFP | N_KEY);
14225
14226 if (rs == NS_FF || rs == NS_HH)
14227 {
14228 if (is_neg)
14229 do_vfp_nsyn_opcode ("fnegs");
14230 else
14231 do_vfp_nsyn_opcode ("fabss");
14232
14233 /* ARMv8.2 fp16 instruction. */
14234 if (rs == NS_HH)
14235 do_scalar_fp16_v82_encode ();
14236 }
14237 else
14238 {
14239 if (is_neg)
14240 do_vfp_nsyn_opcode ("fnegd");
14241 else
14242 do_vfp_nsyn_opcode ("fabsd");
14243 }
14244 }
14245
14246 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14247 insns belong to Neon, and are handled elsewhere. */
14248
14249 static void
14250 do_vfp_nsyn_ldm_stm (int is_dbmode)
14251 {
14252 int is_ldm = (inst.instruction & (1 << 20)) != 0;
14253 if (is_ldm)
14254 {
14255 if (is_dbmode)
14256 do_vfp_nsyn_opcode ("fldmdbs");
14257 else
14258 do_vfp_nsyn_opcode ("fldmias");
14259 }
14260 else
14261 {
14262 if (is_dbmode)
14263 do_vfp_nsyn_opcode ("fstmdbs");
14264 else
14265 do_vfp_nsyn_opcode ("fstmias");
14266 }
14267 }
14268
14269 static void
14270 do_vfp_nsyn_sqrt (void)
14271 {
14272 enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14273 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14274
14275 if (rs == NS_FF || rs == NS_HH)
14276 {
14277 do_vfp_nsyn_opcode ("fsqrts");
14278
14279 /* ARMv8.2 fp16 instruction. */
14280 if (rs == NS_HH)
14281 do_scalar_fp16_v82_encode ();
14282 }
14283 else
14284 do_vfp_nsyn_opcode ("fsqrtd");
14285 }
14286
14287 static void
14288 do_vfp_nsyn_div (void)
14289 {
14290 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14291 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14292 N_F_ALL | N_KEY | N_VFP);
14293
14294 if (rs == NS_FFF || rs == NS_HHH)
14295 {
14296 do_vfp_nsyn_opcode ("fdivs");
14297
14298 /* ARMv8.2 fp16 instruction. */
14299 if (rs == NS_HHH)
14300 do_scalar_fp16_v82_encode ();
14301 }
14302 else
14303 do_vfp_nsyn_opcode ("fdivd");
14304 }
14305
/* Encode a Neon-syntax VNMUL.  Unlike the helpers above this encodes the
   instruction directly (via the dyadic VFP encoders) rather than looking
   up another mnemonic.  */

static void
do_vfp_nsyn_nmul (void)
{
  enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
  neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
		   N_F_ALL | N_KEY | N_VFP);

  if (rs == NS_FFF || rs == NS_HHH)
    {
      NEON_ENCODE (SINGLE, inst);
      do_vfp_sp_dyadic ();

      /* ARMv8.2 fp16 instruction.  */
      if (rs == NS_HHH)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      NEON_ENCODE (DOUBLE, inst);
      do_vfp_dp_rd_rn_rm ();
    }
  do_vfp_cond_or_thumb ();

}
14330
/* Encode a Neon-syntax VCMP/VCMPE.  Two forms exist: register-register
   compare, and compare against an immediate zero (operand 1 not a
   register), which is rewritten to the internal "z" mnemonic variant.  */

static void
do_vfp_nsyn_cmp (void)
{
  enum neon_shape rs;
  if (inst.operands[1].isreg)
    {
      /* Register-register compare.  */
      rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);

      if (rs == NS_FF || rs == NS_HH)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_monadic ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd_rm ();
	}
    }
  else
    {
      /* Compare against zero: switch the opcode to the "z" variant.  */
      rs = neon_select_shape (NS_HI, NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F_ALL | N_KEY | N_VFP, N_EQK);

      switch (inst.instruction & 0x0fffffff)
	{
	case N_MNEM_vcmp:
	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
	  break;
	case N_MNEM_vcmpe:
	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
	  break;
	default:
	  abort ();
	}

      if (rs == NS_FI || rs == NS_HI)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_compare_z ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd ();
	}
    }
  do_vfp_cond_or_thumb ();

  /* ARMv8.2 fp16 instruction.  */
  if (rs == NS_HI || rs == NS_HH)
    do_scalar_fp16_v82_encode ();
}
14385
14386 static void
14387 nsyn_insert_sp (void)
14388 {
14389 inst.operands[1] = inst.operands[0];
14390 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
14391 inst.operands[0].reg = REG_SP;
14392 inst.operands[0].isreg = 1;
14393 inst.operands[0].writeback = 1;
14394 inst.operands[0].present = 1;
14395 }
14396
14397 static void
14398 do_vfp_nsyn_push (void)
14399 {
14400 nsyn_insert_sp ();
14401
14402 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
14403 _("register list must contain at least 1 and at most 16 "
14404 "registers"));
14405
14406 if (inst.operands[1].issingle)
14407 do_vfp_nsyn_opcode ("fstmdbs");
14408 else
14409 do_vfp_nsyn_opcode ("fstmdbd");
14410 }
14411
14412 static void
14413 do_vfp_nsyn_pop (void)
14414 {
14415 nsyn_insert_sp ();
14416
14417 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
14418 _("register list must contain at least 1 and at most 16 "
14419 "registers"));
14420
14421 if (inst.operands[1].issingle)
14422 do_vfp_nsyn_opcode ("fldmias");
14423 else
14424 do_vfp_nsyn_opcode ("fldmiad");
14425 }
14426
14427 /* Fix up Neon data-processing instructions, ORing in the correct bits for
14428 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
14429
14430 static void
14431 neon_dp_fixup (struct arm_it* insn)
14432 {
14433 unsigned int i = insn->instruction;
14434 insn->is_neon = 1;
14435
14436 if (thumb_mode)
14437 {
14438 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
14439 if (i & (1 << 24))
14440 i |= 1 << 28;
14441
14442 i &= ~(1 << 24);
14443
14444 i |= 0xef000000;
14445 }
14446 else
14447 i |= 0xf2000000;
14448
14449 insn->instruction = i;
14450 }
14451
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  */

static unsigned
neon_logbits (unsigned x)
{
  unsigned pos = 0;

  /* Locate the lowest set bit, counting positions from 1 as ffs() does
     and yielding 0 when no bit is set at all.  */
  while (x != 0 && (x & 1) == 0)
    {
      x >>= 1;
      pos++;
    }
  return (x != 0 ? pos + 1 : 0) - 4;
}
14460
/* Split a 5-bit D/Q register number into the low four bits and the high
   bit, as Neon encodings store them in separate instruction fields.  */
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
14463
/* Encode insns with bit pattern:

  |28/24|23|22 |21 20|19 16|15 12|11    8|7|6|5|4|3  0|
  |  U  |x |D  |size | Rn  | Rd  |x x x x|N|Q|M|x| Rm |

  SIZE is passed in bits. -1 means size field isn't changed, in case it has a
  different meaning for some instruction.  */

static void
neon_three_same (int isquad, int ubit, int size)
{
  /* Each register number is split: low four bits in the main field, the
     fifth bit in D/N/M respectively.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
14488
/* Encode instructions of the form:

  |28/24|23|22|21 20|19 18|17 16|15 12|11      7|6|5|4|3  0|
  |  U  |x |D |x  x |size |x  x | Rd  |x x x x x|Q|M|x| Rm |

  Don't write size if SIZE == -1.  */

static void
neon_two_same (int qbit, int ubit, int size)
{
  /* Rd and Rm register numbers are split across the main fields and the
     D and M bits.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (qbit != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;

  if (size != -1)
    inst.instruction |= neon_logbits (size) << 18;

  neon_dp_fixup (&inst);
}
14511
14512 /* Neon instruction encoders, in approximate order of appearance. */
14513
14514 static void
14515 do_neon_dyadic_i_su (void)
14516 {
14517 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14518 struct neon_type_el et = neon_check_type (3, rs,
14519 N_EQK, N_EQK, N_SU_32 | N_KEY);
14520 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14521 }
14522
14523 static void
14524 do_neon_dyadic_i64_su (void)
14525 {
14526 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14527 struct neon_type_el et = neon_check_type (3, rs,
14528 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14529 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14530 }
14531
/* Encode a two-register Neon shift with immediate count IMMBITS.
   ET gives the element type; WRITE_UBIT says whether to set bit 24 from
   UVAL.  */

static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
		unsigned immbits)
{
  /* ET.size is in bits; SIZE is the element size in bytes.  */
  unsigned size = et.size >> 3;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= immbits << 16;
  /* The byte size is split across the encoding: its bit 3 goes to
     instruction bit 7, its low three bits to bits 19-21.  */
  inst.instruction |= (size >> 3) << 7;
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  neon_dp_fixup (&inst);
}
14550
/* Encode VSHL: either the immediate-shift form (operand 2 is not a
   register) or the three-register form.  */

static void
do_neon_shl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
      int imm = inst.operands[2].imm;

      /* A shift count must be smaller than the element size.  */
      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* VSHL/VQSHL 3-register variants have syntax such as:
	   vshl.xx Dd, Dm, Dn
	 whereas other 3-register operations encoded by neon_three_same have
	 syntax like:
	   vadd.xx Dd, Dn, Dm
	 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
	 here.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
14586
/* VQSHL: saturating shift by immediate (range [0, size-1]) or by register.
   For the immediate form the U bit carries the signedness of the type; the
   register form swaps operands 1 and 2 like VSHL (see do_neon_shl_imm).  */

static void
do_neon_qshl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      /* Immediate form.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
      int imm = inst.operands[2].imm;

      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
    }
  else
    {
      /* Register form.  */
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* See note in do_neon_shl_imm.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
14616
14617 static void
14618 do_neon_rshl (void)
14619 {
14620 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14621 struct neon_type_el et = neon_check_type (3, rs,
14622 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14623 unsigned int tmp;
14624
14625 tmp = inst.operands[2].reg;
14626 inst.operands[2].reg = inst.operands[1].reg;
14627 inst.operands[1].reg = tmp;
14628 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14629 }
14630
/* Find a cmode encoding (and corresponding 8-bit immediate) that reproduces
   IMMEDIATE for a VBIC/VORR-style logic immediate with element size SIZE.
   On success stores the 8 significant bits in *IMMBITS and returns the
   cmode value; returns FAIL if the constant cannot be encoded.  */

static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Handle .I8 pseudo-instructions.  */
  if (size == 8)
    {
      /* Unfortunately, this will make everything apart from zero out-of-range.
	 FIXME is this the intended semantics? There doesn't seem much point in
	 accepting .I8 if so.  */
      immediate |= immediate << 8;
      size = 16;
    }

  if (size >= 32)
    {
      /* 32-bit patterns: the byte may sit in any of the four byte lanes.  */
      if (immediate == (immediate & 0x000000ff))
	{
	  *immbits = immediate;
	  return 0x1;
	}
      else if (immediate == (immediate & 0x0000ff00))
	{
	  *immbits = immediate >> 8;
	  return 0x3;
	}
      else if (immediate == (immediate & 0x00ff0000))
	{
	  *immbits = immediate >> 16;
	  return 0x5;
	}
      else if (immediate == (immediate & 0xff000000))
	{
	  *immbits = immediate >> 24;
	  return 0x7;
	}
      /* Fall through to the 16-bit encodings only when the 32-bit value is
	 a repeating 16-bit pattern.  */
      if ((immediate & 0xffff) != (immediate >> 16))
	goto bad_immediate;
      immediate &= 0xffff;
    }

  /* 16-bit patterns: the byte may sit in either byte lane.  */
  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return 0x9;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return 0xb;
    }

  bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}
14686
14687 static void
14688 do_neon_logic (void)
14689 {
14690 if (inst.operands[2].present && inst.operands[2].isreg)
14691 {
14692 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14693 neon_check_type (3, rs, N_IGNORE_TYPE);
14694 /* U bit and size field were set as part of the bitmask. */
14695 NEON_ENCODE (INTEGER, inst);
14696 neon_three_same (neon_quad (rs), 0, -1);
14697 }
14698 else
14699 {
14700 const int three_ops_form = (inst.operands[2].present
14701 && !inst.operands[2].isreg);
14702 const int immoperand = (three_ops_form ? 2 : 1);
14703 enum neon_shape rs = (three_ops_form
14704 ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
14705 : neon_select_shape (NS_DI, NS_QI, NS_NULL));
14706 struct neon_type_el et = neon_check_type (2, rs,
14707 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
14708 enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
14709 unsigned immbits;
14710 int cmode;
14711
14712 if (et.type == NT_invtype)
14713 return;
14714
14715 if (three_ops_form)
14716 constraint (inst.operands[0].reg != inst.operands[1].reg,
14717 _("first and second operands shall be the same register"));
14718
14719 NEON_ENCODE (IMMED, inst);
14720
14721 immbits = inst.operands[immoperand].imm;
14722 if (et.size == 64)
14723 {
14724 /* .i64 is a pseudo-op, so the immediate must be a repeating
14725 pattern. */
14726 if (immbits != (inst.operands[immoperand].regisimm ?
14727 inst.operands[immoperand].reg : 0))
14728 {
14729 /* Set immbits to an invalid constant. */
14730 immbits = 0xdeadbeef;
14731 }
14732 }
14733
14734 switch (opcode)
14735 {
14736 case N_MNEM_vbic:
14737 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
14738 break;
14739
14740 case N_MNEM_vorr:
14741 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
14742 break;
14743
14744 case N_MNEM_vand:
14745 /* Pseudo-instruction for VBIC. */
14746 neon_invert_size (&immbits, 0, et.size);
14747 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
14748 break;
14749
14750 case N_MNEM_vorn:
14751 /* Pseudo-instruction for VORR. */
14752 neon_invert_size (&immbits, 0, et.size);
14753 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
14754 break;
14755
14756 default:
14757 abort ();
14758 }
14759
14760 if (cmode == FAIL)
14761 return;
14762
14763 inst.instruction |= neon_quad (rs) << 6;
14764 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14765 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14766 inst.instruction |= cmode << 8;
14767 neon_write_immbits (immbits);
14768
14769 neon_dp_fixup (&inst);
14770 }
14771 }
14772
14773 static void
14774 do_neon_bitfield (void)
14775 {
14776 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14777 neon_check_type (3, rs, N_IGNORE_TYPE);
14778 neon_three_same (neon_quad (rs), 0, -1);
14779 }
14780
14781 static void
14782 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
14783 unsigned destbits)
14784 {
14785 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14786 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
14787 types | N_KEY);
14788 if (et.type == NT_float)
14789 {
14790 NEON_ENCODE (FLOAT, inst);
14791 neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
14792 }
14793 else
14794 {
14795 NEON_ENCODE (INTEGER, inst);
14796 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
14797 }
14798 }
14799
/* Dyadic operation accepting signed, unsigned or float 32-bit-keyed types;
   unsigned types set the U bit.  */

static void
do_neon_dyadic_if_su (void)
{
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
14805
/* As do_neon_dyadic_if_su, but for instructions restricted to D registers.  */

static void
do_neon_dyadic_if_su_d (void)
{
  /* This version only allow D registers, but that constraint is enforced during
     operand parsing so we don't need to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
14813
/* Dyadic operation accepting integer or float 32-bit-keyed types; the U bit
   is never set (NT_untyped cannot match a real element type).  */

static void
do_neon_dyadic_if_i_d (void)
{
  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
14821
/* Flag bits accepted by vfp_or_neon_is_neon, below.  */

enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,	/* Check/fix up the condition code.  */
  NEON_CHECK_ARCH = 2,	/* Require base Neon (v1) support.  */
  NEON_CHECK_ARCH8 = 4	/* Require ARMv8 Neon support.  */
};
14828
/* Call this function if an instruction which may have belonged to the VFP or
   Neon instruction sets, but turned out to be a Neon instruction (due to the
   operand types involved, etc.). We have to check and/or fix-up a couple of
   things:

     - Make sure the user hasn't attempted to make a Neon instruction
       conditional.
     - Alter the value in the condition code field if necessary.
     - Make sure that the arch supports Neon instructions.

   Which of these operations take place depends on bits from enum
   vfp_or_neon_is_neon_bits.

   WARNING: This function has side effects! If NEON_CHECK_CC is used and the
   current instruction's condition is COND_ALWAYS, the condition field is
   changed to inst.uncond_value. This is necessary because instructions shared
   between VFP and Neon may be conditional for the VFP variants only, and the
   unconditional Neon version must have, e.g., 0xF in the condition field.  */

static int
vfp_or_neon_is_neon (unsigned check)
{
  /* Conditions are always legal in Thumb mode (IT blocks).  */
  if (!thumb_mode && (check & NEON_CHECK_CC))
    {
      if (inst.cond != COND_ALWAYS)
	{
	  first_error (_(BAD_COND));
	  return FAIL;
	}
      /* Force the unconditional encoding's condition field.  */
      if (inst.uncond_value != -1)
	inst.instruction |= inst.uncond_value << 28;
    }

  /* Base Neon support required.  */
  if ((check & NEON_CHECK_ARCH)
      && !mark_feature_used (&fpu_neon_ext_v1))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  /* ARMv8 Neon support required.  */
  if ((check & NEON_CHECK_ARCH8)
      && !mark_feature_used (&fpu_neon_ext_armv8))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  return SUCCESS;
}
14879
/* VADD/VSUB: try the VFP scalar form first; otherwise encode as a Neon
   integer (or float) operation.  */

static void
do_neon_addsub_if_i (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
}
14893
14894 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
14895 result to be:
14896 V<op> A,B (A is operand 0, B is operand 2)
14897 to mean:
14898 V<op> A,B,A
14899 not:
14900 V<op> A,B,B
14901 so handle that case specially. */
14902
14903 static void
14904 neon_exchange_operands (void)
14905 {
14906 if (inst.operands[1].present)
14907 {
14908 void *scratch = xmalloc (sizeof (inst.operands[0]));
14909
14910 /* Swap operands[1] and operands[2]. */
14911 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
14912 inst.operands[1] = inst.operands[2];
14913 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
14914 free (scratch);
14915 }
14916 else
14917 {
14918 inst.operands[1] = inst.operands[2];
14919 inst.operands[2] = inst.operands[0];
14920 }
14921 }
14922
/* Encode comparison instructions.  With a register as operand 2 this is a
   dyadic operation over REGTYPES, optionally with the source operands
   swapped (INVERT) to implement the inverted-condition aliases.  With an
   immediate operand (compare-against-zero forms) the types come from
   IMMTYPES and bit 10 distinguishes float comparisons.  */

static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
	neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK | N_SIZ, immtypes | N_KEY);

      NEON_ENCODE (IMMED, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
}
14950
/* Non-inverted comparison: S/U/F register forms, signed or float
   compare-with-zero immediate forms.  */

static void
do_neon_cmp (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, FALSE);
}
14956
/* Inverted comparison: as do_neon_cmp, but with the register operands
   exchanged before encoding.  */

static void
do_neon_cmp_inv (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, TRUE);
}
14962
/* Equality comparison: integer-or-float types for both register and
   immediate forms; never inverted.  */

static void
do_neon_ceq (void)
{
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
14968
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
   scalars, which are encoded in 5 bits, M : Rm.
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
   index in M.  */

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned reg = NEON_SCALAR_REG (scalar);
  unsigned idx = NEON_SCALAR_INDEX (scalar);

  /* 16-bit scalar: register in bits [2:0], index in bits [4:3].  */
  if (elsize == 16 && reg <= 7 && idx <= 3)
    return reg | (idx << 3);

  /* 32-bit scalar: register in bits [3:0], index in bit [4].  */
  if (elsize == 32 && reg <= 15 && idx <= 1)
    return reg | (idx << 4);

  /* Any other size, or out-of-range register/index, is an error.  */
  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
15000
/* Encode multiply / multiply-accumulate scalar instructions.

   ET    element type of the operation; invalid types are rejected here to
	 give a clearer message than the encoder would.
   UBIT  value for bit 24; callers pass neon_quad (rs), so it distinguishes
	 the D- and Q-register forms for these encodings.  */

static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  neon_dp_fixup (&inst);
}
15025
/* VMLA/VMLS: try the VFP form first; otherwise encode as a Neon
   multiply-accumulate, either by scalar or as a three-same operation.  */

static void
do_neon_mac_maybe_scalar (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    {
      /* By-scalar form.  */
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_I16 | N_I32 | N_F_16_32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      /* The "untyped" case can't happen.  Do this to stop the "U" bit being
	 affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}
15050
/* VFMA/VFMS: try the VFP fused multiply-accumulate form first; otherwise
   encode as a Neon float operation.  */

static void
do_neon_fmac (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
15062
15063 static void
15064 do_neon_tst (void)
15065 {
15066 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15067 struct neon_type_el et = neon_check_type (3, rs,
15068 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
15069 neon_three_same (neon_quad (rs), 0, et.size);
15070 }
15071
/* VMUL with 3 registers allows the P8 type. The scalar version supports the
   same types as the MAC equivalents. The polynomial type for this instruction
   is encoded the same as the integer type.  */

static void
do_neon_mul (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    /* By-scalar form shares its encoding path with VMLA/VMLS.  */
    do_neon_mac_maybe_scalar ();
  else
    /* NT_poly sets the U bit, encoding the polynomial variant.  */
    neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F16 | N_F32 | N_P8, 0);
}
15090
/* VQDMULH/VQRDMULH: signed saturating doubling multiply returning high
   half; S16/S32 only, by scalar or three-same registers.  */

static void
do_neon_qdmulh (void)
{
  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
15112
/* VQRDMLAH/VQRDMLSH: same operand shapes as VQDMULH, but these are ARMv8.1
   AdvSIMD instructions; error without ARMv8 Neon, warn when only the v8.1
   extension was implied rather than selected.  */

static void
do_neon_qrdmlah (void)
{
  /* Check we're on the correct architecture.  */
  if (!mark_feature_used (&fpu_neon_ext_armv8))
    inst.error =
      _("instruction form not available on this architecture.");
  else if (!mark_feature_used (&fpu_neon_ext_v8_1))
    {
      as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
      record_feature_use (&fpu_neon_ext_v8_1);
    }

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
15144
/* Absolute floating-point comparison: float-only three-same with the
   U bit set; size field comes from the bitmask except for F16.  */

static void
do_neon_fcmp_absolute (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
					    N_F_16_32 | N_KEY);
  /* Size field comes from bit mask.  */
  neon_three_same (neon_quad (rs), 1, et.size == 16 ? (int) et.size : -1);
}
15154
/* Inverted-condition absolute comparison: exchange the source operands,
   then encode as do_neon_fcmp_absolute.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
15161
/* Reciprocal/step-style float-only three-same operation with the U bit
   clear; size comes from the bitmask except for F16.  */

static void
do_neon_step (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
					    N_F_16_32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
}
15170
/* VABS/VNEG: try the VFP form first; otherwise encode the two-register
   Neon form.  Bit 10 marks float element types.  */

static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S_32 | N_F_16_32 | N_KEY);

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  neon_dp_fixup (&inst);
}
15196
/* VSLI: shift-left insert.  Immediate must be in [0, size-1] and is
   encoded directly.  */

static void
do_neon_sli (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
15208
/* VSRI: shift-right insert.  Immediate must be in [1, size] and is
   encoded as size - imm.  */

static void
do_neon_sri (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
}
15220
/* VQSHLU: signed saturating shift left producing an unsigned result;
   immediate in [0, size-1].  */

static void
do_neon_qshlu_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
15237
15238 static void
15239 do_neon_qmovn (void)
15240 {
15241 struct neon_type_el et = neon_check_type (2, NS_DQ,
15242 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
15243 /* Saturating move where operands can be signed or unsigned, and the
15244 destination has the same signedness. */
15245 NEON_ENCODE (INTEGER, inst);
15246 if (et.type == NT_unsigned)
15247 inst.instruction |= 0xc0;
15248 else
15249 inst.instruction |= 0x80;
15250 neon_two_same (0, 1, et.size / 2);
15251 }
15252
/* VQMOVUN: saturating narrowing move with unsigned result from signed
   operands.  */

static void
do_neon_qmovun (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  /* Saturating move with unsigned results. Operands must be signed.  */
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
15262
/* VQSHRN/VQRSHRN: saturating shift right and narrow.  A shift count of
   zero is assembled as the equivalent VQMOVN instead.  */

static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* Right shifts encode as (size - imm) in the immediate field.  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
15289
/* VQSHRUN/VQRSHRUN: saturating shift right and narrow, with unsigned
   result from signed operands.  A shift count of zero is assembled as
   VQMOVUN instead.  */

static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
15319
/* VMOVN: narrowing move from a Q register to a D register with
   half-size elements.  */

static void
do_neon_movn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
15328
/* VSHRN/VRSHRN: shift right and narrow.  A shift count of zero is
   assembled as the equivalent VMOVN instead.  */

static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm>  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for narrowing operation"));
  /* Right shifts encode as (size - imm) in the immediate field.  */
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
15353
/* VSHLL: widening shift left by immediate.  A shift by exactly the
   element size uses the dedicated maximum-shift encoding; any other
   count uses the ordinary immediate-shift encoding with stricter
   (signed/unsigned) types.  */

static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.  */
      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
	N_EQK | N_DBL, N_SU_32 | N_KEY);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
15383
/* Check the various types for the VCVT instruction, and return which version
   the current instruction is.  */

/* Each CVT_VAR row is:
     CVT_VAR (suffix, dest-type, src-type, reg-class,
	      bitshift-opname, plain-opname, round-to-zero-opname)
   The three opcode-name columns are selected by do_vfp_nsyn_cvt and
   do_vfp_nsyn_cvtz below; NULL means no such variant exists.  `whole_reg'
   and `key' are expanded in get_neon_cvt_flavour's scope.  */

#define CVT_FLAVOUR_VAR \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg,   "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg,   "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg,   "fsltos", "fsitos", NULL)      \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg,   "fultos", "fuitos", NULL)      \
  /* Half-precision conversions.  */					      \
  CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg,   NULL,     NULL,     NULL)      \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg,   NULL,     NULL,     NULL)      \
  /* New VCVT instructions introduced by ARMv8.2 fp16 extension.	      \
     Compared with single/double precision variants, only the co-processor \
     field is different, so the encoding flow is reused here.  */	      \
  CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL)    \
  CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL)    \
  CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
  CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
  /* VFP instructions.  */						      \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP,       NULL,     "fcvtsd", NULL)      \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP,       NULL,     "fcvtds", NULL)      \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL)      \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL)      \
  /* VFP instructions with bitshift.  */				      \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL,     NULL)      \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL,     NULL)      \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL,     NULL)      \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL,     NULL)      \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL,     NULL)      \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL,     NULL)      \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL,     NULL)      \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL,     NULL)

#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
15435
/* Deduce the conversion flavour for shape RS by type-checking against each
   CVT_VAR row in turn; the first row that matches wins.  Returns
   neon_cvt_flavour_invalid if none match.  */

static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
  if (et.type != NT_invtype)				\
    {							\
      inst.error = NULL;				\
      return (neon_cvt_flavour_##C);			\
    }

  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register. Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}
15461
/* Rounding-mode/behaviour suffix of a conversion or rounding instruction.
   The a/n/p/m modes map to rm encodings 0-3 in do_vfp_nsyn_cvt_fpv8;
   z/x/r select other instruction variants (handled by their encoders —
   see the callers of do_neon_cvt_1 outside this view).  */
enum neon_cvt_mode
{
  neon_cvt_mode_a,
  neon_cvt_mode_n,
  neon_cvt_mode_p,
  neon_cvt_mode_m,
  neon_cvt_mode_z,
  neon_cvt_mode_x,
  neon_cvt_mode_r
};
15472
/* Neon-syntax VFP conversions.  */

/* Assemble a Neon-syntax VFP conversion: pick the opcode name for FLAVOUR
   from either the bitshift column (shapes with an immediate) or the plain
   column of CVT_FLAVOUR_VAR, and hand it to do_vfp_nsyn_opcode.  */

static void
do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
{
  const char *opname = 0;

  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI
      || rs == NS_FHI || rs == NS_HFI)
    {
      /* Conversions with immediate bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	{
	  opname = enc[flavour];
	  /* Bitshift forms write back to their first operand.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("operands 0 and 1 must be the same register"));
	  inst.operands[1] = inst.operands[2];
	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
	}
    }
  else
    {
      /* Conversions without bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	opname = enc[flavour];
    }

  if (opname)
    do_vfp_nsyn_opcode (opname);

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16
      || flavour == neon_cvt_flavour_f16_u32
      || flavour == neon_cvt_flavour_f16_s32)
    do_scalar_fp16_v82_encode ();
}
15526
/* Assemble a Neon-syntax round-towards-zero VFP conversion, using the
   round-to-zero opcode column of CVT_FLAVOUR_VAR.  Flavours with a NULL
   entry assemble nothing here.  */

static void
do_vfp_nsyn_cvtz (void)
{
  enum neon_shape rs = neon_select_shape (NS_FH, NS_FF, NS_FD, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
  const char *enc[] =
    {
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
      CVT_FLAVOUR_VAR
      NULL
#undef CVT_VAR
    };

  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
15543
/* Encode an FP-ARMv8 rounding conversion (VCVT{A,N,P,M}): SZ selects a
   single- or double-precision source, OP the signedness of the integer
   result, RM the rounding mode.  These instructions may not appear inside
   an IT block.  */

static void
do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
		      enum neon_cvt_mode mode)
{
  int sz, op;
  int rm;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (flavour == neon_cvt_flavour_s32_f64
      || flavour == neon_cvt_flavour_u32_f64)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* fp16 flavours need the ARMv8.2 FP16 extension.  */
  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
		_(BAD_FP16));

  set_it_insn_type (OUTSIDE_IT_INSN);

  /* Derive the sz (precision) and op (signedness) fields.  */
  switch (flavour)
    {
    case neon_cvt_flavour_s32_f64:
      sz = 1;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f32:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f16:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_u32_f64:
      sz = 1;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f32:
      sz = 0;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f16:
      sz = 0;
      op = 0;
      break;
    default:
      first_error (_("invalid instruction shape"));
      return;
    }

  /* Rounding-mode field.  */
  switch (mode)
    {
    case neon_cvt_mode_a: rm = 0; break;
    case neon_cvt_mode_n: rm = 1; break;
    case neon_cvt_mode_p: rm = 2; break;
    case neon_cvt_mode_m: rm = 3; break;
    default: first_error (_("invalid rounding mode")); return;
    }

  NEON_ENCODE (FPV8, inst);
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= sz << 8;

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      ||flavour == neon_cvt_flavour_u32_f16)
    do_scalar_fp16_v82_encode ();
  inst.instruction |= op << 7;
  inst.instruction |= rm << 16;
  inst.instruction |= 0xf0000000;
  inst.is_neon = TRUE;
}
15619
/* Common worker for all VCVT-family mnemonics.  MODE is the rounding mode
   implied by the mnemonic (neon_cvt_mode_x for VCVTR, neon_cvt_mode_z for
   plain VCVT, otherwise one of the explicit ARMv8 rounding modes).
   Dispatches between the VFP and Neon encodings based on the operand
   shape and conversion flavour.  */

static void
do_neon_cvt_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
					  NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ,
					  NS_FH, NS_HF, NS_FHI, NS_HFI,
					  NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

  if (flavour == neon_cvt_flavour_invalid)
    return;

  /* PR11109: Handle round-to-zero for VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
      && (flavour == neon_cvt_flavour_s16_f16
	  || flavour == neon_cvt_flavour_u16_f16
	  || flavour == neon_cvt_flavour_s32_f32
	  || flavour == neon_cvt_flavour_u32_f32
	  || flavour == neon_cvt_flavour_s32_f64
	  || flavour == neon_cvt_flavour_u32_f64)
      && (rs == NS_FD || rs == NS_FF))
    {
      do_vfp_nsyn_cvtz ();
      return;
    }

  /* ARMv8.2 fp16 VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16)
      && (flavour == neon_cvt_flavour_s32_f16
	  || flavour == neon_cvt_flavour_u32_f16)
      && (rs == NS_FH))
    {
      do_vfp_nsyn_cvtz ();
      do_scalar_fp16_v82_encode ();
      return;
    }

  /* VFP rather than Neon conversions.  */
  if (flavour >= neon_cvt_flavour_first_fp)
    {
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);

      return;
    }

  switch (rs)
    {
    case NS_DDI:
    case NS_QQI:
      {
	/* Neon fixed-point conversion, with an immediate giving the number
	   of fraction bits.  */
	unsigned immbits;
	unsigned enctab[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
			     0x0000100, 0x1000100, 0x0, 0x1000000};

	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion.  */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	NEON_ENCODE (IMMED, inst);
	if (flavour != neon_cvt_flavour_invalid)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	if (flavour < neon_cvt_flavour_s16_f16)
	  {
	    /* 32-bit elements.  NOTE(review): bit 21 was already set just
	       above this if/else, so the OR below is redundant (harmless).  */
	    inst.instruction |= 1 << 21;
	    immbits = 32 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	  }
	else
	  {
	    /* 16-bit (half-precision) elements.  */
	    inst.instruction |= 3 << 20;
	    immbits = 16 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	    inst.instruction &= ~(1 << 9);
	  }

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DD:
    case NS_QQ:
      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
	{
	  /* ARMv8 Neon VCVT{A,N,P,M}: unconditional, not allowed in an
	     IT block.  */
	  NEON_ENCODE (FLOAT, inst);
	  set_it_insn_type (OUTSIDE_IT_INSN);

	  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	    return;

	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg);
	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	  inst.instruction |= neon_quad (rs) << 6;
	  /* Bit 7 selects the unsigned variant.  */
	  inst.instruction |= (flavour == neon_cvt_flavour_u16_f16
			       || flavour == neon_cvt_flavour_u32_f32) << 7;
	  inst.instruction |= mode << 8;
	  if (flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_s16_f16)
	    /* Mask off the original size bits and reencode them.  */
	    inst.instruction = ((inst.instruction & 0xfff3ffff) | (1 << 18));

	  if (thumb_mode)
	    inst.instruction |= 0xfc000000;
	  else
	    inst.instruction |= 0xf0000000;
	}
      else
	{
    int_encode:
	  {
	    /* Neon integer conversion.  Also reached from the fixed-point
	       case above when the fraction-bits immediate is #0.  */
	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080,
				  0x100, 0x180, 0x0, 0x080};

	    NEON_ENCODE (INTEGER, inst);

	    if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	      return;

	    if (flavour != neon_cvt_flavour_invalid)
	      inst.instruction |= enctab[flavour];

	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	    inst.instruction |= LOW4 (inst.operands[1].reg);
	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	    inst.instruction |= neon_quad (rs) << 6;
	    if (flavour >= neon_cvt_flavour_s16_f16
		&& flavour <= neon_cvt_flavour_f16_u16)
	      /* Half precision.  */
	      inst.instruction |= 1 << 18;
	    else
	      inst.instruction |= 2 << 18;

	    neon_dp_fixup (&inst);
	  }
	}
      break;

    /* Half-precision conversions for Advanced SIMD -- neon.  */
    case NS_QD:
    case NS_DQ:

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      /* NS_DQ has an f16 destination and f32 source, NS_QD the reverse
	 (the element sizes were checked just above).  */
      if (rs == NS_DQ)
	inst.instruction = 0x3b60600;
      else
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
    }
}
15811
/* Assemble VCVTR: conversion using rounding mode 'x'
   (see do_neon_cvt_1).  */

static void
do_neon_cvtr (void)
{
  do_neon_cvt_1 (neon_cvt_mode_x);
}
15817
/* Assemble plain VCVT: conversion using rounding mode 'z'
   (round towards zero for the float-to-int forms).  */

static void
do_neon_cvt (void)
{
  do_neon_cvt_1 (neon_cvt_mode_z);
}
15823
/* Assemble VCVTA: ARMv8 VCVT variant using rounding mode 'a'.  */

static void
do_neon_cvta (void)
{
  do_neon_cvt_1 (neon_cvt_mode_a);
}
15829
/* Assemble VCVTN: ARMv8 VCVT variant using rounding mode 'n'.  */

static void
do_neon_cvtn (void)
{
  do_neon_cvt_1 (neon_cvt_mode_n);
}
15835
/* Assemble VCVTP: ARMv8 VCVT variant using rounding mode 'p'.  */

static void
do_neon_cvtp (void)
{
  do_neon_cvt_1 (neon_cvt_mode_p);
}
15841
/* Assemble VCVTM: ARMv8 VCVT variant using rounding mode 'm'.  */

static void
do_neon_cvtm (void)
{
  do_neon_cvt_1 (neon_cvt_mode_m);
}
15847
15848 static void
15849 do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
15850 {
15851 if (is_double)
15852 mark_feature_used (&fpu_vfp_ext_armv8);
15853
15854 encode_arm_vfp_reg (inst.operands[0].reg,
15855 (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
15856 encode_arm_vfp_reg (inst.operands[1].reg,
15857 (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
15858 inst.instruction |= to ? 0x10000 : 0;
15859 inst.instruction |= t ? 0x80 : 0;
15860 inst.instruction |= is_double ? 0x100 : 0;
15861 do_vfp_cond_or_thumb ();
15862 }
15863
/* Assemble VCVTB/VCVTT (half-precision conversions using one half of a
   single-precision register).  T is TRUE for VCVTT, FALSE for VCVTB.
   The valid type combination is found by probing with neon_check_type;
   inst.error is cleared after each successful probe because a failed
   earlier probe may have set it.  */

static void
do_neon_cvttb_1 (bfd_boolean t)
{
  enum neon_shape rs = neon_select_shape (NS_HF, NS_HD, NS_FH, NS_FF, NS_FD,
					  NS_DF, NS_DH, NS_NULL);

  if (rs == NS_NULL)
    return;
  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
    {
      /* Single precision to half precision.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
    {
      /* Half precision to single precision.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      /* Double precision to half precision.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      /* Half precision to double precision.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
    }
  else
    /* No valid type combination: keep the error set by the last
       neon_check_type probe.  */
    return;
}
15905
/* Assemble VCVTB (t = FALSE; cf. do_neon_cvtt).  */

static void
do_neon_cvtb (void)
{
  do_neon_cvttb_1 (FALSE);
}
15911
15912
/* Assemble VCVTT (t = TRUE; cf. do_neon_cvtb).  */

static void
do_neon_cvtt (void)
{
  do_neon_cvttb_1 (TRUE);
}
15918
/* Encode the immediate, cmode and op fields for an immediate-operand
   VMOV/VMVN.  If the immediate cannot be represented directly, the bits
   are inverted and the instruction flipped between VMOV and VMVN before
   giving up.  */

static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  immlo = inst.operands[1].imm;
  /* For a regisimm operand the high 32 bits of the (64-bit) immediate
     are carried in the reg field.  */
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV.  Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Write back the (possibly flipped) op bit and the encoding fields.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
15970
15971 static void
15972 do_neon_mvn (void)
15973 {
15974 if (inst.operands[1].isreg)
15975 {
15976 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15977
15978 NEON_ENCODE (INTEGER, inst);
15979 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15980 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15981 inst.instruction |= LOW4 (inst.operands[1].reg);
15982 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15983 inst.instruction |= neon_quad (rs) << 6;
15984 }
15985 else
15986 {
15987 NEON_ENCODE (IMMED, inst);
15988 neon_move_immediate ();
15989 }
15990
15991 neon_dp_fixup (&inst);
15992 }
15993
15994 /* Encode instructions of form:
15995
15996 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
15997 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
15998
15999 static void
16000 neon_mixed_length (struct neon_type_el et, unsigned size)
16001 {
16002 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16003 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16004 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16005 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16006 inst.instruction |= LOW4 (inst.operands[2].reg);
16007 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16008 inst.instruction |= (et.type == NT_unsigned) << 24;
16009 inst.instruction |= neon_logbits (size) << 20;
16010
16011 neon_dp_fixup (&inst);
16012 }
16013
/* Long dyadic operation: Q destination with doubled element width,
   two D-register sources.  */

static void
do_neon_dyadic_long (void)
{
  /* FIXME: Type checking for lengthening op.  */
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
16022
/* Assemble VABAL (long form; integer element types only).  */

static void
do_neon_abal (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
16030
16031 static void
16032 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
16033 {
16034 if (inst.operands[2].isscalar)
16035 {
16036 struct neon_type_el et = neon_check_type (3, NS_QDS,
16037 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
16038 NEON_ENCODE (SCALAR, inst);
16039 neon_mul_mac (et, et.type == NT_unsigned);
16040 }
16041 else
16042 {
16043 struct neon_type_el et = neon_check_type (3, NS_QDD,
16044 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
16045 NEON_ENCODE (INTEGER, inst);
16046 neon_mixed_length (et, et.size);
16047 }
16048 }
16049
/* Long multiply(-accumulate) mnemonics whose final operand may be a
   vector register or a scalar.  */

static void
do_neon_mac_maybe_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
16055
/* Wide dyadic operation: Q destination and first source, D second
   source.  */

static void
do_neon_dyadic_wide (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QQD,
    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
16063
/* Narrowing dyadic operation: D destination with halved element width,
   two Q sources (parsed as QDD with the key type doubled).  */

static void
do_neon_dyadic_narrow (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
  /* Operand sign is unimportant, and the U bit is part of the opcode,
     so force the operand type to integer.  */
  et.type = NT_integer;
  neon_mixed_length (et, et.size / 2);
}
16074
/* Saturating long multiply(-accumulate); signed 16/32-bit element types
   only, in both the register and the scalar form.  */

static void
do_neon_mul_sat_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
16080
/* Assemble VMULL.  The scalar form is handled like the other long
   multiply-accumulate instructions; the register form additionally
   accepts polynomial types (P8 always, P64 only with the ARMv8 crypto
   extension).  */

static void
do_neon_vmull (void)
{
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);

      if (et.type == NT_poly)
	NEON_ENCODE (POLY, inst);
      else
	NEON_ENCODE (INTEGER, inst);

      /* For polynomial encoding the U bit must be zero, and the size must
	 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
	 obviously, as 0b10).  */
      if (et.size == 64)
	{
	  /* Check we're on the correct architecture.  */
	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
	    inst.error =
	      _("Instruction form not available on this architecture.");

	  /* Pretend the size is 32 so the size field gets the 0b10
	     encoding described above.  */
	  et.size = 32;
	}

      neon_mixed_length (et, et.size);
    }
}
16112
16113 static void
16114 do_neon_ext (void)
16115 {
16116 enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
16117 struct neon_type_el et = neon_check_type (3, rs,
16118 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
16119 unsigned imm = (inst.operands[3].imm * et.size) / 8;
16120
16121 constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
16122 _("shift out of range"));
16123 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16124 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16125 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16126 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16127 inst.instruction |= LOW4 (inst.operands[2].reg);
16128 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16129 inst.instruction |= neon_quad (rs) << 6;
16130 inst.instruction |= imm << 8;
16131
16132 neon_dp_fixup (&inst);
16133 }
16134
/* Assemble the VREV family (reverse elements within fixed-size regions
   of a vector).  */

static void
do_neon_rev (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask.  We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction.  */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
  gas_assert (elsize != 0);
  constraint (et.size >= elsize,
	      _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
16151
/* Assemble VDUP: broadcast either a vector scalar (Dn[x]) or an ARM core
   register into every lane of a D or Q register.  */

static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      /* Scalar source form.  */
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      /* Lane index, pre-shifted so it packs next to the size bits.  */
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* The size and lane index share a field starting at bit 16.  */
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* ARM core register source form.  */
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector.  */
      NEON_ENCODE (ARMREG, inst);
      /* Element size selects the B and E bits.  */
      switch (et.size)
	{
	case 8:  inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
16202
16203 /* VMOV has particularly many variations. It can be one of:
16204 0. VMOV<c><q> <Qd>, <Qm>
16205 1. VMOV<c><q> <Dd>, <Dm>
16206 (Register operations, which are VORR with Rm = Rn.)
16207 2. VMOV<c><q>.<dt> <Qd>, #<imm>
16208 3. VMOV<c><q>.<dt> <Dd>, #<imm>
16209 (Immediate loads.)
16210 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
16211 (ARM register to scalar.)
16212 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
16213 (Two ARM registers to vector.)
16214 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
16215 (Scalar to ARM register.)
16216 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
16217 (Vector to two ARM registers.)
16218 8. VMOV.F32 <Sd>, <Sm>
16219 9. VMOV.F64 <Dd>, <Dm>
16220 (VFP register moves.)
16221 10. VMOV.F32 <Sd>, #imm
16222 11. VMOV.F64 <Dd>, #imm
16223 (VFP float immediate load.)
16224 12. VMOV <Rd>, <Sm>
16225 (VFP single to ARM reg.)
16226 13. VMOV <Sd>, <Rm>
16227 (ARM reg to VFP single.)
16228 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
16229 (Two ARM regs to two VFP singles.)
16230 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
16231 (Two VFP singles to two ARM regs.)
16232
16233 These cases can be disambiguated using neon_select_shape, except cases 1/9
16234 and 3/11 which depend on the operand type too.
16235
16236 All the encoded bits are hardcoded by this function.
16237
16238 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
16239 Cases 5, 7 may be used with VFPv2 and above.
16240
16241 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
16242 can specify a type where it doesn't make sense to, and is ignored). */
16243
/* Assemble the many variants of VMOV; the case numbers below refer to
   the table in the comment preceding this function.  The operand shape
   selects the variant; cases 1/9 and 3/11 are further disambiguated by
   the operand type.  */

static void
do_neon_mov (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
					  NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR,
					  NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
					  NS_HR, NS_RH, NS_HI, NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 9: VMOV.F64 assembles as the VFP copy "fcpyd".  */
	  do_vfp_nsyn_opcode ("fcpyd");
	  break;
	}
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;
	/* The architecture manual I have doesn't explicitly state which
	   value the U bit should have for register->register moves, but
	   the equivalent VORR instruction has U = 0, so do that.  */
	inst.instruction = 0x0200110;
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	/* VORR with Rn == Rm: the source goes in both the N and M
	   register fields.  */
	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
	inst.instruction |= neon_quad (rs) << 6;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 11 (fconstd).  */
	  ldconst = "fconstd";
	  goto encode_fconstd;
	}
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      neon_dp_fixup (&inst);
      break;

    case NS_SR:  /* case 4.  */
      {
	unsigned bcdebits = 0;
	int logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

	/* .<size> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* The B:C:D:E bits encode the element size; the lane index is
	   merged in below.  */
	switch (et.size)
	  {
	  case 8: bcdebits = 0x8; break;
	  case 16: bcdebits = 0x1; break;
	  case 32: bcdebits = 0x0; break;
	  default: ;
	  }

	bcdebits |= x << logsize;

	inst.instruction = 0xe000b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[1].reg << 12;
	inst.instruction |= (bcdebits & 3) << 5;
	inst.instruction |= (bcdebits >> 2) << 21;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6.  */
      {
	unsigned logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
	unsigned abcdebits = 0;

	/* .<dt> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL,
			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* The A:B:C:D:E bits encode signedness and element size; the
	   lane index is merged in below.  */
	switch (et.size)
	  {
	  case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
	  case 32: abcdebits = 0x00; break;
	  default: ;
	  }

	abcdebits |= x << logsize;
	inst.instruction = 0xe100b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[0].reg << 12;
	inst.instruction |= (abcdebits & 3) << 5;
	inst.instruction |= (abcdebits >> 2) << 21;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_HI:
    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
    encode_fconstd:
      /* Only immediates representable in the 8-bit "quarter float"
	 encoding can be loaded this way.  */
      if (is_quarter_float (inst.operands[1].imm))
	{
	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
	  do_vfp_nsyn_opcode (ldconst);

	  /* ARMv8.2 fp16 vmov.f16 instruction.  */
	  if (rs == NS_HI)
	    do_scalar_fp16_v82_encode ();
	}
      else
	first_error (_("immediate out of range"));
      break;

    case NS_RH:
    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_RH)
	do_scalar_fp16_v82_encode ();
      break;

    case NS_HR:
    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_HR)
	do_scalar_fp16_v82_encode ();
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    case NS_NULL:
      /* neon_select_shape has determined that the instruction
	 shape is wrong and has already set the error message.  */
      break;

    default:
      abort ();
    }
}
16492
/* Assemble V{R}SHR with an immediate shift count.  A count of zero is
   assembled as the equivalent VMOV.  */

static void
do_neon_rshift_round_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
  int imm = inst.operands[2].imm;

  /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      do_neon_mov ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for shift"));
  /* Right shifts are encoded as et.size - imm.  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
		  et.size - imm);
}
16513
/* Half-precision (HH shape) scalar VFP move; requires ARMv8 VFP and is
   encoded unconditionally (condition field 0xF).  */

static void
do_neon_movhf (void)
{
  enum neon_shape rs = neon_select_shape (NS_HH, NS_NULL);
  constraint (rs != NS_HH, _("invalid suffix"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
	      _(BAD_FPU));

  do_vfp_sp_monadic ();

  inst.is_neon = 1;
  inst.instruction |= 0xf0000000;
}
16528
/* Assemble VMOVL (widening move, Q destination from D source); the
   element size is encoded starting at bit 19.  */

static void
do_neon_movl (void)
{
  struct neon_type_el et = neon_check_type (2, NS_QD,
    N_EQK | N_DBL, N_SU_32 | N_KEY);
  unsigned sizebits = et.size >> 3;
  inst.instruction |= sizebits << 19;
  neon_two_same (0, et.type == NT_unsigned, -1);
}
16538
/* Assemble VTRN (also used as the fallback encoding for some VZIP/VUZP
   forms -- see do_neon_zip_uzp).  */

static void
do_neon_trn (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (neon_quad (rs), 1, et.size);
}
16548
/* Assemble VZIP/VUZP.  The 32-bit doubleword form has no encoding of its
   own and is emitted as VTRN.32 instead.  */

static void
do_neon_zip_uzp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  if (rs == NS_DD && et.size == 32)
    {
      /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
      inst.instruction = N_MNEM_vtrn;
      do_neon_trn ();
      return;
    }
  neon_two_same (neon_quad (rs), 1, et.size);
}
16564
16565 static void
16566 do_neon_sat_abs_neg (void)
16567 {
16568 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16569 struct neon_type_el et = neon_check_type (2, rs,
16570 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16571 neon_two_same (neon_quad (rs), 1, et.size);
16572 }
16573
16574 static void
16575 do_neon_pair_long (void)
16576 {
16577 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16578 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
16579 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
16580 inst.instruction |= (et.type == NT_unsigned) << 7;
16581 neon_two_same (neon_quad (rs), 1, et.size);
16582 }
16583
16584 static void
16585 do_neon_recip_est (void)
16586 {
16587 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16588 struct neon_type_el et = neon_check_type (2, rs,
16589 N_EQK | N_FLT, N_F_16_32 | N_U32 | N_KEY);
16590 inst.instruction |= (et.type == NT_float) << 8;
16591 neon_two_same (neon_quad (rs), 1, et.size);
16592 }
16593
16594 static void
16595 do_neon_cls (void)
16596 {
16597 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16598 struct neon_type_el et = neon_check_type (2, rs,
16599 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16600 neon_two_same (neon_quad (rs), 1, et.size);
16601 }
16602
16603 static void
16604 do_neon_clz (void)
16605 {
16606 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16607 struct neon_type_el et = neon_check_type (2, rs,
16608 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
16609 neon_two_same (neon_quad (rs), 1, et.size);
16610 }
16611
16612 static void
16613 do_neon_cnt (void)
16614 {
16615 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16616 struct neon_type_el et = neon_check_type (2, rs,
16617 N_EQK | N_INT, N_8 | N_KEY);
16618 neon_two_same (neon_quad (rs), 1, et.size);
16619 }
16620
16621 static void
16622 do_neon_swp (void)
16623 {
16624 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16625 neon_two_same (neon_quad (rs), 1, -1);
16626 }
16627
/* VTBL / VTBX: table lookup using a list of 1-4 D registers as the
   table.  Elements are always 8 bits; the type check is performed for
   its diagnostics only.  */

static void
do_neon_tbl_tbx (void)
{
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  /* List length 1..4 is encoded as 0..3 in bits 9:8.  */
  listlenbits = inst.operands[1].imm - 1;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  neon_dp_fixup (&inst);
}
16651
/* VLDM / VSTM of D registers (S-register lists are handed off to the
   VFP encoder).  */

static void
do_neon_ldm_stm (void)
{
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* Each D register occupies two words in the transfer count field.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      /* Single-precision register list: use the VFP FLDM/FSTM path.  */
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
16681
/* VLDR / VSTR of S or D registers, encoded via the legacy VFP mnemonics
   (flds/fsts/fldd/fstd).  Bit 20 (L) distinguishes loads from stores.  */

static void
do_neon_ldr_str (void)
{
  int is_ldr = (inst.instruction & (1 << 20)) != 0;

  /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
     And is UNPREDICTABLE in thumb mode.  */
  if (!is_ldr
      && inst.operands[1].reg == REG_PC
      && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
    {
      if (thumb_mode)
	inst.error = _("Use of PC here is UNPREDICTABLE");
      else if (warn_on_deprecated)
	as_tsktsk (_("Use of PC here is deprecated"));
    }

  if (inst.operands[0].issingle)
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("flds");
      else
	do_vfp_nsyn_opcode ("fsts");

      /* ARMv8.2 vldr.16/vstr.16 instruction.  */
      if (inst.vectype.el[0].size == 16)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("fldd");
      else
	do_vfp_nsyn_opcode ("fstd");
    }
}
16718
16719 /* "interleave" version also handles non-interleaving register VLD1/VST1
16720 instructions. */
16721
static void
do_neon_ld_st_interleave (void)
{
  /* Encode VLD<n>/VST<n> multiple-structure forms: alignment, element
     size and the "type" field derived from the register list shape.  */
  struct neon_type_el et = neon_check_type (1, NS_NULL,
					    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1.  */
       -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2.  */
       -1, -1, -1, -1, 0x4, 0x5, -1, -1,  /* VLD3 / VST3.  */
       -1, -1, -1, -1, -1, -1, 0x0, 0x1   /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* Alignment (in bits, given after '@' or ':') encodes as 1/2/3 in
     bits 5:4; 128- and 256-bit alignment are only valid for certain
     list lengths.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 2;
	break;
      case 256:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 3;
	break;
      default:
      bad_alignment:
	first_error (_("bad alignment"));
	return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6].  We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask.  Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
	| (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));
  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
	      _("bad element type for instruction"));

  /* Replace the provisional <n> in bits 9:8 with the looked-up type.  */
  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
16787
16788 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
16789 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
16790 otherwise. The variable arguments are a list of pairs of legal (size, align)
16791 values, terminated with -1. */
16792
16793 static int
16794 neon_alignment_bit (int size, int align, int *do_alignment, ...)
16795 {
16796 va_list ap;
16797 int result = FAIL, thissize, thisalign;
16798
16799 if (!inst.operands[1].immisalign)
16800 {
16801 *do_alignment = 0;
16802 return SUCCESS;
16803 }
16804
16805 va_start (ap, do_alignment);
16806
16807 do
16808 {
16809 thissize = va_arg (ap, int);
16810 if (thissize == -1)
16811 break;
16812 thisalign = va_arg (ap, int);
16813
16814 if (size == thissize && align == thisalign)
16815 result = SUCCESS;
16816 }
16817 while (result != SUCCESS);
16818
16819 va_end (ap);
16820
16821 if (result == SUCCESS)
16822 *do_alignment = 1;
16823 else
16824 first_error (_("unsupported alignment for instruction"));
16825
16826 return result;
16827 }
16828
static void
do_neon_ld_st_lane (void)
{
  /* Encode the single-lane VLD<n>/VST<n> forms: check list length, lane
     index and stride, then set the per-<n> alignment and index fields.  */
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;
  int logsize = neon_logbits (et.size);
  int align = inst.operands[1].imm >> 8;
  /* <n> minus one, from bits 9:8 of the provisional bitmask.  */
  int n = (inst.instruction >> 8) & 3;
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 16,
				       16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  /* Lane index occupies the bits just above the alignment/stride field.  */
  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
16913
16914 /* Encode single n-element structure to all lanes VLD<n> instructions. */
16915
static void
do_neon_ld_dup (void)
{
  /* Encode the load-to-all-lanes (dup) forms of VLD1..VLD4: validate
     list length/alignment per <n>, then set size, stride and alignment
     bits.  */
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;

  if (et.type == NT_invtype)
    return;

  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      /* One or two registers; bit 5 set selects the two-register form.  */
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 8, 16, 16, 32, 32, 64,
				       -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      /* Bit 5 encodes a register stride of two.  */
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* 32-bit elements with 128-bit alignment use the special size
	   encoding 0x3.  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  /* The alignment flag itself lives in bit 4.  */
  inst.instruction |= do_alignment << 4;
}
16988
/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   apart from bits [11:4]).  */
16991
16992 static void
16993 do_neon_ldx_stx (void)
16994 {
16995 if (inst.operands[1].isreg)
16996 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
16997
16998 switch (NEON_LANE (inst.operands[0].imm))
16999 {
17000 case NEON_INTERLEAVE_LANES:
17001 NEON_ENCODE (INTERLV, inst);
17002 do_neon_ld_st_interleave ();
17003 break;
17004
17005 case NEON_ALL_LANES:
17006 NEON_ENCODE (DUP, inst);
17007 if (inst.instruction == N_INV)
17008 {
17009 first_error ("only loads support such operands");
17010 break;
17011 }
17012 do_neon_ld_dup ();
17013 break;
17014
17015 default:
17016 NEON_ENCODE (LANE, inst);
17017 do_neon_ld_st_lane ();
17018 }
17019
17020 /* L bit comes from bit mask. */
17021 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17022 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17023 inst.instruction |= inst.operands[1].reg << 16;
17024
17025 if (inst.operands[1].postind)
17026 {
17027 int postreg = inst.operands[1].imm & 0xf;
17028 constraint (!inst.operands[1].immisreg,
17029 _("post-index must be a register"));
17030 constraint (postreg == 0xd || postreg == 0xf,
17031 _("bad register for post-index"));
17032 inst.instruction |= postreg;
17033 }
17034 else
17035 {
17036 constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
17037 constraint (inst.reloc.exp.X_op != O_constant
17038 || inst.reloc.exp.X_add_number != 0,
17039 BAD_ADDR_MODE);
17040
17041 if (inst.operands[1].writeback)
17042 {
17043 inst.instruction |= 0xd;
17044 }
17045 else
17046 inst.instruction |= 0xf;
17047 }
17048
17049 if (thumb_mode)
17050 inst.instruction |= 0xf9000000;
17051 else
17052 inst.instruction |= 0xf4000000;
17053 }
17054
17055 /* FP v8. */
static void
do_vfp_nsyn_fpv8 (enum neon_shape rs)
{
  /* Shared encoder for scalar FP v8 instructions (used by VSEL, VMAXNM
     etc. via try_vfp_nsyn).  RS is the already-selected operand shape.  */

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  NEON_ENCODE (FPV8, inst);

  if (rs == NS_FFF || rs == NS_HHH)
    {
      do_vfp_sp_dyadic ();

      /* ARMv8.2 fp16 instruction.  */
      if (rs == NS_HHH)
	do_scalar_fp16_v82_encode ();
    }
  else
    do_vfp_dp_rd_rn_rm ();

  /* Bit 8 distinguishes the double-precision form.  */
  if (rs == NS_DDD)
    inst.instruction |= 0x100;

  /* Force the 0xF (unconditional) top nibble.  */
  inst.instruction |= 0xf0000000;
}
17083
17084 static void
17085 do_vsel (void)
17086 {
17087 set_it_insn_type (OUTSIDE_IT_INSN);
17088
17089 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
17090 first_error (_("invalid instruction shape"));
17091 }
17092
17093 static void
17094 do_vmaxnm (void)
17095 {
17096 set_it_insn_type (OUTSIDE_IT_INSN);
17097
17098 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
17099 return;
17100
17101 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
17102 return;
17103
17104 neon_dyadic_misc (NT_untyped, N_F_16_32, 0);
17105 }
17106
static void
do_vrint_1 (enum neon_cvt_mode mode)
{
  /* Common encoder for the VRINT family.  MODE selects the rounding
     behaviour; the scalar VFP encoding is tried first, falling back to
     the Neon vector encoding.  */
  enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et;

  if (rs == NS_NULL)
    return;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY
			| N_VFP);
  if (et.type != NT_invtype)
    {
      /* VFP encodings.  */
      /* The a/n/p/m modes carry their rounding mode in the opcode and
	 may not appear inside an IT block.  */
      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
	set_it_insn_type (OUTSIDE_IT_INSN);

      NEON_ENCODE (FPV8, inst);
      if (rs == NS_FF || rs == NS_HH)
	do_vfp_sp_monadic ();
      else
	do_vfp_dp_rd_rm ();

      switch (mode)
	{
	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
	default: abort ();
	}

      /* Bit 8 marks the double-precision form.  */
      inst.instruction |= (rs == NS_DD) << 8;
      do_vfp_cond_or_thumb ();

      /* ARMv8.2 fp16 vrint instruction.  */
      if (rs == NS_HH)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      /* Neon encodings (or something broken...).  */
      inst.error = NULL;
      et = neon_check_type (2, rs, N_EQK, N_F_16_32 | N_KEY);

      if (et.type == NT_invtype)
	return;

      set_it_insn_type (OUTSIDE_IT_INSN);
      NEON_ENCODE (FLOAT, inst);

      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	return;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Mask off the original size bits and reencode them.  */
      inst.instruction = ((inst.instruction & 0xfff3ffff)
			  | neon_logbits (et.size) << 18);

      /* Rounding mode is encoded in bits 9:7 for the Neon form.  */
      switch (mode)
	{
	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
	default: abort ();
	}

      if (thumb_mode)
	inst.instruction |= 0xfc000000;
      else
	inst.instruction |= 0xf0000000;
    }
}
17198
/* VRINTX: rounding mode X.  */
static void
do_vrintx (void)
{
  do_vrint_1 (neon_cvt_mode_x);
}

/* VRINTZ: rounding mode Z.  */
static void
do_vrintz (void)
{
  do_vrint_1 (neon_cvt_mode_z);
}

/* VRINTR: rounding mode R (VFP encoding only; see do_vrint_1).  */
static void
do_vrintr (void)
{
  do_vrint_1 (neon_cvt_mode_r);
}

/* VRINTA: rounding mode A.  */
static void
do_vrinta (void)
{
  do_vrint_1 (neon_cvt_mode_a);
}

/* VRINTN: rounding mode N.  */
static void
do_vrintn (void)
{
  do_vrint_1 (neon_cvt_mode_n);
}

/* VRINTP: rounding mode P.  */
static void
do_vrintp (void)
{
  do_vrint_1 (neon_cvt_mode_p);
}

/* VRINTM: rounding mode M.  */
static void
do_vrintm (void)
{
  do_vrint_1 (neon_cvt_mode_m);
}
17240
17241 /* Crypto v1 instructions. */
/* Crypto v1 instructions.  */

/* Shared encoder for two-operand crypto instructions (AES*, SHA1H,
   SHA1SU1, SHA256SU0).  ELTTYPE is the required element type; OP goes
   into bits 7:6, or is omitted when -1.  */
static void
do_crypto_2op_1 (unsigned elttype, int op)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
      == NT_invtype)
    return;

  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  if (op != -1)
    inst.instruction |= op << 6;

  if (thumb_mode)
    inst.instruction |= 0xfc000000;
  else
    inst.instruction |= 0xf0000000;
}
17266
/* Shared encoder for three-operand crypto instructions (SHA1*,
   SHA256*).  U selects the instruction group; OP selects the
   individual instruction via the size field (8 << op).  */
static void
do_crypto_3op_1 (int u, int op)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
		       N_32 | N_UNT | N_KEY).type == NT_invtype)
    return;

  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  neon_three_same (1, u, 8 << op);
}
17281
/* AESE: 8-bit elements, op = 0.  */
static void
do_aese (void)
{
  do_crypto_2op_1 (N_8, 0);
}

/* AESD: 8-bit elements, op = 1.  */
static void
do_aesd (void)
{
  do_crypto_2op_1 (N_8, 1);
}

/* AESMC: 8-bit elements, op = 2.  */
static void
do_aesmc (void)
{
  do_crypto_2op_1 (N_8, 2);
}

/* AESIMC: 8-bit elements, op = 3.  */
static void
do_aesimc (void)
{
  do_crypto_2op_1 (N_8, 3);
}

/* SHA1C: group 0, op = 0.  */
static void
do_sha1c (void)
{
  do_crypto_3op_1 (0, 0);
}

/* SHA1P: group 0, op = 1.  */
static void
do_sha1p (void)
{
  do_crypto_3op_1 (0, 1);
}

/* SHA1M: group 0, op = 2.  */
static void
do_sha1m (void)
{
  do_crypto_3op_1 (0, 2);
}

/* SHA1SU0: group 0, op = 3.  */
static void
do_sha1su0 (void)
{
  do_crypto_3op_1 (0, 3);
}

/* SHA256H: group 1, op = 0.  */
static void
do_sha256h (void)
{
  do_crypto_3op_1 (1, 0);
}

/* SHA256H2: group 1, op = 1.  */
static void
do_sha256h2 (void)
{
  do_crypto_3op_1 (1, 1);
}

/* SHA256SU1: group 1, op = 2.  */
static void
do_sha256su1 (void)
{
  do_crypto_3op_1 (1, 2);
}

/* SHA1H: 32-bit elements, no op field.  */
static void
do_sha1h (void)
{
  do_crypto_2op_1 (N_32, -1);
}

/* SHA1SU1: 32-bit elements, op = 0.  */
static void
do_sha1su1 (void)
{
  do_crypto_2op_1 (N_32, 0);
}

/* SHA256SU0: 32-bit elements, op = 1.  */
static void
do_sha256su0 (void)
{
  do_crypto_2op_1 (N_32, 1);
}
17365
/* Shared encoder for the CRC32 instructions.  POLY selects the
   polynomial (0: CRC32, 1: CRC32C); SZ is the size field (0: byte,
   1: halfword, 2: word).  The field positions differ between the ARM
   and Thumb encodings.  */
static void
do_crc32_1 (unsigned int poly, unsigned int sz)
{
  unsigned int Rd = inst.operands[0].reg;
  unsigned int Rn = inst.operands[1].reg;
  unsigned int Rm = inst.operands[2].reg;

  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
  inst.instruction |= LOW4 (Rn) << 16;
  inst.instruction |= LOW4 (Rm);
  inst.instruction |= sz << (thumb_mode ? 4 : 21);
  inst.instruction |= poly << (thumb_mode ? 20 : 9);

  /* PC (and, in Thumb, SP) operands are UNPREDICTABLE: warn but still
     assemble.  */
  if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
    as_warn (UNPRED_REG ("r15"));
  if (thumb_mode && (Rd == REG_SP || Rn == REG_SP || Rm == REG_SP))
    as_warn (UNPRED_REG ("r13"));
}
17385
/* CRC32B: CRC32 polynomial, byte.  */
static void
do_crc32b (void)
{
  do_crc32_1 (0, 0);
}

/* CRC32H: CRC32 polynomial, halfword.  */
static void
do_crc32h (void)
{
  do_crc32_1 (0, 1);
}

/* CRC32W: CRC32 polynomial, word.  */
static void
do_crc32w (void)
{
  do_crc32_1 (0, 2);
}

/* CRC32CB: CRC32C polynomial, byte.  */
static void
do_crc32cb (void)
{
  do_crc32_1 (1, 0);
}

/* CRC32CH: CRC32C polynomial, halfword.  */
static void
do_crc32ch (void)
{
  do_crc32_1 (1, 1);
}

/* CRC32CW: CRC32C polynomial, word.  */
static void
do_crc32cw (void)
{
  do_crc32_1 (1, 2);
}
17421
17422 \f
17423 /* Overall per-instruction processing. */
17424
17425 /* We need to be able to fix up arbitrary expressions in some statements.
17426 This is so that we can handle symbols that are an arbitrary distance from
17427 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
17428 which returns part of an address in a form which will be valid for
17429 a data instruction. We do this by pushing the expression into a symbol
17430 in the expr_section, and creating a fix for that. */
17431
static void
fix_new_arm (fragS *	   frag,
	     int	   where,
	     short int	   size,
	     expressionS * exp,
	     int	   pc_rel,
	     int	   reloc)
{
  /* Create a fixup for EXP at FRAG/WHERE, normalising the expression
     first so the generic fixup machinery only ever sees a symbol plus
     offset (see the comment above this function).  */
  fixS *	   new_fix;

  switch (exp->X_op)
    {
    case O_constant:
      if (pc_rel)
	{
	  /* Create an absolute valued symbol, so we have something to
	     refer to in the object file.  Unfortunately for us, gas's
	     generic expression parsing will already have folded out
	     any use of .set foo/.type foo %function that may have
	     been used to set type information of the target location,
	     that's being specified symbolically.  We have to presume
	     the user knows what they are doing.  */
	  char name[16 + 8];
	  symbolS *symbol;

	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);

	  symbol = symbol_find_or_make (name);
	  S_SET_SEGMENT (symbol, absolute_section);
	  symbol_set_frag (symbol, &zero_address_frag);
	  S_SET_VALUE (symbol, exp->X_add_number);
	  exp->X_op = O_symbol;
	  exp->X_add_symbol = symbol;
	  exp->X_add_number = 0;
	}
      /* FALLTHROUGH */
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
			     (enum bfd_reloc_code_real) reloc);
      break;

    default:
      /* Anything more complex: push the expression into a symbol of
	 its own and fix up against that.  */
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
				  pc_rel, (enum bfd_reloc_code_real) reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
17485
/* Create a frag for an instruction requiring relaxation.  */
static void
output_relax_insn (void)
{
  /* Emit a variant frag for an instruction whose final size depends on
     relaxation; inst.relax carries the relaxation subtype.  */
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  /* Split the relocation expression into the symbol/offset form that
     frag_var expects.  */
  switch (inst.reloc.exp.X_op)
    {
    case O_symbol:
      sym = inst.reloc.exp.X_add_symbol;
      offset = inst.reloc.exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.reloc.exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.reloc.exp);
      offset = 0;
      break;
    }
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
17517
17518 /* Write a 32-bit thumb instruction to buf. */
17519 static void
17520 put_thumb32_insn (char * buf, unsigned long insn)
17521 {
17522 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
17523 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
17524 }
17525
static void
output_inst (const char * str)
{
  /* Emit the assembled instruction in `inst' to the current frag,
     together with any fixup and debug info.  STR is the original
     source line, used only for diagnostics.  */
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  if (inst.relax)
    {
      /* Size not yet known: emit a relaxable frag instead.  */
      output_relax_insn ();
      return;
    }
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* A doubled ARM instruction: the same word is written twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    fix_new_arm (frag_now, to - frag_now->fr_literal,
		 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
		 inst.reloc.type);

  dwarf2_emit_insn (inst.size);
}
17572
17573 static char *
17574 output_it_inst (int cond, int mask, char * to)
17575 {
17576 unsigned long instruction = 0xbf00;
17577
17578 mask &= 0xf;
17579 instruction |= mask;
17580 instruction |= cond << 4;
17581
17582 if (to == NULL)
17583 {
17584 to = frag_more (2);
17585 #ifdef OBJ_ELF
17586 dwarf2_emit_insn (2);
17587 #endif
17588 }
17589
17590 md_number_to_chars (to, instruction, 2);
17591
17592 return to;
17593 }
17594
/* Tag values used in struct asm_opcode's tag field.  They describe how
   a mnemonic interacts with condition-code affixes (see the comment on
   opcode_lookup below).  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
17628
/* Subroutine of md_assemble, responsible for looking up the primary
   opcode from the mnemonic the user wrote.  STR points to the
   beginning of the mnemonic.

   This is not simply a hash table lookup, because of conditional
   variants.  Most instructions have conditional variants, which are
   expressed with a _conditional affix_ to the mnemonic.  If we were
   to encode each conditional variant as a literal string in the opcode
   table, it would have approximately 20,000 entries.

   Most mnemonics take this affix as a suffix, and in unified syntax,
   'most' is upgraded to 'all'.  However, in the divided syntax, some
   instructions take the affix as an infix, notably the s-variants of
   the arithmetic instructions.  Of those instructions, all but six
   have the infix appear after the third character of the mnemonic.

   Accordingly, the algorithm for looking up primary opcodes given
   an identifier is:

   1. Look up the identifier in the opcode table.
      If we find a match, go to step U.

   2. Look up the last two characters of the identifier in the
      conditions table.  If we find a match, look up the first N-2
      characters of the identifier in the opcode table.  If we
      find a match, go to step CE.

   3. Look up the fourth and fifth characters of the identifier in
      the conditions table.  If we find a match, extract those
      characters from the identifier, and look up the remaining
      characters in the opcode table.  If we find a match, go
      to step CM.

   4. Fail.

   U. Examine the tag field of the opcode structure, in case this is
      one of the six instructions with its conditional infix in an
      unusual place.  If it is, the tag tells us where to find the
      infix; look it up in the conditions table and set inst.cond
      accordingly.  Otherwise, this is an unconditional instruction.
      Again set inst.cond accordingly.  Return the opcode structure.

   CE. Examine the tag field to make sure this is an instruction that
      should receive a conditional suffix.  If it is not, fail.
      Otherwise, set inst.cond from the suffix we already looked up,
      and return the opcode structure.

   CM. Examine the tag field to make sure this is an instruction that
      should receive a conditional infix after the third character.
      If it is not, fail.  Otherwise, undo the edits to the current
      line of input and proceed as for case CE.  */

static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || *end == '.')
      break;

  /* An empty mnemonic is not an instruction.  */
  if (end == base)
    return NULL;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
	 use.  */
      if (unified_syntax && end[1] == 'w')
	inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;

      inst.vectype.elems = 0;

      /* Advance past the width suffix (if any) so a following Neon type
	 suffix can be parsed from *STR.  */
      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix (possible in either unified or
	     non-unified ARM syntax mode).  */
	  if (parse_neon_type (&inst.vectype, str) == FAIL)
	    return NULL;
	}
      else if (end[offset] != '\0' && end[offset] != ' ')
	return NULL;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    end - base);
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
	{
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      /* One of the six mnemonics with the conditional infix in an odd
	 position; the tag encodes the infix offset.  */
      if (warn_on_deprecated && unified_syntax)
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      gas_assert (cond);

      inst.cond = cond->value;
      return opcode;
    }

  /* Cannot have a conditional suffix on a mnemonic of fewer than three
     characters: the suffix itself is two characters, leaving at least
     one for the base mnemonic.  */
  if (end - base < 3)
    return NULL;

  /* Look for suffixed mnemonic.  */
  affix = end - 2;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    affix - base);
  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
	  break;

	case OT_cinfix3:
	case OT_cinfix3_deprecated:
	case OT_odd_infix_unc:
	  /* In divided syntax these take an infix, not a suffix.  */
	  if (!unified_syntax)
	    return 0;
	  /* else fall through */

	case OT_csuffix:
	case OT_csuffixF:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    inst.cond = cond->value;
	  else
	    {
	      /* Delayed diagnostic.  */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return NULL;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return NULL;

  /* Look for infixed mnemonic in the usual position.  Temporarily
     splice the two infix characters out of the input line, look up the
     shortened mnemonic, then restore the line before returning.  */
  affix = base + 3;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return NULL;

  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
	  || opcode->tag == OT_cinfix3_deprecated
	  || opcode->tag == OT_csuf_or_in3
	  || opcode->tag == OT_cinfix3_legacy))
    {
      /* Step CM.  */
      if (warn_on_deprecated && unified_syntax
	  && (opcode->tag == OT_cinfix3
	      || opcode->tag == OT_cinfix3_deprecated))
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return NULL;
}
17837
17838 /* This function generates an initial IT instruction, leaving its block
17839 virtually open for the new instructions. Eventually,
17840 the mask will be updated by now_it_add_mask () each time
17841 a new instruction needs to be included in the IT block.
17842 Finally, the block is closed with close_automatic_it_block ().
17843 The block closure can be requested either from md_assemble (),
17844 a tencode (), or due to a label hook. */
17845
17846 static void
17847 new_automatic_it_block (int cond)
17848 {
17849 now_it.state = AUTOMATIC_IT_BLOCK;
17850 now_it.mask = 0x18;
17851 now_it.cc = cond;
17852 now_it.block_length = 1;
17853 mapping_state (MAP_THUMB);
17854 now_it.insn = output_it_inst (cond, now_it.mask, NULL);
17855 now_it.warn_deprecated = FALSE;
17856 now_it.insn_cond = TRUE;
17857 }
17858
17859 /* Close an automatic IT block.
17860 See comments in new_automatic_it_block (). */
17861
17862 static void
17863 close_automatic_it_block (void)
17864 {
17865 now_it.mask = 0x10;
17866 now_it.block_length = 0;
17867 }
17868
17869 /* Update the mask of the current automatically-generated IT
17870 instruction. See comments in new_automatic_it_block (). */
17871
17872 static void
17873 now_it_add_mask (int cond)
17874 {
17875 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
17876 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
17877 | ((bitvalue) << (nbit)))
17878 const int resulting_bit = (cond & 1);
17879
17880 now_it.mask &= 0xf;
17881 now_it.mask = SET_BIT_VALUE (now_it.mask,
17882 resulting_bit,
17883 (5 - now_it.block_length));
17884 now_it.mask = SET_BIT_VALUE (now_it.mask,
17885 1,
17886 ((5 - now_it.block_length) - 1) );
17887 output_it_inst (now_it.cc, now_it.mask, now_it.insn);
17888
17889 #undef CLEAR_BIT
17890 #undef SET_BIT_VALUE
17891 }
17892
17893 /* The IT blocks handling machinery is accessed through the these functions:
17894 it_fsm_pre_encode () from md_assemble ()
17895 set_it_insn_type () optional, from the tencode functions
17896 set_it_insn_type_last () ditto
17897 in_it_block () ditto
17898 it_fsm_post_encode () from md_assemble ()
     force_automatic_it_block_close () from label handling functions
17900
17901 Rationale:
17902 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
17903 initializing the IT insn type with a generic initial value depending
17904 on the inst.condition.
17905 2) During the tencode function, two things may happen:
17906 a) The tencode function overrides the IT insn type by
17907 calling either set_it_insn_type (type) or set_it_insn_type_last ().
17908 b) The tencode function queries the IT block state by
17909 calling in_it_block () (i.e. to determine narrow/not narrow mode).
17910
17911 Both set_it_insn_type and in_it_block run the internal FSM state
17912 handling function (handle_it_state), because: a) setting the IT insn
17913 type may incur in an invalid state (exiting the function),
17914 and b) querying the state requires the FSM to be updated.
17915 Specifically we want to avoid creating an IT block for conditional
17916 branches, so it_fsm_pre_encode is actually a guess and we can't
17917 determine whether an IT block is required until the tencode () routine
   has decided what type of instruction this actually is.
17919 Because of this, if set_it_insn_type and in_it_block have to be used,
17920 set_it_insn_type has to be called first.
17921
17922 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
17923 determines the insn IT type depending on the inst.cond code.
17924 When a tencode () routine encodes an instruction that can be
17925 either outside an IT block, or, in the case of being inside, has to be
17926 the last one, set_it_insn_type_last () will determine the proper
17927 IT instruction type based on the inst.cond code. Otherwise,
17928 set_it_insn_type can be called for overriding that logic or
17929 for covering other cases.
17930
17931 Calling handle_it_state () may not transition the IT block state to
   OUTSIDE_IT_BLOCK immediately, since the (current) state could be
17933 still queried. Instead, if the FSM determines that the state should
17934 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
17935 after the tencode () function: that's what it_fsm_post_encode () does.
17936
17937 Since in_it_block () calls the state handling function to get an
17938 updated state, an error may occur (due to invalid insns combination).
17939 In that case, inst.error is set.
17940 Therefore, inst.error has to be checked after the execution of
17941 the tencode () routine.
17942
17943 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
17944 any pending state change (if any) that didn't take place in
17945 handle_it_state () as explained above. */
17946
17947 static void
17948 it_fsm_pre_encode (void)
17949 {
17950 if (inst.cond != COND_ALWAYS)
17951 inst.it_insn_type = INSIDE_IT_INSN;
17952 else
17953 inst.it_insn_type = OUTSIDE_IT_INSN;
17954
17955 now_it.state_handled = 0;
17956 }
17957
/* IT state FSM handling function.  Advances the IT-block state machine
   for the instruction currently being assembled, using the insn type
   chosen by it_fsm_pre_encode () or set_it_insn_type ().  Returns
   SUCCESS, or FAIL with inst.error set when the instruction is illegal
   in the current IT context.  */

static int
handle_it_state (void)
{
  now_it.state_handled = 1;
  now_it.insn_cond = FALSE;

  switch (now_it.state)
    {
    case OUTSIDE_IT_BLOCK:
      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (thumb_mode == 0)
	    {
	      /* Assembling for ARM: a conditional insn needs no IT block,
		 but warn that the same source would need one for Thumb.  */
	      if (unified_syntax
		  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		as_tsktsk (_("Warning: conditional outside an IT block"\
			     " for Thumb."));
	    }
	  else
	    {
	      if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		{
		  /* Automatically generate the IT instruction.  */
		  new_automatic_it_block (inst.cond);
		  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
		    close_automatic_it_block ();
		}
	      else
		{
		  /* Conditional insn outside an IT block and no implicit
		     IT generation allowed.  */
		  inst.error = BAD_OUT_IT;
		  return FAIL;
		}
	    }
	  break;

	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case IT_INSN:
	  /* An explicit IT instruction opens a manual block.  */
	  now_it.state = MANUAL_IT_BLOCK;
	  now_it.block_length = 0;
	  break;
	}
      break;

    case AUTOMATIC_IT_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	 to incompatible conditions or
	 4 insns-length block reached).  */

      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_it_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_it.block_length++;

	  /* IT blocks hold at most four instructions, all with
	     compatible conditions; otherwise start over.  */
	  if (now_it.block_length > 4
	      || !now_it_compatible (inst.cond))
	    {
	      force_automatic_it_block_close ();
	      if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    {
	      now_it.insn_cond = TRUE;
	      now_it_add_mask (inst.cond);
	    }

	  if (now_it.state == AUTOMATIC_IT_BLOCK
	      && (inst.it_insn_type == INSIDE_IT_LAST_INSN
		  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	case NEUTRAL_IT_INSN:
	  /* Condition-agnostic insn: extend the block using the block's
	     own condition rather than the insn's.  */
	  now_it.block_length++;
	  now_it.insn_cond = TRUE;

	  if (now_it.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    now_it_add_mask (now_it.cc & 1);
	  break;

	case IT_INSN:
	  close_automatic_it_block ();
	  now_it.state = MANUAL_IT_BLOCK;
	  break;
	}
      break;

    case MANUAL_IT_BLOCK:
      {
	/* Check conditional suffixes.  COND is the condition this slot
	   of the user-written IT instruction demands.  */
	const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
	int is_last;
	now_it.mask <<= 1;
	now_it.mask &= 0x1f;
	is_last = (now_it.mask == 0x10);
	now_it.insn_cond = TRUE;

	switch (inst.it_insn_type)
	  {
	  case OUTSIDE_IT_INSN:
	    inst.error = BAD_NOT_IT;
	    return FAIL;

	  case INSIDE_IT_INSN:
	    if (cond != inst.cond)
	      {
		/* Insn's condition disagrees with the IT slot.  */
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    break;

	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		/* Branch-like insns must be the last in an IT block.  */
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in an IT block.  */
	    break;

	  case IT_INSN:
	    /* IT inside IT is not permitted.  */
	    inst.error = BAD_IT_IT;
	    return FAIL;
	  }
      }
      break;
    }

  return SUCCESS;
}
18121
/* A class of 16-bit Thumb encodings deprecated inside an IT block.  An
   encoding belongs to the class when (insn & mask) == pattern.  */
struct depr_insn_mask
{
  unsigned long pattern;	/* Expected bits after masking.  */
  unsigned long mask;		/* Which bits of the encoding to test.  */
  const char* description;	/* Class name used in the diagnostic.  */
};
18128
/* List of 16-bit instruction patterns deprecated in an IT block in
   ARMv8.  Terminated by an entry with a zero mask.  */
static const struct depr_insn_mask depr_it_insns[] = {
  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
  { 0xa000, 0xb800, N_("ADR") },
  { 0x4800, 0xf800, N_("Literal loads") },
  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
     field in asm_opcode. 'tvalue' is used at the stage this check happen.  */
  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
  { 0, 0, NULL }	/* Sentinel.  */
};
18143
18144 static void
18145 it_fsm_post_encode (void)
18146 {
18147 int is_last;
18148
18149 if (!now_it.state_handled)
18150 handle_it_state ();
18151
18152 if (now_it.insn_cond
18153 && !now_it.warn_deprecated
18154 && warn_on_deprecated
18155 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
18156 {
18157 if (inst.instruction >= 0x10000)
18158 {
18159 as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
18160 "deprecated in ARMv8"));
18161 now_it.warn_deprecated = TRUE;
18162 }
18163 else
18164 {
18165 const struct depr_insn_mask *p = depr_it_insns;
18166
18167 while (p->mask != 0)
18168 {
18169 if ((inst.instruction & p->mask) == p->pattern)
18170 {
18171 as_tsktsk (_("IT blocks containing 16-bit Thumb instructions "
18172 "of the following class are deprecated in ARMv8: "
18173 "%s"), p->description);
18174 now_it.warn_deprecated = TRUE;
18175 break;
18176 }
18177
18178 ++p;
18179 }
18180 }
18181
18182 if (now_it.block_length > 1)
18183 {
18184 as_tsktsk (_("IT blocks containing more than one conditional "
18185 "instruction are deprecated in ARMv8"));
18186 now_it.warn_deprecated = TRUE;
18187 }
18188 }
18189
18190 is_last = (now_it.mask == 0x10);
18191 if (is_last)
18192 {
18193 now_it.state = OUTSIDE_IT_BLOCK;
18194 now_it.mask = 0;
18195 }
18196 }
18197
18198 static void
18199 force_automatic_it_block_close (void)
18200 {
18201 if (now_it.state == AUTOMATIC_IT_BLOCK)
18202 {
18203 close_automatic_it_block ();
18204 now_it.state = OUTSIDE_IT_BLOCK;
18205 now_it.mask = 0;
18206 }
18207 }
18208
18209 static int
18210 in_it_block (void)
18211 {
18212 if (!now_it.state_handled)
18213 handle_it_state ();
18214
18215 return now_it.state != OUTSIDE_IT_BLOCK;
18216 }
18217
18218 /* Whether OPCODE only has T32 encoding. Since this function is only used by
18219 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
18220 here, hence the "known" in the function name. */
18221
18222 static bfd_boolean
18223 known_t32_only_insn (const struct asm_opcode *opcode)
18224 {
18225 /* Original Thumb-1 wide instruction. */
18226 if (opcode->tencode == do_t_blx
18227 || opcode->tencode == do_t_branch23
18228 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
18229 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
18230 return TRUE;
18231
18232 /* Wide-only instruction added to ARMv8-M Baseline. */
18233 if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m_m_only)
18234 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
18235 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
18236 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
18237 return TRUE;
18238
18239 return FALSE;
18240 }
18241
18242 /* Whether wide instruction variant can be used if available for a valid OPCODE
18243 in ARCH. */
18244
18245 static bfd_boolean
18246 t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
18247 {
18248 if (known_t32_only_insn (opcode))
18249 return TRUE;
18250
18251 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
18252 of variant T3 of B.W is checked in do_t_branch. */
18253 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
18254 && opcode->tencode == do_t_branch)
18255 return TRUE;
18256
18257 /* Wide instruction variants of all instructions with narrow *and* wide
18258 variants become available with ARMv6t2. Other opcodes are either
18259 narrow-only or wide-only and are thus available if OPCODE is valid. */
18260 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
18261 return TRUE;
18262
18263 /* OPCODE with narrow only instruction variant or wide variant not
18264 available. */
18265 return FALSE;
18266 }
18267
/* Assemble one source line STR: look up the mnemonic, validate it
   against the selected CPU and instruction-set mode, parse its
   operands, encode it (running the IT FSM around the encoder) and emit
   the result.  Errors are reported via as_bad/inst.error.  */
void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Start from a clean per-instruction state.  */
  memset (&inst, '\0', sizeof (inst));
  inst.reloc.type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_tsktsk (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
	  return;
	}
      /* Divided syntax only allows conditions on branches.  */
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      /* Two things are addressed here:
	 1) Implicit require narrow instructions on Thumb-1.
	    This avoids relaxation accidentally introducing Thumb-2
	    instructions.
	 2) Reject wide instructions in non Thumb-2 cores.

	 Only instructions with narrow and wide variants need to be handled
	 but selecting all non wide-only instructions is easier.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
	  && !t32_insn_ok (variant, opcode))
	{
	  if (inst.size_req == 0)
	    inst.size_req = 2;
	  else if (inst.size_req == 4)
	    {
	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
		as_bad (_("selected processor does not support 32bit wide "
			  "variant of instruction `%s'"), str);
	      else
		as_bad (_("selected processor does not support `%s' in "
			  "Thumb-2 mode"), str);
	      return;
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
	{
	  /* Prepare the it_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  /* 16-bit Thumb encodings must not fall in the 32-bit opcode
	     space (0xe800-0xffff); the encoder guarantees this.  */
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  The impact
	 of relaxable instructions will be considered later after we finish all
	 relaxation.  */
      if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
	variant = arm_arch_none;
      else
	variant = cpu_variant;
      if (inst.size == 4 && !t32_insn_ok (variant, opcode))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
	  return;
	}
      /* .w/.n make no sense for fixed-size ARM encodings.  */
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xFU << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  output_inst (str);
}
18457
18458 static void
18459 check_it_blocks_finished (void)
18460 {
18461 #ifdef OBJ_ELF
18462 asection *sect;
18463
18464 for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
18465 if (seg_info (sect)->tc_segment_info_data.current_it.state
18466 == MANUAL_IT_BLOCK)
18467 {
18468 as_warn (_("section '%s' finished with an open IT block."),
18469 sect->name);
18470 }
18471 #else
18472 if (now_it.state == MANUAL_IT_BLOCK)
18473 as_warn (_("file finished with an open IT block."));
18474 #endif
18475 }
18476
18477 /* Various frobbings of labels and their addresses. */
18478
18479 void
18480 arm_start_line_hook (void)
18481 {
18482 last_label_seen = NULL;
18483 }
18484
/* Hook run for every label SYM as it is defined: record it for later
   alignment, tag it with the current Thumb/interwork state, close any
   automatic IT block, mark Thumb function entry points, and emit
   DWARF line info for the label.  */
void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  /* Record whether the label was defined in Thumb or ARM code.  */
  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  /* A label must not fall inside an automatically-generated IT block.  */
  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:	.word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
18543
18544 bfd_boolean
18545 arm_data_in_code (void)
18546 {
18547 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
18548 {
18549 *input_line_pointer = '/';
18550 input_line_pointer += 5;
18551 *input_line_pointer = 0;
18552 return TRUE;
18553 }
18554
18555 return FALSE;
18556 }
18557
18558 char *
18559 arm_canonicalize_symbol_name (char * name)
18560 {
18561 int len;
18562
18563 if (thumb_mode && (len = strlen (name)) > 5
18564 && streq (name + len - 5, "/data"))
18565 *(name + len - 5) = 0;
18566
18567 return name;
18568 }
18569 \f
18570 /* Table of all register names defined by default. The user can
18571 define additional names with .req. Note that all register names
18572 should appear in both upper and lowercase variants. Some registers
18573 also have mixed-case names. */
18574
18575 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
18576 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
18577 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
18578 #define REGSET(p,t) \
18579 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
18580 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
18581 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
18582 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
18583 #define REGSETH(p,t) \
18584 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
18585 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
18586 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
18587 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
18588 #define REGSET2(p,t) \
18589 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
18590 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
18591 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
18592 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
18593 #define SPLRBANK(base,bank,t) \
18594 REGDEF(lr_##bank, 768|((base+0)<<16), t), \
18595 REGDEF(sp_##bank, 768|((base+1)<<16), t), \
18596 REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
18597 REGDEF(LR_##bank, 768|((base+0)<<16), t), \
18598 REGDEF(SP_##bank, 768|((base+1)<<16), t), \
18599 REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
18600
/* Master table of every register spelling the assembler accepts.  Each
   REGSET/REGDEF/REGNUM entry maps one spelling to an encoded register
   number plus a register type tag (RN = integer, CP/CN = coprocessor,
   FN = FPA, VFS/VFD = VFP single/double, NQ = Neon quad, RNB = banked,
   etc.).  Upper- and lower-case spellings are listed as separate
   entries, so lookups do not need to case-fold.  */
static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c, CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* ARM banked registers.  The encoded value is 512|(n<<16), with
     SPSR_BIT or-ed in for SPSR forms; SPLRBANK expands to the SP/LR/SPSR
     trio for one processor mode.  */
  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),

  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),

  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.  */
  REGSET(s,VFS),  REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.  */
  REGSET(d,VFD),  REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.  */
  REGSET2(q,NQ),  REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.	 */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid,	0,MMXWC),  REGDEF(wCID,	 0,MMXWC),  REGDEF(WCID,  0,MMXWC),
  REGDEF(wcon,	1,MMXWC),  REGDEF(wCon,	 1,MMXWC),  REGDEF(WCON,  1,MMXWC),
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
#undef REGDEF
#undef REGNUM
#undef REGSET
18718
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.  Every permutation of the f/s/x/c field
   letters is listed explicitly (the mask value is order-independent),
   so the parser can do a single exact-match lookup on the suffix.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.	*/
  {"f",	   PSR_f},
  {"c",	   PSR_c},
  {"x",	   PSR_x},
  {"s",	   PSR_s},

  /* Combinations of flags.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
18797
/* Table of V7M psr names.  The value is the special-register encoding
   used by the M-profile MRS/MSR instructions.  Each *_ns ("non-secure")
   spelling encodes the same register number with bit 7 (0x80) set.
   Both cases of each name are listed so lookup is case-sensitive.  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr",	  0x0 }, {"APSR",	  0x0 },
  {"iapsr",	  0x1 }, {"IAPSR",	  0x1 },
  {"eapsr",	  0x2 }, {"EAPSR",	  0x2 },
  {"psr",	  0x3 }, {"PSR",	  0x3 },
  {"xpsr",	  0x3 }, {"XPSR",	  0x3 }, {"xPSR",	  3 },
  {"ipsr",	  0x5 }, {"IPSR",	  0x5 },
  {"epsr",	  0x6 }, {"EPSR",	  0x6 },
  {"iepsr",	  0x7 }, {"IEPSR",	  0x7 },
  {"msp",	  0x8 }, {"MSP",	  0x8 },
  {"psp",	  0x9 }, {"PSP",	  0x9 },
  {"msplim",	  0xa }, {"MSPLIM",	  0xa },
  {"psplim",	  0xb }, {"PSPLIM",	  0xb },
  {"primask",	  0x10}, {"PRIMASK",	  0x10},
  {"basepri",	  0x11}, {"BASEPRI",	  0x11},
  {"basepri_max", 0x12}, {"BASEPRI_MAX",  0x12},
  /* The second "basepri_max" row deliberately repeats the lower-case
     spelling so the mistyped upper-case form stays accepted.  */
  {"basepri_max", 0x12}, {"BASEPRI_MASK", 0x12}, /* Typo, preserved for backwards compatibility.  */
  {"faultmask",	  0x13}, {"FAULTMASK",	  0x13},
  {"control",	  0x14}, {"CONTROL",	  0x14},
  {"msp_ns",	  0x88}, {"MSP_NS",	  0x88},
  {"psp_ns",	  0x89}, {"PSP_NS",	  0x89},
  {"msplim_ns",	  0x8a}, {"MSPLIM_NS",	  0x8a},
  {"psplim_ns",	  0x8b}, {"PSPLIM_NS",	  0x8b},
  {"primask_ns",  0x90}, {"PRIMASK_NS",	  0x90},
  {"basepri_ns",  0x91}, {"BASEPRI_NS",	  0x91},
  {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
  {"control_ns",  0x94}, {"CONTROL_NS",	  0x94},
  {"sp_ns",	  0x98}, {"SP_NS",	  0x98 }
};
18829
/* Table of all shift-in-operand names.  "asl" is accepted as a
   synonym for "lsl"; both map to the same SHIFT_LSL code.  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
};
18840
/* Table of all explicit relocation names, mapping the specifier
   spelling written in the source (e.g. "(got)") to the corresponding
   BFD relocation code.  ELF-only.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},	 { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},	 { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},	 { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
	{ "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
	{ "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
	{ "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ}
};
#endif
18865
/* Table of all conditional affixes.  0xF is not defined as a condition code.
   "hs" is a synonym for "cs"; "ul" and "lo" are synonyms for "cc".  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
18885
/* Emit one table row for each case of a barrier option name: the
   lower-case spelling L and the upper-case spelling U, both mapping to
   the 4-bit option CODE and gated on architecture feature FEAT.  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }

/* Table of barrier (DMB/DSB/ISB) option names.  The *LD variants
   require ARMv8; the rest only need the basic barrier support.  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
};

#undef UL_BARRIER
18911
/* Table of ARM-format instructions.	*/

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }

/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
/* As TxC3, but the infix form is marked deprecated.  */
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above.  */
#define CE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Note: C3 stringizes its mnemonic (#mnem), so it is invoked with a
   bare identifier rather than a string literal.  */
#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  The
   Thumb opcode is formed by pasting 0xe (the "always" condition) in
   front of the same hex pattern as the ARM opcode.  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Helper for CM below: glues m1+m2+m3 into one spelling.  When m2 is
   empty, sizeof (#m2) == 1 (just the NUL), selecting the un-infixed
   tag; otherwise the tag records where the infix starts.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Expand one mnemonic with every condition-code infix (plus the bare
   form), for mnemonics whose infix sits at a non-standard position.  */
#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_ (m1,   , m2, op, nops, ops, ae),	\
  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
  xCM_ (m1, le, m2, op, nops, ops, ae),	\
  xCM_ (m1, al, m2, op, nops, ops, ae)

#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing. ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Null encoder: do_##ae with ae == 0 expands to "do_0", i.e. this
   constant, yielding a null function pointer in the table entry.  */
#define do_0 0
19088
19089 static const struct asm_opcode insns[] =
19090 {
19091 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
19092 #define THUMB_VARIANT & arm_ext_v4t
19093 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
19094 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
19095 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
19096 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
19097 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
19098 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
19099 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
19100 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
19101 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
19102 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
19103 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
19104 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
19105 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
19106 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
19107 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
19108 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
19109
19110 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
19111 for setting PSR flag bits. They are obsolete in V6 and do not
19112 have Thumb equivalents. */
19113 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
19114 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
19115 CL("tstp", 110f000, 2, (RR, SH), cmp),
19116 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
19117 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
19118 CL("cmpp", 150f000, 2, (RR, SH), cmp),
19119 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
19120 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
19121 CL("cmnp", 170f000, 2, (RR, SH), cmp),
19122
19123 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
19124 tC3("movs", 1b00000, _movs, 2, (RR, SHG), mov, t_mov_cmp),
19125 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
19126 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
19127
19128 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
19129 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
19130 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
19131 OP_RRnpc),
19132 OP_ADDRGLDR),ldst, t_ldst),
19133 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
19134
19135 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19136 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19137 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19138 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19139 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19140 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19141
19142 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
19143 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
19144 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
19145 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
19146
19147 /* Pseudo ops. */
19148 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
19149 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
19150 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
19151 tCE("udf", 7f000f0, _udf, 1, (oIffffb), bkpt, t_udf),
19152
19153 /* Thumb-compatibility pseudo ops. */
19154 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
19155 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
19156 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
19157 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
19158 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
19159 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
19160 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
19161 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
19162 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
19163 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
19164 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
19165 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
19166
19167 /* These may simplify to neg. */
19168 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
19169 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
19170
19171 #undef THUMB_VARIANT
19172 #define THUMB_VARIANT & arm_ext_v6
19173
19174 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
19175
19176 /* V1 instructions with no Thumb analogue prior to V6T2. */
19177 #undef THUMB_VARIANT
19178 #define THUMB_VARIANT & arm_ext_v6t2
19179
19180 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
19181 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
19182 CL("teqp", 130f000, 2, (RR, SH), cmp),
19183
19184 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19185 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19186 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
19187 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19188
19189 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19190 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19191
19192 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19193 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19194
19195 /* V1 instructions with no Thumb analogue at all. */
19196 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
19197 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
19198
19199 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
19200 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
19201 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
19202 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
19203 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
19204 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
19205 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
19206 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
19207
19208 #undef ARM_VARIANT
19209 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
19210 #undef THUMB_VARIANT
19211 #define THUMB_VARIANT & arm_ext_v4t
19212
19213 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
19214 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
19215
19216 #undef THUMB_VARIANT
19217 #define THUMB_VARIANT & arm_ext_v6t2
19218
19219 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
19220 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
19221
19222 /* Generic coprocessor instructions. */
19223 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
19224 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19225 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19226 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19227 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19228 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19229 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
19230
19231 #undef ARM_VARIANT
19232 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
19233
19234 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
19235 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
19236
19237 #undef ARM_VARIANT
19238 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
19239 #undef THUMB_VARIANT
19240 #define THUMB_VARIANT & arm_ext_msr
19241
19242 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
19243 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
19244
19245 #undef ARM_VARIANT
19246 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
19247 #undef THUMB_VARIANT
19248 #define THUMB_VARIANT & arm_ext_v6t2
19249
19250 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19251 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19252 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19253 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19254 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19255 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19256 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19257 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19258
19259 #undef ARM_VARIANT
19260 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
19261 #undef THUMB_VARIANT
19262 #define THUMB_VARIANT & arm_ext_v4t
19263
19264 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19265 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19266 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19267 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19268 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19269 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19270
19271 #undef ARM_VARIANT
19272 #define ARM_VARIANT & arm_ext_v4t_5
19273
19274 /* ARM Architecture 4T. */
19275 /* Note: bx (and blx) are required on V5, even if the processor does
19276 not support Thumb. */
19277 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
19278
19279 #undef ARM_VARIANT
19280 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
19281 #undef THUMB_VARIANT
19282 #define THUMB_VARIANT & arm_ext_v5t
19283
19284 /* Note: blx has 2 variants; the .value coded here is for
19285 BLX(2). Only this variant has conditional execution. */
19286 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
19287 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
19288
19289 #undef THUMB_VARIANT
19290 #define THUMB_VARIANT & arm_ext_v6t2
19291
19292 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
19293 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19294 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19295 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19296 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19297 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
19298 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19299 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19300
19301 #undef ARM_VARIANT
19302 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
19303 #undef THUMB_VARIANT
19304 #define THUMB_VARIANT & arm_ext_v5exp
19305
19306 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19307 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19308 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19309 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19310
19311 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19312 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19313
19314 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19315 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19316 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19317 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19318
19319 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19320 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19321 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19322 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19323
19324 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19325 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19326
19327 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19328 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19329 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19330 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19331
19332 #undef ARM_VARIANT
19333 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
19334 #undef THUMB_VARIANT
19335 #define THUMB_VARIANT & arm_ext_v6t2
19336
19337 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
19338 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
19339 ldrd, t_ldstd),
19340 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
19341 ADDRGLDRS), ldrd, t_ldstd),
19342
19343 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19344 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19345
19346 #undef ARM_VARIANT
19347 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
19348
19349 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
19350
19351 #undef ARM_VARIANT
19352 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
19353 #undef THUMB_VARIANT
19354 #define THUMB_VARIANT & arm_ext_v6
19355
19356 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
19357 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
19358 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19359 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19360 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19361 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19362 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19363 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19364 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19365 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
19366
19367 #undef THUMB_VARIANT
19368 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19369
19370 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
19371 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19372 strex, t_strex),
19373 #undef THUMB_VARIANT
19374 #define THUMB_VARIANT & arm_ext_v6t2
19375
19376 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19377 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19378
19379 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
19380 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
19381
19382 /* ARM V6 not included in V7M. */
19383 #undef THUMB_VARIANT
19384 #define THUMB_VARIANT & arm_ext_v6_notm
19385 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19386 TUF("rfe", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19387 UF(rfeib, 9900a00, 1, (RRw), rfe),
19388 UF(rfeda, 8100a00, 1, (RRw), rfe),
19389 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
19390 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19391 UF(rfefa, 8100a00, 1, (RRw), rfe),
19392 TUF("rfeea", 9100a00, e810c000, 1, (RRw), rfe, rfe),
19393 UF(rfeed, 9900a00, 1, (RRw), rfe),
19394 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19395 TUF("srs", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19396 TUF("srsea", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19397 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
19398 UF(srsfa, 9c00500, 2, (oRRw, I31w), srs),
19399 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
19400 UF(srsed, 8400500, 2, (oRRw, I31w), srs),
19401 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
19402 TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
19403 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
19404
19405 /* ARM V6 not included in V7M (eg. integer SIMD). */
19406 #undef THUMB_VARIANT
19407 #define THUMB_VARIANT & arm_ext_v6_dsp
19408 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
19409 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
19410 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19411 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19412 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19413 /* Old name for QASX. */
19414 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19415 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19416 /* Old name for QSAX. */
19417 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19418 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19419 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19420 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19421 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19422 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19423 /* Old name for SASX. */
19424 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19425 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19426 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19427 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19428 /* Old name for SHASX. */
19429 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19430 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19431 /* Old name for SHSAX. */
19432 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19433 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19434 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19435 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19436 /* Old name for SSAX. */
19437 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19438 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19439 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19440 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19441 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19442 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19443 /* Old name for UASX. */
19444 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19445 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19446 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19447 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19448 /* Old name for UHASX. */
19449 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19450 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19451 /* Old name for UHSAX. */
19452 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19453 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19454 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19455 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19456 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19457 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19458 /* Old name for UQASX. */
19459 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19460 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19461 /* Old name for UQSAX. */
19462 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19463 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19464 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19465 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19466 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19467 /* Old name for USAX. */
19468 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19469 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19470 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19471 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19472 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19473 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19474 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19475 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19476 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19477 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19478 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19479 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19480 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19481 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19482 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19483 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19484 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19485 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19486 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19487 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19488 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19489 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19490 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19491 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19492 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19493 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19494 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19495 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19496 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19497 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
19498 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
19499 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19500 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19501 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
19502
19503 #undef ARM_VARIANT
19504 #define ARM_VARIANT & arm_ext_v6k
19505 #undef THUMB_VARIANT
19506 #define THUMB_VARIANT & arm_ext_v6k
19507
19508 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
19509 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
19510 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
19511 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
19512
19513 #undef THUMB_VARIANT
19514 #define THUMB_VARIANT & arm_ext_v6_notm
19515 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
19516 ldrexd, t_ldrexd),
19517 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
19518 RRnpcb), strexd, t_strexd),
19519
19520 #undef THUMB_VARIANT
19521 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19522 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
19523 rd_rn, rd_rn),
19524 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
19525 rd_rn, rd_rn),
19526 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19527 strex, t_strexbh),
19528 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19529 strex, t_strexbh),
19530 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
19531
19532 #undef ARM_VARIANT
19533 #define ARM_VARIANT & arm_ext_sec
19534 #undef THUMB_VARIANT
19535 #define THUMB_VARIANT & arm_ext_sec
19536
19537 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
19538
19539 #undef ARM_VARIANT
19540 #define ARM_VARIANT & arm_ext_virt
19541 #undef THUMB_VARIANT
19542 #define THUMB_VARIANT & arm_ext_virt
19543
19544 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
19545 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
19546
19547 #undef ARM_VARIANT
19548 #define ARM_VARIANT & arm_ext_pan
19549 #undef THUMB_VARIANT
19550 #define THUMB_VARIANT & arm_ext_pan
19551
19552 TUF("setpan", 1100000, b610, 1, (I7), setpan, t_setpan),
19553
19554 #undef ARM_VARIANT
19555 #define ARM_VARIANT & arm_ext_v6t2
19556 #undef THUMB_VARIANT
19557 #define THUMB_VARIANT & arm_ext_v6t2
19558
19559 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
19560 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
19561 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
19562 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
19563
19564 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
19565 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
19566
19567 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19568 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19569 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19570 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19571
19572 #undef THUMB_VARIANT
19573 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19574 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
19575 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
19576
19577 /* Thumb-only instructions. */
19578 #undef ARM_VARIANT
19579 #define ARM_VARIANT NULL
19580 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
19581 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
19582
19583 /* ARM does not really have an IT instruction, so always allow it.
19584 The opcode is copied from Thumb in order to allow warnings in
19585 -mimplicit-it=[never | arm] modes. */
19586 #undef ARM_VARIANT
19587 #define ARM_VARIANT & arm_ext_v1
19588 #undef THUMB_VARIANT
19589 #define THUMB_VARIANT & arm_ext_v6t2
19590
19591 TUE("it", bf08, bf08, 1, (COND), it, t_it),
19592 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
19593 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
19594 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
19595 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
19596 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
19597 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
19598 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
19599 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
19600 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
19601 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
19602 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
19603 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
19604 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
19605 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
19606 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
19607 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
19608 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
19609
19610 /* Thumb2 only instructions. */
19611 #undef ARM_VARIANT
19612 #define ARM_VARIANT NULL
19613
19614 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
19615 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
19616 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
19617 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
19618 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
19619 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
19620
19621 /* Hardware division instructions. */
19622 #undef ARM_VARIANT
19623 #define ARM_VARIANT & arm_ext_adiv
19624 #undef THUMB_VARIANT
19625 #define THUMB_VARIANT & arm_ext_div
19626
19627 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
19628 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
19629
19630 /* ARM V6M/V7 instructions. */
19631 #undef ARM_VARIANT
19632 #define ARM_VARIANT & arm_ext_barrier
19633 #undef THUMB_VARIANT
19634 #define THUMB_VARIANT & arm_ext_barrier
19635
19636 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
19637 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
19638 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
19639
19640 /* ARM V7 instructions. */
19641 #undef ARM_VARIANT
19642 #define ARM_VARIANT & arm_ext_v7
19643 #undef THUMB_VARIANT
19644 #define THUMB_VARIANT & arm_ext_v7
19645
19646 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
19647 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
19648
19649 #undef ARM_VARIANT
19650 #define ARM_VARIANT & arm_ext_mp
19651 #undef THUMB_VARIANT
19652 #define THUMB_VARIANT & arm_ext_mp
19653
19654 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
19655
19656 /* AArchv8 instructions. */
19657 #undef ARM_VARIANT
19658 #define ARM_VARIANT & arm_ext_v8
19659
19660 /* Instructions shared between armv8-a and armv8-m. */
19661 #undef THUMB_VARIANT
19662 #define THUMB_VARIANT & arm_ext_atomics
19663
19664 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19665 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19666 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19667 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19668 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19669 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19670 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19671 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
19672 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19673 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
19674 stlex, t_stlex),
19675 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
19676 stlex, t_stlex),
19677 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
19678 stlex, t_stlex),
19679 #undef THUMB_VARIANT
19680 #define THUMB_VARIANT & arm_ext_v8
19681
19682 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
19683 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
19684 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
19685 ldrexd, t_ldrexd),
19686 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
19687 strexd, t_strexd),
19688 /* ARMv8 T32 only. */
19689 #undef ARM_VARIANT
19690 #define ARM_VARIANT NULL
19691 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
19692 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
19693 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
19694
19695 /* FP for ARMv8. */
19696 #undef ARM_VARIANT
19697 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
19698 #undef THUMB_VARIANT
19699 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
19700
19701 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
19702 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
19703 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
19704 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
19705 nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
19706 nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
19707 nUF(vcvta, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvta),
19708 nUF(vcvtn, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtn),
19709 nUF(vcvtp, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtp),
19710 nUF(vcvtm, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtm),
19711 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
19712 nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ), vrintz),
19713 nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ), vrintx),
19714 nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ), vrinta),
19715 nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ), vrintn),
19716 nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ), vrintp),
19717 nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ), vrintm),
19718
19719 /* Crypto v1 extensions. */
19720 #undef ARM_VARIANT
19721 #define ARM_VARIANT & fpu_crypto_ext_armv8
19722 #undef THUMB_VARIANT
19723 #define THUMB_VARIANT & fpu_crypto_ext_armv8
19724
19725 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
19726 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
19727 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
19728 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
19729 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
19730 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
19731 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
19732 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
19733 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
19734 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
19735 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
19736 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
19737 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
19738 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
19739
19740 #undef ARM_VARIANT
19741 #define ARM_VARIANT & crc_ext_armv8
19742 #undef THUMB_VARIANT
19743 #define THUMB_VARIANT & crc_ext_armv8
19744 TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
19745 TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
19746 TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
19747 TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
19748 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
19749 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
19750
19751 /* ARMv8.2 RAS extension. */
19752 #undef ARM_VARIANT
19753 #define ARM_VARIANT & arm_ext_ras
19754 #undef THUMB_VARIANT
19755 #define THUMB_VARIANT & arm_ext_ras
19756 TUE ("esb", 320f010, f3af8010, 0, (), noargs, noargs),
19757
19758 #undef ARM_VARIANT
19759 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
19760 #undef THUMB_VARIANT
19761 #define THUMB_VARIANT NULL
19762
19763 cCE("wfs", e200110, 1, (RR), rd),
19764 cCE("rfs", e300110, 1, (RR), rd),
19765 cCE("wfc", e400110, 1, (RR), rd),
19766 cCE("rfc", e500110, 1, (RR), rd),
19767
19768 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
19769 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
19770 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
19771 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
19772
19773 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
19774 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
19775 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
19776 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
19777
19778 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
19779 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
19780 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
19781 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
19782 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
19783 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
19784 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
19785 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
19786 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
19787 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
19788 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
19789 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
19790
19791 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
19792 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
19793 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
19794 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
19795 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
19796 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
19797 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
19798 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
19799 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
19800 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
19801 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
19802 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
19803
19804 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
19805 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
19806 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
19807 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
19808 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
19809 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
19810 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
19811 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
19812 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
19813 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
19814 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
19815 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
19816
19817 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
19818 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
19819 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
19820 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
19821 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
19822 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
19823 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
19824 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
19825 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
19826 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
19827 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
19828 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
19829
19830 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
19831 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
19832 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
19833 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
19834 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
19835 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
19836 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
19837 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
19838 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
19839 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
19840 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
19841 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
19842
19843 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
19844 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
19845 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
19846 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
19847 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
19848 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
19849 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
19850 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
19851 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
19852 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
19853 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
19854 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
19855
19856 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
19857 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
19858 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
19859 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
19860 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
19861 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
19862 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
19863 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
19864 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
19865 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
19866 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
19867 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
19868
19869 cCL("exps", e708100, 2, (RF, RF_IF), rd_rm),
19870 cCL("expsp", e708120, 2, (RF, RF_IF), rd_rm),
19871 cCL("expsm", e708140, 2, (RF, RF_IF), rd_rm),
19872 cCL("expsz", e708160, 2, (RF, RF_IF), rd_rm),
19873 cCL("expd", e708180, 2, (RF, RF_IF), rd_rm),
19874 cCL("expdp", e7081a0, 2, (RF, RF_IF), rd_rm),
19875 cCL("expdm", e7081c0, 2, (RF, RF_IF), rd_rm),
19876 cCL("expdz", e7081e0, 2, (RF, RF_IF), rd_rm),
19877 	 cCL("expe",	e788100, 2, (RF, RF_IF),     rd_rm),
19878 	 cCL("expep",	e788120, 2, (RF, RF_IF),     rd_rm),
19879 	 cCL("expem",	e788140, 2, (RF, RF_IF),     rd_rm),
	/* Fix long-standing typo: this is the extended-precision zero-rounding
	   variant (encoding e788160 = expe + rounding bits 0x60), so the
	   mnemonic is "expez", not "expdz" — "expdz" (e7081e0) is already
	   defined above in the double-precision group.  */
19880 	 cCL("expez",	e788160, 2, (RF, RF_IF),     rd_rm),
19881
19882 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
19883 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
19884 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
19885 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
19886 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
19887 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
19888 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
19889 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
19890 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
19891 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
19892 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
19893 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
19894
19895 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
19896 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
19897 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
19898 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
19899 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
19900 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
19901 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
19902 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
19903 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
19904 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
19905 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
19906 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
19907
19908 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
19909 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
19910 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
19911 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
19912 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
19913 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
19914 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
19915 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
19916 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
19917 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
19918 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
19919 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
19920
19921 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
19922 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
19923 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
19924 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
19925 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
19926 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
19927 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
19928 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
19929 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
19930 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
19931 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
19932 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
19933
19934 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
19935 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
19936 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
19937 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
19938 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
19939 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
19940 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
19941 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
19942 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
19943 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
19944 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
19945 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
19946
19947 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
19948 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
19949 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
19950 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
19951 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
19952 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
19953 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
19954 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
19955 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
19956 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
19957 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
19958 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
19959
19960 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
19961 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
19962 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
19963 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
19964 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
19965 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
19966 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
19967 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
19968 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
19969 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
19970 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
19971 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
19972
19973 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
19974 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
19975 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
19976 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
19977 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
19978 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
19979 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
19980 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
19981 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
19982 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
19983 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
19984 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
19985
19986 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
19987 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
19988 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
19989 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
19990 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
19991 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19992 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19993 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19994 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
19995 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
19996 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
19997 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
19998
19999 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
20000 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
20001 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
20002 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
20003 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
20004 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20005 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20006 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20007 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
20008 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
20009 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
20010 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
20011
20012 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
20013 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
20014 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
20015 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
20016 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
20017 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20018 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20019 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20020 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
20021 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
20022 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
20023 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
20024
20025 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
20026 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
20027 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
20028 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
20029 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
20030 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20031 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20032 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20033 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
20034 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
20035 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
20036 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
20037
20038 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
20039 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
20040 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
20041 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
20042 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
20043 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20044 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20045 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20046 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
20047 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
20048 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
20049 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
20050
20051 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
20052 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
20053 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
20054 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
20055 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
20056 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20057 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20058 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20059 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
20060 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
20061 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
20062 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
20063
20064 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
20065 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
20066 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
20067 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
20068 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
20069 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20070 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20071 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20072 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
20073 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
20074 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
20075 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
20076
20077 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
20078 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
20079 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
20080 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
20081 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
20082 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20083 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20084 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20085 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
20086 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
20087 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
20088 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
20089
20090 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
20091 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
20092 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
20093 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
20094 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
20095 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20096 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20097 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20098 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
20099 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
20100 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
20101 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
20102
20103 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
20104 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
20105 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
20106 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
20107 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
20108 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20109 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20110 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20111 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
20112 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
20113 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
20114 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
20115
20116 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20117 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20118 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20119 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20120 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20121 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20122 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20123 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20124 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20125 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20126 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20127 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20128
20129 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20130 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20131 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20132 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20133 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20134 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20135 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20136 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20137 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20138 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20139 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20140 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20141
20142 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20143 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20144 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20145 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20146 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20147 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20148 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20149 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20150 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20151 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20152 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20153 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20154
20155 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
20156 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
20157 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
20158 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
20159
20160 cCL("flts", e000110, 2, (RF, RR), rn_rd),
20161 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
20162 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
20163 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
20164 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
20165 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
20166 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
20167 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
20168 cCL("flte", e080110, 2, (RF, RR), rn_rd),
20169 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
20170 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
20171 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
20172
20173 /* The implementation of the FIX instruction is broken on some
20174 assemblers, in that it accepts a precision specifier as well as a
20175 rounding specifier, despite the fact that this is meaningless.
20176 To be more compatible, we accept it as well, though of course it
20177 does not set any bits. */
20178 cCE("fix", e100110, 2, (RR, RF), rd_rm),
20179 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
20180 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
20181 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
20182 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
20183 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
20184 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
20185 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
20186 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
20187 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
20188 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
20189 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
20190 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
20191
20192 /* Instructions that were new with the real FPA, call them V2. */
20193 #undef ARM_VARIANT
20194 #define ARM_VARIANT & fpu_fpa_ext_v2
20195
20196 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20197 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20198 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20199 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20200 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20201 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20202
20203 #undef ARM_VARIANT
20204 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
20205
20206 /* Moves and type conversions. */
20207 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
20208 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
20209 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
20210 cCE("fmstat", ef1fa10, 0, (), noargs),
20211 cCE("vmrs", ef00a10, 2, (APSR_RR, RVC), vmrs),
20212 cCE("vmsr", ee00a10, 2, (RVC, RR), vmsr),
20213 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
20214 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
20215 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
20216 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
20217 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
20218 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
20219 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
20220 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
20221
20222 /* Memory operations. */
20223 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
20224 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
20225 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20226 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20227 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20228 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20229 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20230 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20231 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20232 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20233 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20234 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20235 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20236 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20237 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20238 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20239 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20240 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20241
20242 /* Monadic operations. */
20243 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
20244 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
20245 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
20246
20247 /* Dyadic operations. */
20248 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20249 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20250 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20251 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20252 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20253 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20254 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20255 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20256 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20257
20258 /* Comparisons. */
20259 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
20260 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
20261 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
20262 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
20263
20264 /* Double precision load/store are still present on single precision
20265 implementations. */
20266 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
20267 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
20268 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20269 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20270 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20271 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20272 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20273 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20274 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20275 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20276
20277 #undef ARM_VARIANT
20278 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
20279
20280 /* Moves and type conversions. */
20281 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20282 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
20283 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20284 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
20285 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
20286 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
20287 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
20288 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
20289 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
20290 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
20291 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20292 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
20293 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20294
20295 /* Monadic operations. */
20296 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20297 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20298 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20299
20300 /* Dyadic operations. */
20301 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20302 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20303 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20304 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20305 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20306 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20307 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20308 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20309 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20310
20311 /* Comparisons. */
20312 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20313 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
20314 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20315 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
20316
20317 #undef ARM_VARIANT
20318 #define ARM_VARIANT & fpu_vfp_ext_v2
20319
20320 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
20321 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
20322 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
20323 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
20324
20325 /* Instructions which may belong to either the Neon or VFP instruction sets.
20326 Individual encoder functions perform additional architecture checks. */
20327 #undef ARM_VARIANT
20328 #define ARM_VARIANT & fpu_vfp_ext_v1xd
20329 #undef THUMB_VARIANT
20330 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
20331
20332 /* These mnemonics are unique to VFP. */
20333 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
20334 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
20335 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20336 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20337 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20338 nCE(vcmp, _vcmp, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
20339 nCE(vcmpe, _vcmpe, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
20340 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
20341 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
20342 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
20343
20344 /* Mnemonics shared by Neon and VFP. */
20345 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
20346 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
20347 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
20348
20349 nCEF(vadd, _vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
20350 nCEF(vsub, _vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
20351
20352 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
20353 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
20354
20355 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20356 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20357 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20358 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20359 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20360 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20361 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
20362 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
20363
20364 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
20365 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
20366 NCEF(vcvtb, eb20a40, 2, (RVSD, RVSD), neon_cvtb),
20367 NCEF(vcvtt, eb20a40, 2, (RVSD, RVSD), neon_cvtt),
20368
20369
20370 /* NOTE: All VMOV encoding is special-cased! */
20371 NCE(vmov, 0, 1, (VMOV), neon_mov),
20372 NCE(vmovq, 0, 1, (VMOV), neon_mov),
20373
20374 #undef ARM_VARIANT
20375 #define ARM_VARIANT & arm_ext_fp16
20376 #undef THUMB_VARIANT
20377 #define THUMB_VARIANT & arm_ext_fp16
20378 /* New instructions added from v8.2, allowing the extraction and insertion of
20379 the upper 16 bits of a 32-bit vector register. */
20380 NCE (vmovx, eb00a40, 2, (RVS, RVS), neon_movhf),
20381 NCE (vins, eb00ac0, 2, (RVS, RVS), neon_movhf),
20382
20383 #undef THUMB_VARIANT
20384 #define THUMB_VARIANT & fpu_neon_ext_v1
20385 #undef ARM_VARIANT
20386 #define ARM_VARIANT & fpu_neon_ext_v1
20387
20388 /* Data processing with three registers of the same length. */
20389 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
20390 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
20391 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
20392 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20393 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20394 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20395 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20396 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20397 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20398 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
20399 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
20400 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
20401 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
20402 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
20403 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
20404 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
20405 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
20406 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
20407 /* If not immediate, fall back to neon_dyadic_i64_su.
20408 shl_imm should accept I8 I16 I32 I64,
20409 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
20410 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
20411 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
20412 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
20413 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
20414 /* Logic ops, types optional & ignored. */
20415 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20416 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20417 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20418 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20419 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20420 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20421 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20422 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20423 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
20424 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
20425 /* Bitfield ops, untyped. */
20426 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20427 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20428 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20429 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20430 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20431 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20432 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
20433 nUF(vabd, _vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20434 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20435 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20436 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20437 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20438 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20439 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
20440 back to neon_dyadic_if_su. */
20441 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20442 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
20443 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20444 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
20445 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20446 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
20447 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20448 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
20449 /* Comparison. Type I8 I16 I32 F32. */
20450 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
20451 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
20452 /* As above, D registers only. */
20453 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
20454 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
20455 /* Int and float variants, signedness unimportant. */
20456 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
20457 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
20458 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
20459 /* Add/sub take types I8 I16 I32 I64 F32. */
20460 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
20461 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
20462 /* vtst takes sizes 8, 16, 32. */
20463 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
20464 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
20465 /* VMUL takes I8 I16 I32 F32 P8. */
20466 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
20467 /* VQD{R}MULH takes S16 S32. */
20468 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20469 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20470 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20471 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20472 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20473 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
20474 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20475 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
20476 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20477 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
20478 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20479 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
20480 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
20481 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
20482 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
20483 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
20484 /* ARM v8.1 extension. */
20485 nUF (vqrdmlah, _vqrdmlah, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
20486 nUF (vqrdmlahq, _vqrdmlah, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
20487 nUF (vqrdmlsh, _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
20488 nUF (vqrdmlshq, _vqrdmlsh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
20489
20490 /* Two address, int/float. Types S8 S16 S32 F32. */
20491 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
20492 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
20493
20494 /* Data processing with two registers and a shift amount. */
20495 /* Right shifts, and variants with rounding.
20496 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
20497 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
20498 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
20499 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
20500 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
20501 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
20502 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
20503 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
20504 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
20505 /* Shift and insert. Sizes accepted 8 16 32 64. */
20506 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
20507 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
20508 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
20509 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
20510 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
20511 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
20512 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
20513 /* Right shift immediate, saturating & narrowing, with rounding variants.
20514 Types accepted S16 S32 S64 U16 U32 U64. */
20515 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
20516 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
20517 /* As above, unsigned. Types accepted S16 S32 S64. */
20518 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
20519 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
20520 /* Right shift narrowing. Types accepted I16 I32 I64. */
20521 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
20522 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
20523 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
20524 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
20525 /* CVT with optional immediate for fixed-point variant. */
20526 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
20527
20528 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
20529 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
20530
20531 /* Data processing, three registers of different lengths. */
20532 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
20533 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
20534 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
20535 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
20536 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
20537 /* If not scalar, fall back to neon_dyadic_long.
20538 Vector types as above, scalar types S16 S32 U16 U32. */
20539 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
20540 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
20541 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
20542 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
20543 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
20544 /* Dyadic, narrowing insns. Types I16 I32 I64. */
20545 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20546 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20547 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20548 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20549 /* Saturating doubling multiplies. Types S16 S32. */
20550 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20551 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20552 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20553 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
20554 S16 S32 U16 U32. */
20555 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
20556
20557 /* Extract. Size 8. */
20558 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
20559 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
20560
20561 /* Two registers, miscellaneous. */
20562 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
20563 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
20564 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
20565 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
20566 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
20567 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
20568 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
20569 /* Vector replicate. Sizes 8 16 32. */
20570 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
20571 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
20572 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
20573 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
20574 /* VMOVN. Types I16 I32 I64. */
20575 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
20576 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
20577 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
20578 /* VQMOVUN. Types S16 S32 S64. */
20579 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
20580 /* VZIP / VUZP. Sizes 8 16 32. */
20581 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
20582 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
20583 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
20584 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
20585 /* VQABS / VQNEG. Types S8 S16 S32. */
20586 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
20587 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
20588 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
20589 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
20590 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
20591 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
20592 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
20593 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
20594 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
20595 /* Reciprocal estimates. Types U32 F16 F32. */
20596 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
20597 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
20598 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
20599 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
20600 /* VCLS. Types S8 S16 S32. */
20601 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
20602 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
20603 /* VCLZ. Types I8 I16 I32. */
20604 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
20605 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
20606 /* VCNT. Size 8. */
20607 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
20608 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
20609 /* Two address, untyped. */
20610 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
20611 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
20612 /* VTRN. Sizes 8 16 32. */
20613 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
20614 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
20615
20616 /* Table lookup. Size 8. */
20617 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
20618 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
20619
20620 #undef THUMB_VARIANT
20621 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
20622 #undef ARM_VARIANT
20623 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
20624
20625 /* Neon element/structure load/store. */
20626 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
20627 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
20628 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
20629 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
20630 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
20631 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
20632 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
20633 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
20634
20635 #undef THUMB_VARIANT
20636 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
20637 #undef ARM_VARIANT
20638 #define ARM_VARIANT & fpu_vfp_ext_v3xd
20639 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
20640 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20641 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20642 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20643 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20644 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20645 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20646 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20647 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20648
20649 #undef THUMB_VARIANT
20650 #define THUMB_VARIANT & fpu_vfp_ext_v3
20651 #undef ARM_VARIANT
20652 #define ARM_VARIANT & fpu_vfp_ext_v3
20653
20654 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
20655 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20656 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20657 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20658 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20659 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20660 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20661 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20662 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20663
20664 #undef ARM_VARIANT
20665 #define ARM_VARIANT & fpu_vfp_ext_fma
20666 #undef THUMB_VARIANT
20667 #define THUMB_VARIANT & fpu_vfp_ext_fma
20668 /* Mnemonics shared by Neon and VFP. These are included in the
20669 VFP FMA variant; NEON and VFP FMA always includes the NEON
20670 FMA instructions. */
20671 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
20672 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
20673 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
20674 the v form should always be used. */
20675 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20676 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20677 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20678 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20679 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20680 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20681
20682 #undef THUMB_VARIANT
20683 #undef ARM_VARIANT
20684 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
20685
20686 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20687 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20688 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20689 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20690 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20691 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20692 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
20693 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
20694
20695 #undef ARM_VARIANT
20696 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
20697
20698 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
20699 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
20700 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
20701 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
20702 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
20703 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
20704 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
20705 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
20706 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
20707 cCE("textrmub",e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20708 cCE("textrmuh",e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20709 cCE("textrmuw",e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20710 cCE("textrmsb",e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20711 cCE("textrmsh",e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20712 cCE("textrmsw",e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20713 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20714 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20715 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20716 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
20717 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
20718 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20719 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20720 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20721 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20722 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20723 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20724 cCE("tmovmskb",e100030, 2, (RR, RIWR), rd_rn),
20725 cCE("tmovmskh",e500030, 2, (RR, RIWR), rd_rn),
20726 cCE("tmovmskw",e900030, 2, (RR, RIWR), rd_rn),
20727 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
20728 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
20729 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
20730 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
20731 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
20732 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
20733 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
20734 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
20735 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20736 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20737 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20738 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20739 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20740 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20741 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20742 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20743 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20744 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
20745 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20746 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20747 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20748 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20749 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20750 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20751 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20752 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20753 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20754 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20755 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20756 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20757 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20758 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20759 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20760 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20761 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20762 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20763 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20764 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20765 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20766 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
20767 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
20768 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20769 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20770 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20771 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20772 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20773 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20774 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20775 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20776 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20777 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20778 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20779 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20780 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20781 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20782 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20783 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20784 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20785 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20786 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
20787 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20788 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20789 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20790 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20791 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20792 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20793 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20794 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20795 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20796 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20797 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20798 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20799 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20800 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20801 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20802 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20803 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20804 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20805 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20806 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20807 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20808 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
20809 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20810 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20811 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20812 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20813 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20814 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20815 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20816 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20817 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20818 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20819 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20820 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20821 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20822 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20823 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20824 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20825 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20826 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20827 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20828 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20829 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
20830 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
20831 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20832 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20833 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20834 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20835 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20836 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20837 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20838 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20839 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20840 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
20841 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
20842 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
20843 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
20844 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
20845 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
20846 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20847 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20848 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20849 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
20850 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
20851 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
20852 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
20853 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
20854 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
20855 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20856 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20857 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20858 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20859 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
20860
20861 #undef ARM_VARIANT
20862 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
20863
20864 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
20865 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
20866 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
20867 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
20868 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
20869 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
20870 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20871 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20872 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20873 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20874 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20875 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20876 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20877 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20878 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20879 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20880 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20881 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20882 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20883 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20884 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
20885 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20886 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20887 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20888 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20889 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20890 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20891 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20892 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20893 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20894 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20895 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20896 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20897 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20898 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20899 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20900 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20901 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20902 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20903 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20904 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20905 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20906 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20907 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20908 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20909 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20910 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20911 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20912 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20913 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20914 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20915 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20916 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20917 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20918 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20919 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20920 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20921
20922 #undef ARM_VARIANT
20923 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
20924
20925 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
20926 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
20927 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
20928 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
20929 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
20930 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
20931 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
20932 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
20933 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
20934 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
20935 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
20936 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
20937 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
20938 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
20939 cCE("cfmv64lr",e000510, 2, (RMDX, RR), rn_rd),
20940 cCE("cfmvr64l",e100510, 2, (RR, RMDX), rd_rn),
20941 cCE("cfmv64hr",e000530, 2, (RMDX, RR), rn_rd),
20942 cCE("cfmvr64h",e100530, 2, (RR, RMDX), rd_rn),
20943 cCE("cfmval32",e200440, 2, (RMAX, RMFX), rd_rn),
20944 cCE("cfmv32al",e100440, 2, (RMFX, RMAX), rd_rn),
20945 cCE("cfmvam32",e200460, 2, (RMAX, RMFX), rd_rn),
20946 cCE("cfmv32am",e100460, 2, (RMFX, RMAX), rd_rn),
20947 cCE("cfmvah32",e200480, 2, (RMAX, RMFX), rd_rn),
20948 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX), rd_rn),
20949 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
20950 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
20951 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
20952 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
20953 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX), mav_dspsc),
20954 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS), rd),
20955 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
20956 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
20957 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
20958 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
20959 cCE("cfcvt32s",e000480, 2, (RMF, RMFX), rd_rn),
20960 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX), rd_rn),
20961 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX), rd_rn),
20962 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX), rd_rn),
20963 cCE("cfcvts32",e100580, 2, (RMFX, RMF), rd_rn),
20964 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD), rd_rn),
20965 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
20966 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
20967 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR), mav_triple),
20968 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR), mav_triple),
20969 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
20970 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
20971 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
20972 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
20973 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
20974 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
20975 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
20976 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
20977 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
20978 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
20979 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
20980 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
20981 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
20982 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
20983 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
20984 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
20985 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
20986 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
20987 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
20988 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
20989 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20990 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20991 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20992 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20993 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20994 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20995 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20996 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20997 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
20998 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
20999 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
21000 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
21001
21002 /* ARMv8-M instructions. */
21003 #undef ARM_VARIANT
21004 #define ARM_VARIANT NULL
21005 #undef THUMB_VARIANT
21006 #define THUMB_VARIANT & arm_ext_v8m
21007 TUE("sg", 0, e97fe97f, 0, (), 0, noargs),
21008 TUE("blxns", 0, 4784, 1, (RRnpc), 0, t_blx),
21009 TUE("bxns", 0, 4704, 1, (RRnpc), 0, t_bx),
21010 TUE("tt", 0, e840f000, 2, (RRnpc, RRnpc), 0, tt),
21011 TUE("ttt", 0, e840f040, 2, (RRnpc, RRnpc), 0, tt),
21012 TUE("tta", 0, e840f080, 2, (RRnpc, RRnpc), 0, tt),
21013 TUE("ttat", 0, e840f0c0, 2, (RRnpc, RRnpc), 0, tt),
21014
21015 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
21016 instructions behave as nop if no VFP is present. */
21017 #undef THUMB_VARIANT
21018 #define THUMB_VARIANT & arm_ext_v8m_main
21019 TUEc("vlldm", 0, ec300a00, 1, (RRnpc), rn),
21020 TUEc("vlstm", 0, ec200a00, 1, (RRnpc), rn),
21021 };
21022 #undef ARM_VARIANT
21023 #undef THUMB_VARIANT
21024 #undef TCE
21025 #undef TUE
21026 #undef TUF
21027 #undef TCC
21028 #undef cCE
21029 #undef cCL
21030 #undef C3E
21031 #undef CE
21032 #undef CM
21033 #undef UE
21034 #undef UF
21035 #undef UT
21036 #undef NUF
21037 #undef nUF
21038 #undef NCE
21039 #undef nCE
21040 #undef OPS0
21041 #undef OPS1
21042 #undef OPS2
21043 #undef OPS3
21044 #undef OPS4
21045 #undef OPS5
21046 #undef OPS6
21047 #undef do_0
21048 \f
21049 /* MD interface: bits in the object file. */
21050
21051 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
21052 for use in the a.out file, and stores them in the array pointed to by buf.
21053 This knows about the endian-ness of the target machine and does
21054 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
21055 2 (short) and 4 (long) Floating numbers are put out as a series of
21056 LITTLENUMS (shorts, here at least). */
21057
21058 void
21059 md_number_to_chars (char * buf, valueT val, int n)
21060 {
21061 if (target_big_endian)
21062 number_to_chars_bigendian (buf, val, n);
21063 else
21064 number_to_chars_littleendian (buf, val, n);
21065 }
21066
21067 static valueT
21068 md_chars_to_number (char * buf, int n)
21069 {
21070 valueT result = 0;
21071 unsigned char * where = (unsigned char *) buf;
21072
21073 if (target_big_endian)
21074 {
21075 while (n--)
21076 {
21077 result <<= 8;
21078 result |= (*where++ & 255);
21079 }
21080 }
21081 else
21082 {
21083 while (n--)
21084 {
21085 result <<= 8;
21086 result |= (where[n] & 255);
21087 }
21088 }
21089
21090 return result;
21091 }
21092
21093 /* MD interface: Sections. */
21094
21095 /* Calculate the maximum variable size (i.e., excluding fr_fix)
21096 that an rs_machine_dependent frag may reach. */
21097
21098 unsigned int
21099 arm_frag_max_var (fragS *fragp)
21100 {
21101 /* We only use rs_machine_dependent for variable-size Thumb instructions,
21102 which are either THUMB_SIZE (2) or INSN_SIZE (4).
21103
21104 Note that we generate relaxable instructions even for cases that don't
21105 really need it, like an immediate that's a trivial constant. So we're
21106 overestimating the instruction size for some of those cases. Rather
21107 than putting more intelligence here, it would probably be better to
21108 avoid generating a relaxation frag in the first place when it can be
21109 determined up front that a short instruction will suffice. */
21110
21111 gas_assert (fragp->fr_type == rs_machine_dependent);
21112 return INSN_SIZE;
21113 }
21114
21115 /* Estimate the size of a frag before relaxing. Assume everything fits in
21116 2 bytes. */
21117
21118 int
21119 md_estimate_size_before_relax (fragS * fragp,
21120 segT segtype ATTRIBUTE_UNUSED)
21121 {
21122 fragp->fr_var = 2;
21123 return 2;
21124 }
21125
21126 /* Convert a machine dependent frag. */
21127
21128 void
21129 md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
21130 {
21131 unsigned long insn;
21132 unsigned long old_op;
21133 char *buf;
21134 expressionS exp;
21135 fixS *fixp;
21136 int reloc_type;
21137 int pc_rel;
21138 int opcode;
21139
21140 buf = fragp->fr_literal + fragp->fr_fix;
21141
21142 old_op = bfd_get_16(abfd, buf);
21143 if (fragp->fr_symbol)
21144 {
21145 exp.X_op = O_symbol;
21146 exp.X_add_symbol = fragp->fr_symbol;
21147 }
21148 else
21149 {
21150 exp.X_op = O_constant;
21151 }
21152 exp.X_add_number = fragp->fr_offset;
21153 opcode = fragp->fr_subtype;
21154 switch (opcode)
21155 {
21156 case T_MNEM_ldr_pc:
21157 case T_MNEM_ldr_pc2:
21158 case T_MNEM_ldr_sp:
21159 case T_MNEM_str_sp:
21160 case T_MNEM_ldr:
21161 case T_MNEM_ldrb:
21162 case T_MNEM_ldrh:
21163 case T_MNEM_str:
21164 case T_MNEM_strb:
21165 case T_MNEM_strh:
21166 if (fragp->fr_var == 4)
21167 {
21168 insn = THUMB_OP32 (opcode);
21169 if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
21170 {
21171 insn |= (old_op & 0x700) << 4;
21172 }
21173 else
21174 {
21175 insn |= (old_op & 7) << 12;
21176 insn |= (old_op & 0x38) << 13;
21177 }
21178 insn |= 0x00000c00;
21179 put_thumb32_insn (buf, insn);
21180 reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
21181 }
21182 else
21183 {
21184 reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
21185 }
21186 pc_rel = (opcode == T_MNEM_ldr_pc2);
21187 break;
21188 case T_MNEM_adr:
21189 if (fragp->fr_var == 4)
21190 {
21191 insn = THUMB_OP32 (opcode);
21192 insn |= (old_op & 0xf0) << 4;
21193 put_thumb32_insn (buf, insn);
21194 reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
21195 }
21196 else
21197 {
21198 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
21199 exp.X_add_number -= 4;
21200 }
21201 pc_rel = 1;
21202 break;
21203 case T_MNEM_mov:
21204 case T_MNEM_movs:
21205 case T_MNEM_cmp:
21206 case T_MNEM_cmn:
21207 if (fragp->fr_var == 4)
21208 {
21209 int r0off = (opcode == T_MNEM_mov
21210 || opcode == T_MNEM_movs) ? 0 : 8;
21211 insn = THUMB_OP32 (opcode);
21212 insn = (insn & 0xe1ffffff) | 0x10000000;
21213 insn |= (old_op & 0x700) << r0off;
21214 put_thumb32_insn (buf, insn);
21215 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
21216 }
21217 else
21218 {
21219 reloc_type = BFD_RELOC_ARM_THUMB_IMM;
21220 }
21221 pc_rel = 0;
21222 break;
21223 case T_MNEM_b:
21224 if (fragp->fr_var == 4)
21225 {
21226 insn = THUMB_OP32(opcode);
21227 put_thumb32_insn (buf, insn);
21228 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
21229 }
21230 else
21231 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
21232 pc_rel = 1;
21233 break;
21234 case T_MNEM_bcond:
21235 if (fragp->fr_var == 4)
21236 {
21237 insn = THUMB_OP32(opcode);
21238 insn |= (old_op & 0xf00) << 14;
21239 put_thumb32_insn (buf, insn);
21240 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
21241 }
21242 else
21243 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
21244 pc_rel = 1;
21245 break;
21246 case T_MNEM_add_sp:
21247 case T_MNEM_add_pc:
21248 case T_MNEM_inc_sp:
21249 case T_MNEM_dec_sp:
21250 if (fragp->fr_var == 4)
21251 {
21252 /* ??? Choose between add and addw. */
21253 insn = THUMB_OP32 (opcode);
21254 insn |= (old_op & 0xf0) << 4;
21255 put_thumb32_insn (buf, insn);
21256 if (opcode == T_MNEM_add_pc)
21257 reloc_type = BFD_RELOC_ARM_T32_IMM12;
21258 else
21259 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
21260 }
21261 else
21262 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
21263 pc_rel = 0;
21264 break;
21265
21266 case T_MNEM_addi:
21267 case T_MNEM_addis:
21268 case T_MNEM_subi:
21269 case T_MNEM_subis:
21270 if (fragp->fr_var == 4)
21271 {
21272 insn = THUMB_OP32 (opcode);
21273 insn |= (old_op & 0xf0) << 4;
21274 insn |= (old_op & 0xf) << 16;
21275 put_thumb32_insn (buf, insn);
21276 if (insn & (1 << 20))
21277 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
21278 else
21279 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
21280 }
21281 else
21282 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
21283 pc_rel = 0;
21284 break;
21285 default:
21286 abort ();
21287 }
21288 fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
21289 (enum bfd_reloc_code_real) reloc_type);
21290 fixp->fx_file = fragp->fr_file;
21291 fixp->fx_line = fragp->fr_line;
21292 fragp->fr_fix += fragp->fr_var;
21293
21294 /* Set whether we use thumb-2 ISA based on final relaxation results. */
21295 if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
21296 && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
21297 ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
21298 }
21299
21300 /* Return the size of a relaxable immediate operand instruction.
21301 SHIFT and SIZE specify the form of the allowable immediate. */
21302 static int
21303 relax_immediate (fragS *fragp, int size, int shift)
21304 {
21305 offsetT offset;
21306 offsetT mask;
21307 offsetT low;
21308
21309 /* ??? Should be able to do better than this. */
21310 if (fragp->fr_symbol)
21311 return 4;
21312
21313 low = (1 << shift) - 1;
21314 mask = (1 << (shift + size)) - (1 << shift);
21315 offset = fragp->fr_offset;
21316 /* Force misaligned offsets to 32-bit variant. */
21317 if (offset & low)
21318 return 4;
21319 if (offset & ~mask)
21320 return 4;
21321 return 2;
21322 }
21323
/* Get the address of a symbol during relaxation.  STRETCH is the net
   number of bytes by which frags already processed on this pass have
   grown (negative if they shrank); it is used to predict where a
   not-yet-reached symbol will end up.  */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* Round the stretch towards zero to the alignment
		 boundary this frag enforces (fr_offset holds the
		 alignment power for rs_align frags).  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      /* The alignment absorbs the entire stretch; the symbol
		 will not move at all.  */
	      if (stretch == 0)
		break;
	    }
	}
      /* Only apply the remaining stretch if the symbol's frag was
	 actually found by walking forward from FRAGP.  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
21373
21374 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
21375 load. */
21376 static int
21377 relax_adr (fragS *fragp, asection *sec, long stretch)
21378 {
21379 addressT addr;
21380 offsetT val;
21381
21382 /* Assume worst case for symbols not known to be in the same section. */
21383 if (fragp->fr_symbol == NULL
21384 || !S_IS_DEFINED (fragp->fr_symbol)
21385 || sec != S_GET_SEGMENT (fragp->fr_symbol)
21386 || S_IS_WEAK (fragp->fr_symbol))
21387 return 4;
21388
21389 val = relaxed_symbol_addr (fragp, stretch);
21390 addr = fragp->fr_address + fragp->fr_fix;
21391 addr = (addr + 4) & ~3;
21392 /* Force misaligned targets to 32-bit variant. */
21393 if (val & 3)
21394 return 4;
21395 val -= addr;
21396 if (val < 0 || val > 1020)
21397 return 4;
21398 return 2;
21399 }
21400
21401 /* Return the size of a relaxable add/sub immediate instruction. */
21402 static int
21403 relax_addsub (fragS *fragp, asection *sec)
21404 {
21405 char *buf;
21406 int op;
21407
21408 buf = fragp->fr_literal + fragp->fr_fix;
21409 op = bfd_get_16(sec->owner, buf);
21410 if ((op & 0xf) == ((op >> 4) & 0xf))
21411 return relax_immediate (fragp, 8, 0);
21412 else
21413 return relax_immediate (fragp, 3, 0);
21414 }
21415
21416 /* Return TRUE iff the definition of symbol S could be pre-empted
21417 (overridden) at link or load time. */
21418 static bfd_boolean
21419 symbol_preemptible (symbolS *s)
21420 {
21421 /* Weak symbols can always be pre-empted. */
21422 if (S_IS_WEAK (s))
21423 return TRUE;
21424
21425 /* Non-global symbols cannot be pre-empted. */
21426 if (! S_IS_EXTERNAL (s))
21427 return FALSE;
21428
21429 #ifdef OBJ_ELF
21430 /* In ELF, a global symbol can be marked protected, or private. In that
21431 case it can't be pre-empted (other definitions in the same link unit
21432 would violate the ODR). */
21433 if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
21434 return FALSE;
21435 #endif
21436
21437 /* Other global symbols might be pre-empted. */
21438 return TRUE;
21439 }
21440
21441 /* Return the size of a relaxable branch instruction. BITS is the
21442 size of the offset field in the narrow instruction. */
21443
21444 static int
21445 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
21446 {
21447 addressT addr;
21448 offsetT val;
21449 offsetT limit;
21450
21451 /* Assume worst case for symbols not known to be in the same section. */
21452 if (!S_IS_DEFINED (fragp->fr_symbol)
21453 || sec != S_GET_SEGMENT (fragp->fr_symbol)
21454 || S_IS_WEAK (fragp->fr_symbol))
21455 return 4;
21456
21457 #ifdef OBJ_ELF
21458 /* A branch to a function in ARM state will require interworking. */
21459 if (S_IS_DEFINED (fragp->fr_symbol)
21460 && ARM_IS_FUNC (fragp->fr_symbol))
21461 return 4;
21462 #endif
21463
21464 if (symbol_preemptible (fragp->fr_symbol))
21465 return 4;
21466
21467 val = relaxed_symbol_addr (fragp, stretch);
21468 addr = fragp->fr_address + fragp->fr_fix + 4;
21469 val -= addr;
21470
21471 /* Offset is a signed value *2 */
21472 limit = 1 << bits;
21473 if (val >= limit || val < -limit)
21474 return 4;
21475 return 2;
21476 }
21477
21478
/* Relax a machine dependent frag.  This returns the amount by which
   the current size of the frag should change.  Each helper returns 2
   when the narrow (16-bit) encoding still fits, 4 otherwise.  */

int
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;

  /* fr_subtype records the mnemonic this frag was created for.  */
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      /* 8-bit immediate, word-scaled.  */
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      /* 5-bit immediate, word-scaled.  */
      newsize = relax_immediate (fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      /* 5-bit immediate, halfword-scaled.  */
      newsize = relax_immediate (fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      /* 5-bit immediate, unscaled.  */
      newsize = relax_immediate (fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      /* 8-bit immediate, unscaled.  */
      newsize = relax_immediate (fragp, 8, 0);
      break;
    case T_MNEM_b:
      /* Unconditional branch: 11-bit signed offset field.  */
      newsize = relax_branch (fragp, sec, 11, stretch);
      break;
    case T_MNEM_bcond:
      /* Conditional branch: 8-bit signed offset field.  */
      newsize = relax_branch (fragp, sec, 8, stretch);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      /* 7-bit immediate, word-scaled.  */
      newsize = relax_immediate (fragp, 7, 2);
      break;
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      newsize = relax_addsub (fragp, sec);
      break;
    default:
      abort ();
    }

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
    {
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane (fragp);
    }

  return newsize - oldsize;
}
21557
21558 /* Round up a section size to the appropriate boundary. */
21559
21560 valueT
21561 md_section_align (segT segment ATTRIBUTE_UNUSED,
21562 valueT size)
21563 {
21564 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
21565 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
21566 {
21567 /* For a.out, force the section size to be aligned. If we don't do
21568 this, BFD will align it for us, but it will not write out the
21569 final bytes of the section. This may be a bug in BFD, but it is
21570 easier to fix it here since that is how the other a.out targets
21571 work. */
21572 int align;
21573
21574 align = bfd_get_section_alignment (stdoutput, segment);
21575 size = ((size + (1 << align) - 1) & (-((valueT) 1 << align)));
21576 }
21577 #endif
21578
21579 return size;
21580 }
21581
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment: the alignment gap is padded with no-op
   instructions matching the frag's recorded ARM/Thumb mode, with any
   residual odd bytes zero-filled and marked as data.  */

void
arm_handle_align (fragS * fragP)
{
  /* No-op encodings, indexed [architecture variant][endianness].  */
  static unsigned char const arm_noop[2][2][4] =
    {
      {  /* ARMv1: 0xe1a00000 = mov r0, r0.  */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      {  /* ARMv6k: 0xe320f000 = architected nop.  */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  static unsigned char const thumb_noop[2][2][2] =
    {
      {  /* Thumb-1: 0x46c0 = mov r8, r8.  */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      {  /* Thumb-2: 0xbf00 = nop.  */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  static unsigned char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2: 0xf3af8000 = nop.w.  */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const unsigned char * noop;
  const unsigned char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  /* Number of padding bytes between the end of this frag's fixed part
     and the start of the next frag.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  /* NOTE: this masking assumes MAX_MEM_FOR_RS_ALIGN_CODE has the form
     2^n - 1, so only the residual padding is emitted here.  */
  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      /* Thumb frag: use the architected Thumb-2 nops when the selected
	 CPU supports them, else fall back to mov r8, r8.  */
      if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
	{
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
					   ? selected_cpu : arm_arch_none,
					   arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  if (bytes & (noop_size - 1))
    {
      /* The gap is not a multiple of the noop size: zero-fill the odd
	 bytes first and record them as data.  */
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder.  */
      noop_size = 4;
    }

  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
21701
21702 /* Called from md_do_align. Used to create an alignment
21703 frag in a code section. */
21704
21705 void
21706 arm_frag_align_code (int n, int max)
21707 {
21708 char * p;
21709
21710 /* We assume that there will never be a requirement
21711 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
21712 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
21713 {
21714 char err_msg[128];
21715
21716 sprintf (err_msg,
21717 _("alignments greater than %d bytes not supported in .text sections."),
21718 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
21719 as_fatal ("%s", err_msg);
21720 }
21721
21722 p = frag_var (rs_align_code,
21723 MAX_MEM_FOR_RS_ALIGN_CODE,
21724 1,
21725 (relax_substateT) max,
21726 (symbolS *) NULL,
21727 (offsetT) n,
21728 (char *) NULL);
21729 *p = 0;
21730 }
21731
21732 /* Perform target specific initialisation of a frag.
21733 Note - despite the name this initialisation is not done when the frag
21734 is created, but only when its type is assigned. A frag can be created
21735 and used a long time before its type is set, so beware of assuming that
21736 this initialisationis performed first. */
21737
21738 #ifndef OBJ_ELF
void
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
{
  /* Record whether this frag is in an ARM or a THUMB area, and mark
     the record as valid by setting MODE_RECORDED.  */
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
}
21745
21746 #else /* OBJ_ELF is defined. */
void
arm_init_frag (fragS * fragP, int max_chars)
{
  int frag_thumb_mode;

  /* If the current ARM vs THUMB mode has not already
     been recorded into this frag then do so now.  */
  if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
    fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  /* Strip the MODE_RECORDED marker, leaving just the ARM/Thumb flag.  */
  frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED;

  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  switch (fragP->fr_type)
    {
    case rs_align:
    case rs_align_test:
    case rs_fill:
      /* Generic alignment padding and fills count as data.  */
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align_code:
      /* Code alignment is filled with no-ops, so it belongs to the
	 recorded instruction-set state.  */
      mapping_state_2 (frag_thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
      break;
    default:
      break;
    }
}
21775
21776 /* When we change sections we need to issue a new mapping symbol. */
21777
21778 void
21779 arm_elf_change_section (void)
21780 {
21781 /* Link an unlinked unwind index table section to the .text section. */
21782 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
21783 && elf_linked_to_section (now_seg) == NULL)
21784 elf_linked_to_section (now_seg) = text_section;
21785 }
21786
21787 int
21788 arm_elf_section_type (const char * str, size_t len)
21789 {
21790 if (len == 5 && strncmp (str, "exidx", 5) == 0)
21791 return SHT_ARM_EXIDX;
21792
21793 return -1;
21794 }
21795 \f
21796 /* Code to deal with unwinding tables. */
21797
21798 static void add_unwind_adjustsp (offsetT);
21799
/* Generate any deferred unwind frame offset.  */

static void
flush_pending_unwind (void)
{
  offsetT offset;

  /* Clear the pending offset BEFORE emitting it: add_unwind_adjustsp
     calls add_unwind_opcode, which would otherwise recursively try to
     flush the same pending offset again.  */
  offset = unwind.pending_offset;
  unwind.pending_offset = 0;
  if (offset != 0)
    add_unwind_adjustsp (offset);
}
21812
/* Add an opcode to this list for this function.  Two-byte opcodes should
   be passed as op[0] << 8 | op[1].  The list of opcodes is built in reverse
   order.  */

static void
add_unwind_opcode (valueT op, int length)
{
  /* Add any deferred stack adjustment.  */
  if (unwind.pending_offset)
    flush_pending_unwind ();

  /* Any explicitly emitted opcode cancels the "sp already restored"
     state.  */
  unwind.sp_restored = 0;

  /* Grow the opcode buffer when needed.  ARM_OPCODE_CHUNK_SIZE is at
     least as large as the largest opcode, so one increment always
     creates enough room.  */
  if (unwind.opcode_count + length > unwind.opcode_alloc)
    {
      unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
      if (unwind.opcodes)
	unwind.opcodes = XRESIZEVEC (unsigned char, unwind.opcodes,
				     unwind.opcode_alloc);
      else
	unwind.opcodes = XNEWVEC (unsigned char, unwind.opcode_alloc);
    }
  /* Append the opcode bytes least-significant first; since the whole
     list is built in reverse this produces the right final order.  */
  while (length > 0)
    {
      length--;
      unwind.opcodes[unwind.opcode_count] = op & 0xff;
      op >>= 8;
      unwind.opcode_count++;
    }
}
21843
/* Add unwind opcodes to adjust the stack pointer by OFFSET bytes.
   The opcode values follow the ARM EHABI frame-unwinding instruction
   encodings.  */

static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      /* A zero uleb128 still needs a single byte emitted.  */
      if (o == 0)
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  if (o)
	    bytes[n] |= 0x80;
	  n++;
	}
      /* Add the insn.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes: 0x3f ("vsp += 0x100") followed by one more
	 short adjustment for the remainder.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode 0x00-0x3f: "vsp += (op << 2) + 4".  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Negative adjustment, opcode 0x40-0x7f: "vsp -= (op << 2) + 4";
	 at most 0x100 bytes per opcode, so repeat as needed.  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
21905
/* Finish the list of unwind opcodes for this function.  Flushes any
   deferred stack adjustment, and when a frame pointer is in use, emits
   the opcode that restores sp from it.  */
static void
finish_unwind_opcodes (void)
{
  valueT op;

  if (unwind.fp_used)
    {
      /* Adjust sp as necessary.  */
      unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
      flush_pending_unwind ();

      /* After restoring sp from the frame pointer.  */
      op = 0x90 | unwind.fp_reg;	/* EHABI 0x90+R: "vsp = r[R]".  */
      add_unwind_opcode (op, 1);
    }
  else
    flush_pending_unwind ();
}
21925
21926
/* Start an exception table entry.  If idx is nonzero this is an index table
   entry (switches to / creates the appropriate unwind section for the
   code section TEXT_SEG).  */

static void
start_unwind_section (const segT text_seg, int idx)
{
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  const char * group_name;
  char * sec_name;
  int type;
  int flags;
  int linkonce;

  /* Index tables get the SHT_ARM_EXIDX section type; unwind info
     tables are plain SHT_PROGBITS.  The prefixes come from elf/arm.h
     (conventionally .ARM.exidx / .ARM.extab).  */
  if (idx)
    {
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
    }
  else
    {
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
    }

  text_name = segment_name (text_seg);
  /* The unwind sections for ".text" itself take no suffix.  */
  if (streq (text_name, ".text"))
    text_name = "";

  /* For .gnu.linkonce.t.FOO sections use the once-only prefix and
     keep only the FOO suffix.  */
  if (strncmp (text_name, ".gnu.linkonce.t.",
	       strlen (".gnu.linkonce.t.")) == 0)
    {
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
    }

  sec_name = concat (prefix, text_name, (char *) NULL);

  flags = SHF_ALLOC;
  linkonce = 0;
  group_name = 0;

  /* Handle COMDAT group.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
    {
      group_name = elf_group_name (text_seg);
      if (group_name == NULL)
	{
	  as_bad (_("Group section `%s' has no group signature"),
		  segment_name (text_seg));
	  ignore_rest_of_line ();
	  return;
	}
      flags |= SHF_GROUP;
      linkonce = 1;
    }

  obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);

  /* Set the section link for index tables.  */
  if (idx)
    elf_linked_to_section (now_seg) = text_seg;
}
21993
21994
/* Start an unwind table entry.  HAVE_DATA is nonzero if we have additional
   personality routine data.  Returns zero, or the index table value for
   an inline entry.  */

static valueT
create_unwind_entry (int have_data)
{
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.  */
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      /* personality_index == -2 marks a ".cantunwind" frame; it has
	 no table data, only the special index table value.  */
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
	{
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  /* Personality 0 packs all opcodes into a single word, so at
	     most three opcode bytes are representable.  */
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table.  */
	      data = 0x80;
	      n = 3;
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.  */
	size = unwind.opcode_count - 2;
    }
  else
    {
      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
      if (unwind.personality_index != -1)
	{
	  as_bad (_("attempt to recreate an unwind entry"));
	  return 1;
	}

      /* An extra byte is required for the opcode count.  */
      size = unwind.opcode_count + 1;
    }

  /* Convert SIZE from bytes to 32-bit words, rounding up.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  /* The table entry must be word-aligned.  */
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  /* PR 13449: Zero the table entries in case some of them are not used.  */
  memset (ptr, 0, (size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.  */
      data = size > 0 ? size - 1 : 0;
      n = 3;
      break;

      /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.  */
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
22163
22164
/* Initialize the DWARF-2 unwind information for this procedure:
   the initial CFA is the stack pointer with offset 0.  */

void
tc_arm_frame_initial_instructions (void)
{
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
22172 #endif /* OBJ_ELF */
22173
22174 /* Convert REGNAME to a DWARF-2 register number. */
22175
22176 int
22177 tc_arm_regname_to_dw2regnum (char *regname)
22178 {
22179 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
22180 if (reg != FAIL)
22181 return reg;
22182
22183 /* PR 16694: Allow VFP registers as well. */
22184 reg = arm_reg_parse (&regname, REG_TYPE_VFS);
22185 if (reg != FAIL)
22186 return 64 + reg;
22187
22188 reg = arm_reg_parse (&regname, REG_TYPE_VFD);
22189 if (reg != FAIL)
22190 return reg + 256;
22191
22192 return -1;
22193 }
22194
22195 #ifdef TE_PE
22196 void
22197 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
22198 {
22199 expressionS exp;
22200
22201 exp.X_op = O_secrel;
22202 exp.X_add_symbol = symbol;
22203 exp.X_add_number = 0;
22204 emit_expr (&exp, size);
22205 }
22206 #endif
22207
/* MD interface: Symbol and relocation handling.  */

/* Return the address within the segment that a PC-relative fixup is
   relative to.  For ARM, PC-relative fixups applied to instructions
   are generally relative to the location of the fixup plus 8 bytes.
   Thumb branches are offset by 4, and Thumb loads relative to PC
   require special handling.  */

long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      /* Thumb loads: bias of +4, then word-align the PC.  */
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* For a same-section call to an ARM function on a CPU with BLX,
	 use the real (non-zeroed) base: the fixup can be resolved
	 locally rather than left for the linker.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
22337
22338 static bfd_boolean flag_warn_syms = TRUE;
22339
22340 bfd_boolean
22341 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
22342 {
22343 /* PR 18347 - Warn if the user attempts to create a symbol with the same
22344 name as an ARM instruction. Whilst strictly speaking it is allowed, it
22345 does mean that the resulting code might be very confusing to the reader.
22346 Also this warning can be triggered if the user omits an operand before
22347 an immediate address, eg:
22348
22349 LDR =foo
22350
22351 GAS treats this as an assignment of the value of the symbol foo to a
22352 symbol LDR, and so (without this code) it will not issue any kind of
22353 warning or error message.
22354
22355 Note - ARM instructions are case-insensitive but the strings in the hash
22356 table are all stored in lower case, so we must first ensure that name is
22357 lower case too. */
22358 if (flag_warn_syms && arm_ops_hsh)
22359 {
22360 char * nbuf = strdup (name);
22361 char * p;
22362
22363 for (p = nbuf; *p; p++)
22364 *p = TOLOWER (*p);
22365 if (hash_find (arm_ops_hsh, nbuf) != NULL)
22366 {
22367 static struct hash_control * already_warned = NULL;
22368
22369 if (already_warned == NULL)
22370 already_warned = hash_new ();
22371 /* Only warn about the symbol once. To keep the code
22372 simple we let hash_insert do the lookup for us. */
22373 if (hash_insert (already_warned, name, NULL) == NULL)
22374 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
22375 }
22376 else
22377 free (nbuf);
22378 }
22379
22380 return FALSE;
22381 }
22382
22383 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
22384 Otherwise we have no need to default values of symbols. */
22385
22386 symbolS *
22387 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
22388 {
22389 #ifdef OBJ_ELF
22390 if (name[0] == '_' && name[1] == 'G'
22391 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
22392 {
22393 if (!GOT_symbol)
22394 {
22395 if (symbol_find (name))
22396 as_bad (_("GOT already in the symbol table"));
22397
22398 GOT_symbol = symbol_new (name, undefined_section,
22399 (valueT) 0, & zero_address_frag);
22400 }
22401
22402 return GOT_symbol;
22403 }
22404 #endif
22405
22406 return NULL;
22407 }
22408
22409 /* Subroutine of md_apply_fix. Check to see if an immediate can be
22410 computed as two separate immediate values, added together. We
22411 already know that this value cannot be computed by just one ARM
22412 instruction. */
22413
22414 static unsigned int
22415 validate_immediate_twopart (unsigned int val,
22416 unsigned int * highpart)
22417 {
22418 unsigned int a;
22419 unsigned int i;
22420
22421 for (i = 0; i < 32; i += 2)
22422 if (((a = rotate_left (val, i)) & 0xff) != 0)
22423 {
22424 if (a & 0xff00)
22425 {
22426 if (a & ~ 0xffff)
22427 continue;
22428 * highpart = (a >> 8) | ((i + 24) << 7);
22429 }
22430 else if (a & 0xff0000)
22431 {
22432 if (a & 0xff000000)
22433 continue;
22434 * highpart = (a >> 16) | ((i + 16) << 7);
22435 }
22436 else
22437 {
22438 gas_assert (a & 0xff000000);
22439 * highpart = (a >> 24) | ((i + 8) << 7);
22440 }
22441
22442 return (a & 0xff) | (i << 7);
22443 }
22444
22445 return FAIL;
22446 }
22447
22448 static int
22449 validate_offset_imm (unsigned int val, int hwse)
22450 {
22451 if ((hwse && val > 255) || val > 4095)
22452 return FAIL;
22453 return val;
22454 }
22455
22456 /* Subroutine of md_apply_fix. Do those data_ops which can take a
22457 negative immediate constant by altering the instruction. A bit of
22458 a hack really.
22459 MOV <-> MVN
22460 AND <-> BIC
22461 ADC <-> SBC
22462 by inverting the second operand, and
22463 ADD <-> SUB
22464 CMP <-> CMN
22465 by negating the second operand. */
22466
22467 static int
22468 negate_data_op (unsigned long * instruction,
22469 unsigned long value)
22470 {
22471 int op, new_inst;
22472 unsigned long negated, inverted;
22473
22474 negated = encode_arm_immediate (-value);
22475 inverted = encode_arm_immediate (~value);
22476
22477 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
22478 switch (op)
22479 {
22480 /* First negates. */
22481 case OPCODE_SUB: /* ADD <-> SUB */
22482 new_inst = OPCODE_ADD;
22483 value = negated;
22484 break;
22485
22486 case OPCODE_ADD:
22487 new_inst = OPCODE_SUB;
22488 value = negated;
22489 break;
22490
22491 case OPCODE_CMP: /* CMP <-> CMN */
22492 new_inst = OPCODE_CMN;
22493 value = negated;
22494 break;
22495
22496 case OPCODE_CMN:
22497 new_inst = OPCODE_CMP;
22498 value = negated;
22499 break;
22500
22501 /* Now Inverted ops. */
22502 case OPCODE_MOV: /* MOV <-> MVN */
22503 new_inst = OPCODE_MVN;
22504 value = inverted;
22505 break;
22506
22507 case OPCODE_MVN:
22508 new_inst = OPCODE_MOV;
22509 value = inverted;
22510 break;
22511
22512 case OPCODE_AND: /* AND <-> BIC */
22513 new_inst = OPCODE_BIC;
22514 value = inverted;
22515 break;
22516
22517 case OPCODE_BIC:
22518 new_inst = OPCODE_AND;
22519 value = inverted;
22520 break;
22521
22522 case OPCODE_ADC: /* ADC <-> SBC */
22523 new_inst = OPCODE_SBC;
22524 value = inverted;
22525 break;
22526
22527 case OPCODE_SBC:
22528 new_inst = OPCODE_ADC;
22529 value = inverted;
22530 break;
22531
22532 /* We cannot do anything. */
22533 default:
22534 return FAIL;
22535 }
22536
22537 if (value == (unsigned) FAIL)
22538 return FAIL;
22539
22540 *instruction &= OPCODE_MASK;
22541 *instruction |= new_inst << DATA_OP_SHIFT;
22542 return value;
22543 }
22544
22545 /* Like negate_data_op, but for Thumb-2. */
22546
22547 static unsigned int
22548 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
22549 {
22550 int op, new_inst;
22551 int rd;
22552 unsigned int negated, inverted;
22553
22554 negated = encode_thumb32_immediate (-value);
22555 inverted = encode_thumb32_immediate (~value);
22556
22557 rd = (*instruction >> 8) & 0xf;
22558 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
22559 switch (op)
22560 {
22561 /* ADD <-> SUB. Includes CMP <-> CMN. */
22562 case T2_OPCODE_SUB:
22563 new_inst = T2_OPCODE_ADD;
22564 value = negated;
22565 break;
22566
22567 case T2_OPCODE_ADD:
22568 new_inst = T2_OPCODE_SUB;
22569 value = negated;
22570 break;
22571
22572 /* ORR <-> ORN. Includes MOV <-> MVN. */
22573 case T2_OPCODE_ORR:
22574 new_inst = T2_OPCODE_ORN;
22575 value = inverted;
22576 break;
22577
22578 case T2_OPCODE_ORN:
22579 new_inst = T2_OPCODE_ORR;
22580 value = inverted;
22581 break;
22582
22583 /* AND <-> BIC. TST has no inverted equivalent. */
22584 case T2_OPCODE_AND:
22585 new_inst = T2_OPCODE_BIC;
22586 if (rd == 15)
22587 value = FAIL;
22588 else
22589 value = inverted;
22590 break;
22591
22592 case T2_OPCODE_BIC:
22593 new_inst = T2_OPCODE_AND;
22594 value = inverted;
22595 break;
22596
22597 /* ADC <-> SBC */
22598 case T2_OPCODE_ADC:
22599 new_inst = T2_OPCODE_SBC;
22600 value = inverted;
22601 break;
22602
22603 case T2_OPCODE_SBC:
22604 new_inst = T2_OPCODE_ADC;
22605 value = inverted;
22606 break;
22607
22608 /* We cannot do anything. */
22609 default:
22610 return FAIL;
22611 }
22612
22613 if (value == (unsigned int)FAIL)
22614 return FAIL;
22615
22616 *instruction &= T2_OPCODE_MASK;
22617 *instruction |= new_inst << T2_DATA_OP_SHIFT;
22618 return value;
22619 }
22620
22621 /* Read a 32-bit thumb instruction from buf. */
22622 static unsigned long
22623 get_thumb32_insn (char * buf)
22624 {
22625 unsigned long insn;
22626 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
22627 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22628
22629 return insn;
22630 }
22631
22632
22633 /* We usually want to set the low bit on the address of thumb function
22634 symbols. In particular .word foo - . should have the low bit set.
22635 Generic code tries to fold the difference of two symbols to
22636 a constant. Prevent this and force a relocation when the first symbols
22637 is a thumb function. */
22638
22639 bfd_boolean
22640 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
22641 {
22642 if (op == O_subtract
22643 && l->X_op == O_symbol
22644 && r->X_op == O_symbol
22645 && THUMB_IS_FUNC (l->X_add_symbol))
22646 {
22647 l->X_op = O_subtract;
22648 l->X_op_symbol = r->X_add_symbol;
22649 l->X_add_number -= r->X_add_number;
22650 return TRUE;
22651 }
22652
22653 /* Process as normal. */
22654 return FALSE;
22655 }
22656
22657 /* Encode Thumb2 unconditional branches and calls. The encoding
22658 for the 2 are identical for the immediate values. */
22659
static void
encode_thumb2_b_bl_offset (char * buf, offsetT value)
{
/* Mask covering the J1 (bit 13) and J2 (bit 11) positions in the second
   halfword of a Thumb-2 B.W/BL encoding.  */
#define T2I1I2MASK ((1 << 13) | (1 << 11))
  offsetT newval;
  offsetT newval2;
  addressT S, I1, I2, lo, hi;

  /* Decompose the byte offset VALUE into the fields of the 25-bit
     Thumb-2 branch immediate: sign bit S, the two high bits I1/I2, a
     10-bit high part and an 11-bit low part (bit 0 is dropped, as the
     target is halfword aligned).  */
  S = (value >> 24) & 0x01;
  I1 = (value >> 23) & 0x01;
  I2 = (value >> 22) & 0x01;
  hi = (value >> 12) & 0x3ff;
  lo = (value >> 1) & 0x7ff;
  newval = md_chars_to_number (buf, THUMB_SIZE);
  newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
  newval |= (S << 10) | hi;
  newval2 &= ~T2I1I2MASK;
  /* The encoding stores J1 = NOT (I1 EOR S) and J2 = NOT (I2 EOR S);
     the final XOR with T2I1I2MASK performs the inversion of both bits
     in one step.  */
  newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
  md_number_to_chars (buf, newval, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
}
22681
22682 void
22683 md_apply_fix (fixS * fixP,
22684 valueT * valP,
22685 segT seg)
22686 {
22687 offsetT value = * valP;
22688 offsetT newval;
22689 unsigned int newimm;
22690 unsigned long temp;
22691 int sign;
22692 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
22693
22694 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
22695
22696 /* Note whether this will delete the relocation. */
22697
22698 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
22699 fixP->fx_done = 1;
22700
22701 /* On a 64-bit host, silently truncate 'value' to 32 bits for
22702 consistency with the behaviour on 32-bit hosts. Remember value
22703 for emit_reloc. */
22704 value &= 0xffffffff;
22705 value ^= 0x80000000;
22706 value -= 0x80000000;
22707
22708 *valP = value;
22709 fixP->fx_addnumber = value;
22710
22711 /* Same treatment for fixP->fx_offset. */
22712 fixP->fx_offset &= 0xffffffff;
22713 fixP->fx_offset ^= 0x80000000;
22714 fixP->fx_offset -= 0x80000000;
22715
22716 switch (fixP->fx_r_type)
22717 {
22718 case BFD_RELOC_NONE:
22719 /* This will need to go in the object file. */
22720 fixP->fx_done = 0;
22721 break;
22722
22723 case BFD_RELOC_ARM_IMMEDIATE:
22724 /* We claim that this fixup has been processed here,
22725 even if in fact we generate an error because we do
22726 not have a reloc for it, so tc_gen_reloc will reject it. */
22727 fixP->fx_done = 1;
22728
22729 if (fixP->fx_addsy)
22730 {
22731 const char *msg = 0;
22732
22733 if (! S_IS_DEFINED (fixP->fx_addsy))
22734 msg = _("undefined symbol %s used as an immediate value");
22735 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
22736 msg = _("symbol %s is in a different section");
22737 else if (S_IS_WEAK (fixP->fx_addsy))
22738 msg = _("symbol %s is weak and may be overridden later");
22739
22740 if (msg)
22741 {
22742 as_bad_where (fixP->fx_file, fixP->fx_line,
22743 msg, S_GET_NAME (fixP->fx_addsy));
22744 break;
22745 }
22746 }
22747
22748 temp = md_chars_to_number (buf, INSN_SIZE);
22749
22750 /* If the offset is negative, we should use encoding A2 for ADR. */
22751 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
22752 newimm = negate_data_op (&temp, value);
22753 else
22754 {
22755 newimm = encode_arm_immediate (value);
22756
22757 /* If the instruction will fail, see if we can fix things up by
22758 changing the opcode. */
22759 if (newimm == (unsigned int) FAIL)
22760 newimm = negate_data_op (&temp, value);
22761 }
22762
22763 if (newimm == (unsigned int) FAIL)
22764 {
22765 as_bad_where (fixP->fx_file, fixP->fx_line,
22766 _("invalid constant (%lx) after fixup"),
22767 (unsigned long) value);
22768 break;
22769 }
22770
22771 newimm |= (temp & 0xfffff000);
22772 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
22773 break;
22774
22775 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
22776 {
22777 unsigned int highpart = 0;
22778 unsigned int newinsn = 0xe1a00000; /* nop. */
22779
22780 if (fixP->fx_addsy)
22781 {
22782 const char *msg = 0;
22783
22784 if (! S_IS_DEFINED (fixP->fx_addsy))
22785 msg = _("undefined symbol %s used as an immediate value");
22786 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
22787 msg = _("symbol %s is in a different section");
22788 else if (S_IS_WEAK (fixP->fx_addsy))
22789 msg = _("symbol %s is weak and may be overridden later");
22790
22791 if (msg)
22792 {
22793 as_bad_where (fixP->fx_file, fixP->fx_line,
22794 msg, S_GET_NAME (fixP->fx_addsy));
22795 break;
22796 }
22797 }
22798
22799 newimm = encode_arm_immediate (value);
22800 temp = md_chars_to_number (buf, INSN_SIZE);
22801
22802 /* If the instruction will fail, see if we can fix things up by
22803 changing the opcode. */
22804 if (newimm == (unsigned int) FAIL
22805 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
22806 {
22807 /* No ? OK - try using two ADD instructions to generate
22808 the value. */
22809 newimm = validate_immediate_twopart (value, & highpart);
22810
22811 /* Yes - then make sure that the second instruction is
22812 also an add. */
22813 if (newimm != (unsigned int) FAIL)
22814 newinsn = temp;
22815 /* Still No ? Try using a negated value. */
22816 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
22817 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
22818 /* Otherwise - give up. */
22819 else
22820 {
22821 as_bad_where (fixP->fx_file, fixP->fx_line,
22822 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
22823 (long) value);
22824 break;
22825 }
22826
22827 /* Replace the first operand in the 2nd instruction (which
22828 is the PC) with the destination register. We have
22829 already added in the PC in the first instruction and we
22830 do not want to do it again. */
22831 newinsn &= ~ 0xf0000;
22832 newinsn |= ((newinsn & 0x0f000) << 4);
22833 }
22834
22835 newimm |= (temp & 0xfffff000);
22836 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
22837
22838 highpart |= (newinsn & 0xfffff000);
22839 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
22840 }
22841 break;
22842
22843 case BFD_RELOC_ARM_OFFSET_IMM:
22844 if (!fixP->fx_done && seg->use_rela_p)
22845 value = 0;
22846
22847 case BFD_RELOC_ARM_LITERAL:
22848 sign = value > 0;
22849
22850 if (value < 0)
22851 value = - value;
22852
22853 if (validate_offset_imm (value, 0) == FAIL)
22854 {
22855 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
22856 as_bad_where (fixP->fx_file, fixP->fx_line,
22857 _("invalid literal constant: pool needs to be closer"));
22858 else
22859 as_bad_where (fixP->fx_file, fixP->fx_line,
22860 _("bad immediate value for offset (%ld)"),
22861 (long) value);
22862 break;
22863 }
22864
22865 newval = md_chars_to_number (buf, INSN_SIZE);
22866 if (value == 0)
22867 newval &= 0xfffff000;
22868 else
22869 {
22870 newval &= 0xff7ff000;
22871 newval |= value | (sign ? INDEX_UP : 0);
22872 }
22873 md_number_to_chars (buf, newval, INSN_SIZE);
22874 break;
22875
22876 case BFD_RELOC_ARM_OFFSET_IMM8:
22877 case BFD_RELOC_ARM_HWLITERAL:
22878 sign = value > 0;
22879
22880 if (value < 0)
22881 value = - value;
22882
22883 if (validate_offset_imm (value, 1) == FAIL)
22884 {
22885 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
22886 as_bad_where (fixP->fx_file, fixP->fx_line,
22887 _("invalid literal constant: pool needs to be closer"));
22888 else
22889 as_bad_where (fixP->fx_file, fixP->fx_line,
22890 _("bad immediate value for 8-bit offset (%ld)"),
22891 (long) value);
22892 break;
22893 }
22894
22895 newval = md_chars_to_number (buf, INSN_SIZE);
22896 if (value == 0)
22897 newval &= 0xfffff0f0;
22898 else
22899 {
22900 newval &= 0xff7ff0f0;
22901 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
22902 }
22903 md_number_to_chars (buf, newval, INSN_SIZE);
22904 break;
22905
22906 case BFD_RELOC_ARM_T32_OFFSET_U8:
22907 if (value < 0 || value > 1020 || value % 4 != 0)
22908 as_bad_where (fixP->fx_file, fixP->fx_line,
22909 _("bad immediate value for offset (%ld)"), (long) value);
22910 value /= 4;
22911
22912 newval = md_chars_to_number (buf+2, THUMB_SIZE);
22913 newval |= value;
22914 md_number_to_chars (buf+2, newval, THUMB_SIZE);
22915 break;
22916
22917 case BFD_RELOC_ARM_T32_OFFSET_IMM:
22918 /* This is a complicated relocation used for all varieties of Thumb32
22919 load/store instruction with immediate offset:
22920
22921 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
22922 *4, optional writeback(W)
22923 (doubleword load/store)
22924
22925 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
22926 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
22927 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
22928 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
22929 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
22930
22931 Uppercase letters indicate bits that are already encoded at
22932 this point. Lowercase letters are our problem. For the
22933 second block of instructions, the secondary opcode nybble
22934 (bits 8..11) is present, and bit 23 is zero, even if this is
22935 a PC-relative operation. */
22936 newval = md_chars_to_number (buf, THUMB_SIZE);
22937 newval <<= 16;
22938 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
22939
22940 if ((newval & 0xf0000000) == 0xe0000000)
22941 {
22942 /* Doubleword load/store: 8-bit offset, scaled by 4. */
22943 if (value >= 0)
22944 newval |= (1 << 23);
22945 else
22946 value = -value;
22947 if (value % 4 != 0)
22948 {
22949 as_bad_where (fixP->fx_file, fixP->fx_line,
22950 _("offset not a multiple of 4"));
22951 break;
22952 }
22953 value /= 4;
22954 if (value > 0xff)
22955 {
22956 as_bad_where (fixP->fx_file, fixP->fx_line,
22957 _("offset out of range"));
22958 break;
22959 }
22960 newval &= ~0xff;
22961 }
22962 else if ((newval & 0x000f0000) == 0x000f0000)
22963 {
22964 /* PC-relative, 12-bit offset. */
22965 if (value >= 0)
22966 newval |= (1 << 23);
22967 else
22968 value = -value;
22969 if (value > 0xfff)
22970 {
22971 as_bad_where (fixP->fx_file, fixP->fx_line,
22972 _("offset out of range"));
22973 break;
22974 }
22975 newval &= ~0xfff;
22976 }
22977 else if ((newval & 0x00000100) == 0x00000100)
22978 {
22979 /* Writeback: 8-bit, +/- offset. */
22980 if (value >= 0)
22981 newval |= (1 << 9);
22982 else
22983 value = -value;
22984 if (value > 0xff)
22985 {
22986 as_bad_where (fixP->fx_file, fixP->fx_line,
22987 _("offset out of range"));
22988 break;
22989 }
22990 newval &= ~0xff;
22991 }
22992 else if ((newval & 0x00000f00) == 0x00000e00)
22993 {
22994 /* T-instruction: positive 8-bit offset. */
22995 if (value < 0 || value > 0xff)
22996 {
22997 as_bad_where (fixP->fx_file, fixP->fx_line,
22998 _("offset out of range"));
22999 break;
23000 }
23001 newval &= ~0xff;
23002 newval |= value;
23003 }
23004 else
23005 {
23006 /* Positive 12-bit or negative 8-bit offset. */
23007 int limit;
23008 if (value >= 0)
23009 {
23010 newval |= (1 << 23);
23011 limit = 0xfff;
23012 }
23013 else
23014 {
23015 value = -value;
23016 limit = 0xff;
23017 }
23018 if (value > limit)
23019 {
23020 as_bad_where (fixP->fx_file, fixP->fx_line,
23021 _("offset out of range"));
23022 break;
23023 }
23024 newval &= ~limit;
23025 }
23026
23027 newval |= value;
23028 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
23029 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
23030 break;
23031
23032 case BFD_RELOC_ARM_SHIFT_IMM:
23033 newval = md_chars_to_number (buf, INSN_SIZE);
23034 if (((unsigned long) value) > 32
23035 || (value == 32
23036 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
23037 {
23038 as_bad_where (fixP->fx_file, fixP->fx_line,
23039 _("shift expression is too large"));
23040 break;
23041 }
23042
23043 if (value == 0)
23044 /* Shifts of zero must be done as lsl. */
23045 newval &= ~0x60;
23046 else if (value == 32)
23047 value = 0;
23048 newval &= 0xfffff07f;
23049 newval |= (value & 0x1f) << 7;
23050 md_number_to_chars (buf, newval, INSN_SIZE);
23051 break;
23052
23053 case BFD_RELOC_ARM_T32_IMMEDIATE:
23054 case BFD_RELOC_ARM_T32_ADD_IMM:
23055 case BFD_RELOC_ARM_T32_IMM12:
23056 case BFD_RELOC_ARM_T32_ADD_PC12:
23057 /* We claim that this fixup has been processed here,
23058 even if in fact we generate an error because we do
23059 not have a reloc for it, so tc_gen_reloc will reject it. */
23060 fixP->fx_done = 1;
23061
23062 if (fixP->fx_addsy
23063 && ! S_IS_DEFINED (fixP->fx_addsy))
23064 {
23065 as_bad_where (fixP->fx_file, fixP->fx_line,
23066 _("undefined symbol %s used as an immediate value"),
23067 S_GET_NAME (fixP->fx_addsy));
23068 break;
23069 }
23070
23071 newval = md_chars_to_number (buf, THUMB_SIZE);
23072 newval <<= 16;
23073 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
23074
23075 newimm = FAIL;
23076 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
23077 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
23078 {
23079 newimm = encode_thumb32_immediate (value);
23080 if (newimm == (unsigned int) FAIL)
23081 newimm = thumb32_negate_data_op (&newval, value);
23082 }
23083 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
23084 && newimm == (unsigned int) FAIL)
23085 {
23086 /* Turn add/sum into addw/subw. */
23087 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
23088 newval = (newval & 0xfeffffff) | 0x02000000;
23089 /* No flat 12-bit imm encoding for addsw/subsw. */
23090 if ((newval & 0x00100000) == 0)
23091 {
23092 /* 12 bit immediate for addw/subw. */
23093 if (value < 0)
23094 {
23095 value = -value;
23096 newval ^= 0x00a00000;
23097 }
23098 if (value > 0xfff)
23099 newimm = (unsigned int) FAIL;
23100 else
23101 newimm = value;
23102 }
23103 }
23104
23105 if (newimm == (unsigned int)FAIL)
23106 {
23107 as_bad_where (fixP->fx_file, fixP->fx_line,
23108 _("invalid constant (%lx) after fixup"),
23109 (unsigned long) value);
23110 break;
23111 }
23112
23113 newval |= (newimm & 0x800) << 15;
23114 newval |= (newimm & 0x700) << 4;
23115 newval |= (newimm & 0x0ff);
23116
23117 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
23118 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
23119 break;
23120
23121 case BFD_RELOC_ARM_SMC:
23122 if (((unsigned long) value) > 0xffff)
23123 as_bad_where (fixP->fx_file, fixP->fx_line,
23124 _("invalid smc expression"));
23125 newval = md_chars_to_number (buf, INSN_SIZE);
23126 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
23127 md_number_to_chars (buf, newval, INSN_SIZE);
23128 break;
23129
23130 case BFD_RELOC_ARM_HVC:
23131 if (((unsigned long) value) > 0xffff)
23132 as_bad_where (fixP->fx_file, fixP->fx_line,
23133 _("invalid hvc expression"));
23134 newval = md_chars_to_number (buf, INSN_SIZE);
23135 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
23136 md_number_to_chars (buf, newval, INSN_SIZE);
23137 break;
23138
23139 case BFD_RELOC_ARM_SWI:
23140 if (fixP->tc_fix_data != 0)
23141 {
23142 if (((unsigned long) value) > 0xff)
23143 as_bad_where (fixP->fx_file, fixP->fx_line,
23144 _("invalid swi expression"));
23145 newval = md_chars_to_number (buf, THUMB_SIZE);
23146 newval |= value;
23147 md_number_to_chars (buf, newval, THUMB_SIZE);
23148 }
23149 else
23150 {
23151 if (((unsigned long) value) > 0x00ffffff)
23152 as_bad_where (fixP->fx_file, fixP->fx_line,
23153 _("invalid swi expression"));
23154 newval = md_chars_to_number (buf, INSN_SIZE);
23155 newval |= value;
23156 md_number_to_chars (buf, newval, INSN_SIZE);
23157 }
23158 break;
23159
23160 case BFD_RELOC_ARM_MULTI:
23161 if (((unsigned long) value) > 0xffff)
23162 as_bad_where (fixP->fx_file, fixP->fx_line,
23163 _("invalid expression in load/store multiple"));
23164 newval = value | md_chars_to_number (buf, INSN_SIZE);
23165 md_number_to_chars (buf, newval, INSN_SIZE);
23166 break;
23167
23168 #ifdef OBJ_ELF
23169 case BFD_RELOC_ARM_PCREL_CALL:
23170
23171 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23172 && fixP->fx_addsy
23173 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23174 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23175 && THUMB_IS_FUNC (fixP->fx_addsy))
23176 /* Flip the bl to blx. This is a simple flip
23177 bit here because we generate PCREL_CALL for
23178 unconditional bls. */
23179 {
23180 newval = md_chars_to_number (buf, INSN_SIZE);
23181 newval = newval | 0x10000000;
23182 md_number_to_chars (buf, newval, INSN_SIZE);
23183 temp = 1;
23184 fixP->fx_done = 1;
23185 }
23186 else
23187 temp = 3;
23188 goto arm_branch_common;
23189
23190 case BFD_RELOC_ARM_PCREL_JUMP:
23191 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23192 && fixP->fx_addsy
23193 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23194 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23195 && THUMB_IS_FUNC (fixP->fx_addsy))
23196 {
23197 /* This would map to a bl<cond>, b<cond>,
23198 b<always> to a Thumb function. We
23199 need to force a relocation for this particular
23200 case. */
23201 newval = md_chars_to_number (buf, INSN_SIZE);
23202 fixP->fx_done = 0;
23203 }
23204
23205 case BFD_RELOC_ARM_PLT32:
23206 #endif
23207 case BFD_RELOC_ARM_PCREL_BRANCH:
23208 temp = 3;
23209 goto arm_branch_common;
23210
23211 case BFD_RELOC_ARM_PCREL_BLX:
23212
23213 temp = 1;
23214 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23215 && fixP->fx_addsy
23216 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23217 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23218 && ARM_IS_FUNC (fixP->fx_addsy))
23219 {
23220 /* Flip the blx to a bl and warn. */
23221 const char *name = S_GET_NAME (fixP->fx_addsy);
23222 newval = 0xeb000000;
23223 as_warn_where (fixP->fx_file, fixP->fx_line,
23224 _("blx to '%s' an ARM ISA state function changed to bl"),
23225 name);
23226 md_number_to_chars (buf, newval, INSN_SIZE);
23227 temp = 3;
23228 fixP->fx_done = 1;
23229 }
23230
23231 #ifdef OBJ_ELF
23232 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
23233 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
23234 #endif
23235
23236 arm_branch_common:
23237 /* We are going to store value (shifted right by two) in the
23238 instruction, in a 24 bit, signed field. Bits 26 through 32 either
23239 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
23240 also be be clear. */
23241 if (value & temp)
23242 as_bad_where (fixP->fx_file, fixP->fx_line,
23243 _("misaligned branch destination"));
23244 if ((value & (offsetT)0xfe000000) != (offsetT)0
23245 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
23246 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23247
23248 if (fixP->fx_done || !seg->use_rela_p)
23249 {
23250 newval = md_chars_to_number (buf, INSN_SIZE);
23251 newval |= (value >> 2) & 0x00ffffff;
23252 /* Set the H bit on BLX instructions. */
23253 if (temp == 1)
23254 {
23255 if (value & 2)
23256 newval |= 0x01000000;
23257 else
23258 newval &= ~0x01000000;
23259 }
23260 md_number_to_chars (buf, newval, INSN_SIZE);
23261 }
23262 break;
23263
23264 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
23265 /* CBZ can only branch forward. */
23266
23267 /* Attempts to use CBZ to branch to the next instruction
23268 (which, strictly speaking, are prohibited) will be turned into
23269 no-ops.
23270
23271 FIXME: It may be better to remove the instruction completely and
23272 perform relaxation. */
23273 if (value == -2)
23274 {
23275 newval = md_chars_to_number (buf, THUMB_SIZE);
23276 newval = 0xbf00; /* NOP encoding T1 */
23277 md_number_to_chars (buf, newval, THUMB_SIZE);
23278 }
23279 else
23280 {
23281 if (value & ~0x7e)
23282 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23283
23284 if (fixP->fx_done || !seg->use_rela_p)
23285 {
23286 newval = md_chars_to_number (buf, THUMB_SIZE);
23287 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
23288 md_number_to_chars (buf, newval, THUMB_SIZE);
23289 }
23290 }
23291 break;
23292
23293 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
23294 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
23295 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23296
23297 if (fixP->fx_done || !seg->use_rela_p)
23298 {
23299 newval = md_chars_to_number (buf, THUMB_SIZE);
23300 newval |= (value & 0x1ff) >> 1;
23301 md_number_to_chars (buf, newval, THUMB_SIZE);
23302 }
23303 break;
23304
23305 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
23306 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
23307 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23308
23309 if (fixP->fx_done || !seg->use_rela_p)
23310 {
23311 newval = md_chars_to_number (buf, THUMB_SIZE);
23312 newval |= (value & 0xfff) >> 1;
23313 md_number_to_chars (buf, newval, THUMB_SIZE);
23314 }
23315 break;
23316
23317 case BFD_RELOC_THUMB_PCREL_BRANCH20:
23318 if (fixP->fx_addsy
23319 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23320 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23321 && ARM_IS_FUNC (fixP->fx_addsy)
23322 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
23323 {
23324 /* Force a relocation for a branch 20 bits wide. */
23325 fixP->fx_done = 0;
23326 }
23327 if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
23328 as_bad_where (fixP->fx_file, fixP->fx_line,
23329 _("conditional branch out of range"));
23330
23331 if (fixP->fx_done || !seg->use_rela_p)
23332 {
23333 offsetT newval2;
23334 addressT S, J1, J2, lo, hi;
23335
23336 S = (value & 0x00100000) >> 20;
23337 J2 = (value & 0x00080000) >> 19;
23338 J1 = (value & 0x00040000) >> 18;
23339 hi = (value & 0x0003f000) >> 12;
23340 lo = (value & 0x00000ffe) >> 1;
23341
23342 newval = md_chars_to_number (buf, THUMB_SIZE);
23343 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23344 newval |= (S << 10) | hi;
23345 newval2 |= (J1 << 13) | (J2 << 11) | lo;
23346 md_number_to_chars (buf, newval, THUMB_SIZE);
23347 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
23348 }
23349 break;
23350
23351 case BFD_RELOC_THUMB_PCREL_BLX:
23352 /* If there is a blx from a thumb state function to
23353 another thumb function flip this to a bl and warn
23354 about it. */
23355
23356 if (fixP->fx_addsy
23357 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23358 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23359 && THUMB_IS_FUNC (fixP->fx_addsy))
23360 {
23361 const char *name = S_GET_NAME (fixP->fx_addsy);
23362 as_warn_where (fixP->fx_file, fixP->fx_line,
23363 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
23364 name);
23365 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23366 newval = newval | 0x1000;
23367 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
23368 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
23369 fixP->fx_done = 1;
23370 }
23371
23372
23373 goto thumb_bl_common;
23374
23375 case BFD_RELOC_THUMB_PCREL_BRANCH23:
23376 /* A bl from Thumb state ISA to an internal ARM state function
23377 is converted to a blx. */
23378 if (fixP->fx_addsy
23379 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23380 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23381 && ARM_IS_FUNC (fixP->fx_addsy)
23382 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
23383 {
23384 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23385 newval = newval & ~0x1000;
23386 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
23387 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
23388 fixP->fx_done = 1;
23389 }
23390
23391 thumb_bl_common:
23392
23393 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
23394 /* For a BLX instruction, make sure that the relocation is rounded up
23395 to a word boundary. This follows the semantics of the instruction
23396 which specifies that bit 1 of the target address will come from bit
23397 1 of the base address. */
23398 value = (value + 3) & ~ 3;
23399
23400 #ifdef OBJ_ELF
23401 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
23402 && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
23403 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
23404 #endif
23405
23406 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
23407 {
23408 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)))
23409 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23410 else if ((value & ~0x1ffffff)
23411 && ((value & ~0x1ffffff) != ~0x1ffffff))
23412 as_bad_where (fixP->fx_file, fixP->fx_line,
23413 _("Thumb2 branch out of range"));
23414 }
23415
23416 if (fixP->fx_done || !seg->use_rela_p)
23417 encode_thumb2_b_bl_offset (buf, value);
23418
23419 break;
23420
23421 case BFD_RELOC_THUMB_PCREL_BRANCH25:
23422 if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
23423 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23424
23425 if (fixP->fx_done || !seg->use_rela_p)
23426 encode_thumb2_b_bl_offset (buf, value);
23427
23428 break;
23429
23430 case BFD_RELOC_8:
23431 if (fixP->fx_done || !seg->use_rela_p)
23432 *buf = value;
23433 break;
23434
23435 case BFD_RELOC_16:
23436 if (fixP->fx_done || !seg->use_rela_p)
23437 md_number_to_chars (buf, value, 2);
23438 break;
23439
23440 #ifdef OBJ_ELF
23441 case BFD_RELOC_ARM_TLS_CALL:
23442 case BFD_RELOC_ARM_THM_TLS_CALL:
23443 case BFD_RELOC_ARM_TLS_DESCSEQ:
23444 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
23445 case BFD_RELOC_ARM_TLS_GOTDESC:
23446 case BFD_RELOC_ARM_TLS_GD32:
23447 case BFD_RELOC_ARM_TLS_LE32:
23448 case BFD_RELOC_ARM_TLS_IE32:
23449 case BFD_RELOC_ARM_TLS_LDM32:
23450 case BFD_RELOC_ARM_TLS_LDO32:
23451 S_SET_THREAD_LOCAL (fixP->fx_addsy);
23452 break;
23453
23454 case BFD_RELOC_ARM_GOT32:
23455 case BFD_RELOC_ARM_GOTOFF:
23456 break;
23457
23458 case BFD_RELOC_ARM_GOT_PREL:
23459 if (fixP->fx_done || !seg->use_rela_p)
23460 md_number_to_chars (buf, value, 4);
23461 break;
23462
23463 case BFD_RELOC_ARM_TARGET2:
23464 /* TARGET2 is not partial-inplace, so we need to write the
23465 addend here for REL targets, because it won't be written out
23466 during reloc processing later. */
23467 if (fixP->fx_done || !seg->use_rela_p)
23468 md_number_to_chars (buf, fixP->fx_offset, 4);
23469 break;
23470 #endif
23471
23472 case BFD_RELOC_RVA:
23473 case BFD_RELOC_32:
23474 case BFD_RELOC_ARM_TARGET1:
23475 case BFD_RELOC_ARM_ROSEGREL32:
23476 case BFD_RELOC_ARM_SBREL32:
23477 case BFD_RELOC_32_PCREL:
23478 #ifdef TE_PE
23479 case BFD_RELOC_32_SECREL:
23480 #endif
23481 if (fixP->fx_done || !seg->use_rela_p)
23482 #ifdef TE_WINCE
23483 /* For WinCE we only do this for pcrel fixups. */
23484 if (fixP->fx_done || fixP->fx_pcrel)
23485 #endif
23486 md_number_to_chars (buf, value, 4);
23487 break;
23488
23489 #ifdef OBJ_ELF
23490 case BFD_RELOC_ARM_PREL31:
23491 if (fixP->fx_done || !seg->use_rela_p)
23492 {
23493 newval = md_chars_to_number (buf, 4) & 0x80000000;
23494 if ((value ^ (value >> 1)) & 0x40000000)
23495 {
23496 as_bad_where (fixP->fx_file, fixP->fx_line,
23497 _("rel31 relocation overflow"));
23498 }
23499 newval |= value & 0x7fffffff;
23500 md_number_to_chars (buf, newval, 4);
23501 }
23502 break;
23503 #endif
23504
23505 case BFD_RELOC_ARM_CP_OFF_IMM:
23506 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
23507 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM)
23508 newval = md_chars_to_number (buf, INSN_SIZE);
23509 else
23510 newval = get_thumb32_insn (buf);
23511 if ((newval & 0x0f200f00) == 0x0d000900)
23512 {
23513 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
23514 has permitted values that are multiples of 2, in the range 0
23515 to 510. */
23516 if (value < -510 || value > 510 || (value & 1))
23517 as_bad_where (fixP->fx_file, fixP->fx_line,
23518 _("co-processor offset out of range"));
23519 }
23520 else if (value < -1023 || value > 1023 || (value & 3))
23521 as_bad_where (fixP->fx_file, fixP->fx_line,
23522 _("co-processor offset out of range"));
23523 cp_off_common:
23524 sign = value > 0;
23525 if (value < 0)
23526 value = -value;
23527 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
23528 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
23529 newval = md_chars_to_number (buf, INSN_SIZE);
23530 else
23531 newval = get_thumb32_insn (buf);
23532 if (value == 0)
23533 newval &= 0xffffff00;
23534 else
23535 {
23536 newval &= 0xff7fff00;
23537 if ((newval & 0x0f200f00) == 0x0d000900)
23538 {
23539 /* This is a fp16 vstr/vldr.
23540
23541 It requires the immediate offset in the instruction is shifted
23542 left by 1 to be a half-word offset.
23543
23544 Here, left shift by 1 first, and later right shift by 2
23545 should get the right offset. */
23546 value <<= 1;
23547 }
23548 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
23549 }
23550 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
23551 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
23552 md_number_to_chars (buf, newval, INSN_SIZE);
23553 else
23554 put_thumb32_insn (buf, newval);
23555 break;
23556
23557 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
23558 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
23559 if (value < -255 || value > 255)
23560 as_bad_where (fixP->fx_file, fixP->fx_line,
23561 _("co-processor offset out of range"));
23562 value *= 4;
23563 goto cp_off_common;
23564
23565 case BFD_RELOC_ARM_THUMB_OFFSET:
23566 newval = md_chars_to_number (buf, THUMB_SIZE);
23567 /* Exactly what ranges, and where the offset is inserted depends
23568 on the type of instruction, we can establish this from the
23569 top 4 bits. */
23570 switch (newval >> 12)
23571 {
23572 case 4: /* PC load. */
23573 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
23574 forced to zero for these loads; md_pcrel_from has already
23575 compensated for this. */
23576 if (value & 3)
23577 as_bad_where (fixP->fx_file, fixP->fx_line,
23578 _("invalid offset, target not word aligned (0x%08lX)"),
23579 (((unsigned long) fixP->fx_frag->fr_address
23580 + (unsigned long) fixP->fx_where) & ~3)
23581 + (unsigned long) value);
23582
23583 if (value & ~0x3fc)
23584 as_bad_where (fixP->fx_file, fixP->fx_line,
23585 _("invalid offset, value too big (0x%08lX)"),
23586 (long) value);
23587
23588 newval |= value >> 2;
23589 break;
23590
23591 case 9: /* SP load/store. */
23592 if (value & ~0x3fc)
23593 as_bad_where (fixP->fx_file, fixP->fx_line,
23594 _("invalid offset, value too big (0x%08lX)"),
23595 (long) value);
23596 newval |= value >> 2;
23597 break;
23598
23599 case 6: /* Word load/store. */
23600 if (value & ~0x7c)
23601 as_bad_where (fixP->fx_file, fixP->fx_line,
23602 _("invalid offset, value too big (0x%08lX)"),
23603 (long) value);
23604 newval |= value << 4; /* 6 - 2. */
23605 break;
23606
23607 case 7: /* Byte load/store. */
23608 if (value & ~0x1f)
23609 as_bad_where (fixP->fx_file, fixP->fx_line,
23610 _("invalid offset, value too big (0x%08lX)"),
23611 (long) value);
23612 newval |= value << 6;
23613 break;
23614
23615 case 8: /* Halfword load/store. */
23616 if (value & ~0x3e)
23617 as_bad_where (fixP->fx_file, fixP->fx_line,
23618 _("invalid offset, value too big (0x%08lX)"),
23619 (long) value);
23620 newval |= value << 5; /* 6 - 1. */
23621 break;
23622
23623 default:
23624 as_bad_where (fixP->fx_file, fixP->fx_line,
23625 "Unable to process relocation for thumb opcode: %lx",
23626 (unsigned long) newval);
23627 break;
23628 }
23629 md_number_to_chars (buf, newval, THUMB_SIZE);
23630 break;
23631
23632 case BFD_RELOC_ARM_THUMB_ADD:
23633 /* This is a complicated relocation, since we use it for all of
23634 the following immediate relocations:
23635
23636 3bit ADD/SUB
23637 8bit ADD/SUB
23638 9bit ADD/SUB SP word-aligned
23639 10bit ADD PC/SP word-aligned
23640
23641 The type of instruction being processed is encoded in the
23642 instruction field:
23643
23644 0x8000 SUB
23645 0x00F0 Rd
23646 0x000F Rs
23647 */
23648 newval = md_chars_to_number (buf, THUMB_SIZE);
23649 {
23650 int rd = (newval >> 4) & 0xf;
23651 int rs = newval & 0xf;
23652 int subtract = !!(newval & 0x8000);
23653
23654 /* Check for HI regs, only very restricted cases allowed:
23655 Adjusting SP, and using PC or SP to get an address. */
23656 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
23657 || (rs > 7 && rs != REG_SP && rs != REG_PC))
23658 as_bad_where (fixP->fx_file, fixP->fx_line,
23659 _("invalid Hi register with immediate"));
23660
23661 /* If value is negative, choose the opposite instruction. */
23662 if (value < 0)
23663 {
23664 value = -value;
23665 subtract = !subtract;
23666 if (value < 0)
23667 as_bad_where (fixP->fx_file, fixP->fx_line,
23668 _("immediate value out of range"));
23669 }
23670
23671 if (rd == REG_SP)
23672 {
23673 if (value & ~0x1fc)
23674 as_bad_where (fixP->fx_file, fixP->fx_line,
23675 _("invalid immediate for stack address calculation"));
23676 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
23677 newval |= value >> 2;
23678 }
23679 else if (rs == REG_PC || rs == REG_SP)
23680 {
23681 /* PR gas/18541. If the addition is for a defined symbol
23682 within range of an ADR instruction then accept it. */
23683 if (subtract
23684 && value == 4
23685 && fixP->fx_addsy != NULL)
23686 {
23687 subtract = 0;
23688
23689 if (! S_IS_DEFINED (fixP->fx_addsy)
23690 || S_GET_SEGMENT (fixP->fx_addsy) != seg
23691 || S_IS_WEAK (fixP->fx_addsy))
23692 {
23693 as_bad_where (fixP->fx_file, fixP->fx_line,
23694 _("address calculation needs a strongly defined nearby symbol"));
23695 }
23696 else
23697 {
23698 offsetT v = fixP->fx_where + fixP->fx_frag->fr_address;
23699
23700 /* Round up to the next 4-byte boundary. */
23701 if (v & 3)
23702 v = (v + 3) & ~ 3;
23703 else
23704 v += 4;
23705 v = S_GET_VALUE (fixP->fx_addsy) - v;
23706
23707 if (v & ~0x3fc)
23708 {
23709 as_bad_where (fixP->fx_file, fixP->fx_line,
23710 _("symbol too far away"));
23711 }
23712 else
23713 {
23714 fixP->fx_done = 1;
23715 value = v;
23716 }
23717 }
23718 }
23719
23720 if (subtract || value & ~0x3fc)
23721 as_bad_where (fixP->fx_file, fixP->fx_line,
23722 _("invalid immediate for address calculation (value = 0x%08lX)"),
23723 (unsigned long) (subtract ? - value : value));
23724 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
23725 newval |= rd << 8;
23726 newval |= value >> 2;
23727 }
23728 else if (rs == rd)
23729 {
23730 if (value & ~0xff)
23731 as_bad_where (fixP->fx_file, fixP->fx_line,
23732 _("immediate value out of range"));
23733 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
23734 newval |= (rd << 8) | value;
23735 }
23736 else
23737 {
23738 if (value & ~0x7)
23739 as_bad_where (fixP->fx_file, fixP->fx_line,
23740 _("immediate value out of range"));
23741 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
23742 newval |= rd | (rs << 3) | (value << 6);
23743 }
23744 }
23745 md_number_to_chars (buf, newval, THUMB_SIZE);
23746 break;
23747
23748 case BFD_RELOC_ARM_THUMB_IMM:
23749 newval = md_chars_to_number (buf, THUMB_SIZE);
23750 if (value < 0 || value > 255)
23751 as_bad_where (fixP->fx_file, fixP->fx_line,
23752 _("invalid immediate: %ld is out of range"),
23753 (long) value);
23754 newval |= value;
23755 md_number_to_chars (buf, newval, THUMB_SIZE);
23756 break;
23757
23758 case BFD_RELOC_ARM_THUMB_SHIFT:
23759 /* 5bit shift value (0..32). LSL cannot take 32. */
23760 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
23761 temp = newval & 0xf800;
23762 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
23763 as_bad_where (fixP->fx_file, fixP->fx_line,
23764 _("invalid shift value: %ld"), (long) value);
23765 /* Shifts of zero must be encoded as LSL. */
23766 if (value == 0)
23767 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
23768 /* Shifts of 32 are encoded as zero. */
23769 else if (value == 32)
23770 value = 0;
23771 newval |= value << 6;
23772 md_number_to_chars (buf, newval, THUMB_SIZE);
23773 break;
23774
23775 case BFD_RELOC_VTABLE_INHERIT:
23776 case BFD_RELOC_VTABLE_ENTRY:
23777 fixP->fx_done = 0;
23778 return;
23779
23780 case BFD_RELOC_ARM_MOVW:
23781 case BFD_RELOC_ARM_MOVT:
23782 case BFD_RELOC_ARM_THUMB_MOVW:
23783 case BFD_RELOC_ARM_THUMB_MOVT:
23784 if (fixP->fx_done || !seg->use_rela_p)
23785 {
23786 /* REL format relocations are limited to a 16-bit addend. */
23787 if (!fixP->fx_done)
23788 {
23789 if (value < -0x8000 || value > 0x7fff)
23790 as_bad_where (fixP->fx_file, fixP->fx_line,
23791 _("offset out of range"));
23792 }
23793 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
23794 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
23795 {
23796 value >>= 16;
23797 }
23798
23799 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
23800 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
23801 {
23802 newval = get_thumb32_insn (buf);
23803 newval &= 0xfbf08f00;
23804 newval |= (value & 0xf000) << 4;
23805 newval |= (value & 0x0800) << 15;
23806 newval |= (value & 0x0700) << 4;
23807 newval |= (value & 0x00ff);
23808 put_thumb32_insn (buf, newval);
23809 }
23810 else
23811 {
23812 newval = md_chars_to_number (buf, 4);
23813 newval &= 0xfff0f000;
23814 newval |= value & 0x0fff;
23815 newval |= (value & 0xf000) << 4;
23816 md_number_to_chars (buf, newval, 4);
23817 }
23818 }
23819 return;
23820
23821 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
23822 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
23823 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
23824 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
23825 gas_assert (!fixP->fx_done);
23826 {
23827 bfd_vma insn;
23828 bfd_boolean is_mov;
23829 bfd_vma encoded_addend = value;
23830
23831 /* Check that addend can be encoded in instruction. */
23832 if (!seg->use_rela_p && (value < 0 || value > 255))
23833 as_bad_where (fixP->fx_file, fixP->fx_line,
23834 _("the offset 0x%08lX is not representable"),
23835 (unsigned long) encoded_addend);
23836
23837 /* Extract the instruction. */
23838 insn = md_chars_to_number (buf, THUMB_SIZE);
23839 is_mov = (insn & 0xf800) == 0x2000;
23840
23841 /* Encode insn. */
23842 if (is_mov)
23843 {
23844 if (!seg->use_rela_p)
23845 insn |= encoded_addend;
23846 }
23847 else
23848 {
23849 int rd, rs;
23850
23851 /* Extract the instruction. */
23852 /* Encoding is the following
23853 0x8000 SUB
23854 0x00F0 Rd
23855 0x000F Rs
23856 */
23857 /* The following conditions must be true :
23858 - ADD
23859 - Rd == Rs
23860 - Rd <= 7
23861 */
23862 rd = (insn >> 4) & 0xf;
23863 rs = insn & 0xf;
23864 if ((insn & 0x8000) || (rd != rs) || rd > 7)
23865 as_bad_where (fixP->fx_file, fixP->fx_line,
23866 _("Unable to process relocation for thumb opcode: %lx"),
23867 (unsigned long) insn);
23868
23869 /* Encode as ADD immediate8 thumb 1 code. */
23870 insn = 0x3000 | (rd << 8);
23871
23872 /* Place the encoded addend into the first 8 bits of the
23873 instruction. */
23874 if (!seg->use_rela_p)
23875 insn |= encoded_addend;
23876 }
23877
23878 /* Update the instruction. */
23879 md_number_to_chars (buf, insn, THUMB_SIZE);
23880 }
23881 break;
23882
23883 case BFD_RELOC_ARM_ALU_PC_G0_NC:
23884 case BFD_RELOC_ARM_ALU_PC_G0:
23885 case BFD_RELOC_ARM_ALU_PC_G1_NC:
23886 case BFD_RELOC_ARM_ALU_PC_G1:
23887 case BFD_RELOC_ARM_ALU_PC_G2:
23888 case BFD_RELOC_ARM_ALU_SB_G0_NC:
23889 case BFD_RELOC_ARM_ALU_SB_G0:
23890 case BFD_RELOC_ARM_ALU_SB_G1_NC:
23891 case BFD_RELOC_ARM_ALU_SB_G1:
23892 case BFD_RELOC_ARM_ALU_SB_G2:
23893 gas_assert (!fixP->fx_done);
23894 if (!seg->use_rela_p)
23895 {
23896 bfd_vma insn;
23897 bfd_vma encoded_addend;
23898 bfd_vma addend_abs = abs (value);
23899
23900 /* Check that the absolute value of the addend can be
23901 expressed as an 8-bit constant plus a rotation. */
23902 encoded_addend = encode_arm_immediate (addend_abs);
23903 if (encoded_addend == (unsigned int) FAIL)
23904 as_bad_where (fixP->fx_file, fixP->fx_line,
23905 _("the offset 0x%08lX is not representable"),
23906 (unsigned long) addend_abs);
23907
23908 /* Extract the instruction. */
23909 insn = md_chars_to_number (buf, INSN_SIZE);
23910
23911 /* If the addend is positive, use an ADD instruction.
23912 Otherwise use a SUB. Take care not to destroy the S bit. */
23913 insn &= 0xff1fffff;
23914 if (value < 0)
23915 insn |= 1 << 22;
23916 else
23917 insn |= 1 << 23;
23918
23919 /* Place the encoded addend into the first 12 bits of the
23920 instruction. */
23921 insn &= 0xfffff000;
23922 insn |= encoded_addend;
23923
23924 /* Update the instruction. */
23925 md_number_to_chars (buf, insn, INSN_SIZE);
23926 }
23927 break;
23928
23929 case BFD_RELOC_ARM_LDR_PC_G0:
23930 case BFD_RELOC_ARM_LDR_PC_G1:
23931 case BFD_RELOC_ARM_LDR_PC_G2:
23932 case BFD_RELOC_ARM_LDR_SB_G0:
23933 case BFD_RELOC_ARM_LDR_SB_G1:
23934 case BFD_RELOC_ARM_LDR_SB_G2:
23935 gas_assert (!fixP->fx_done);
23936 if (!seg->use_rela_p)
23937 {
23938 bfd_vma insn;
23939 bfd_vma addend_abs = abs (value);
23940
23941 /* Check that the absolute value of the addend can be
23942 encoded in 12 bits. */
23943 if (addend_abs >= 0x1000)
23944 as_bad_where (fixP->fx_file, fixP->fx_line,
23945 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
23946 (unsigned long) addend_abs);
23947
23948 /* Extract the instruction. */
23949 insn = md_chars_to_number (buf, INSN_SIZE);
23950
23951 /* If the addend is negative, clear bit 23 of the instruction.
23952 Otherwise set it. */
23953 if (value < 0)
23954 insn &= ~(1 << 23);
23955 else
23956 insn |= 1 << 23;
23957
23958 /* Place the absolute value of the addend into the first 12 bits
23959 of the instruction. */
23960 insn &= 0xfffff000;
23961 insn |= addend_abs;
23962
23963 /* Update the instruction. */
23964 md_number_to_chars (buf, insn, INSN_SIZE);
23965 }
23966 break;
23967
23968 case BFD_RELOC_ARM_LDRS_PC_G0:
23969 case BFD_RELOC_ARM_LDRS_PC_G1:
23970 case BFD_RELOC_ARM_LDRS_PC_G2:
23971 case BFD_RELOC_ARM_LDRS_SB_G0:
23972 case BFD_RELOC_ARM_LDRS_SB_G1:
23973 case BFD_RELOC_ARM_LDRS_SB_G2:
23974 gas_assert (!fixP->fx_done);
23975 if (!seg->use_rela_p)
23976 {
23977 bfd_vma insn;
23978 bfd_vma addend_abs = abs (value);
23979
23980 /* Check that the absolute value of the addend can be
23981 encoded in 8 bits. */
23982 if (addend_abs >= 0x100)
23983 as_bad_where (fixP->fx_file, fixP->fx_line,
23984 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
23985 (unsigned long) addend_abs);
23986
23987 /* Extract the instruction. */
23988 insn = md_chars_to_number (buf, INSN_SIZE);
23989
23990 /* If the addend is negative, clear bit 23 of the instruction.
23991 Otherwise set it. */
23992 if (value < 0)
23993 insn &= ~(1 << 23);
23994 else
23995 insn |= 1 << 23;
23996
23997 /* Place the first four bits of the absolute value of the addend
23998 into the first 4 bits of the instruction, and the remaining
23999 four into bits 8 .. 11. */
24000 insn &= 0xfffff0f0;
24001 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
24002
24003 /* Update the instruction. */
24004 md_number_to_chars (buf, insn, INSN_SIZE);
24005 }
24006 break;
24007
24008 case BFD_RELOC_ARM_LDC_PC_G0:
24009 case BFD_RELOC_ARM_LDC_PC_G1:
24010 case BFD_RELOC_ARM_LDC_PC_G2:
24011 case BFD_RELOC_ARM_LDC_SB_G0:
24012 case BFD_RELOC_ARM_LDC_SB_G1:
24013 case BFD_RELOC_ARM_LDC_SB_G2:
24014 gas_assert (!fixP->fx_done);
24015 if (!seg->use_rela_p)
24016 {
24017 bfd_vma insn;
24018 bfd_vma addend_abs = abs (value);
24019
24020 /* Check that the absolute value of the addend is a multiple of
24021 four and, when divided by four, fits in 8 bits. */
24022 if (addend_abs & 0x3)
24023 as_bad_where (fixP->fx_file, fixP->fx_line,
24024 _("bad offset 0x%08lX (must be word-aligned)"),
24025 (unsigned long) addend_abs);
24026
24027 if ((addend_abs >> 2) > 0xff)
24028 as_bad_where (fixP->fx_file, fixP->fx_line,
24029 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
24030 (unsigned long) addend_abs);
24031
24032 /* Extract the instruction. */
24033 insn = md_chars_to_number (buf, INSN_SIZE);
24034
24035 /* If the addend is negative, clear bit 23 of the instruction.
24036 Otherwise set it. */
24037 if (value < 0)
24038 insn &= ~(1 << 23);
24039 else
24040 insn |= 1 << 23;
24041
24042 /* Place the addend (divided by four) into the first eight
24043 bits of the instruction. */
24044 insn &= 0xfffffff0;
24045 insn |= addend_abs >> 2;
24046
24047 /* Update the instruction. */
24048 md_number_to_chars (buf, insn, INSN_SIZE);
24049 }
24050 break;
24051
24052 case BFD_RELOC_ARM_V4BX:
24053 /* This will need to go in the object file. */
24054 fixP->fx_done = 0;
24055 break;
24056
24057 case BFD_RELOC_UNUSED:
24058 default:
24059 as_bad_where (fixP->fx_file, fixP->fx_line,
24060 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
24061 }
24062 }
24063
24064 /* Translate internal representation of relocation info to BFD target
24065 format. */
24066
arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  /* Build the BFD-level relocation record.  The symbol and address come
     straight from the fixup; the addend handling depends on whether the
     target section uses RELA (explicit addend) or REL (in-place addend).  */
  reloc = XNEW (arelent);

  reloc->sym_ptr_ptr = XNEW (asymbol *);
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (fixp->fx_pcrel)
    {
      if (section->use_rela_p)
	/* RELA: bias the addend by the PC value the reloc is computed
	   from, so the linker's calculation comes out right.  */
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	/* REL: the addend lives in the section contents; record the
	   reloc's own address instead.  */
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  /* Map the internal fixup type onto the BFD reloc code that will be
     emitted.  NOTE: the pc-relative cases below deliberately cascade
     into the big "code = fixp->fx_r_type" group when fx_pcrel is not
     set.  */
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    /* These fixup types map directly onto a BFD reloc of the same
       name.  */
    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_THUMB_PCREL_BLX:
#ifdef OBJ_ELF
      /* EABI >= 4 does not have a BLX reloc; the linker performs the
	 BL <-> BLX transformation itself, so emit BRANCH23 instead.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	code = BFD_RELOC_THUMB_PCREL_BLX;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_GOT_PREL:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
    case BFD_RELOC_ARM_V4BX:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

    case BFD_RELOC_ARM_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      /* On REL targets this fixup should always have been resolved in
	 md_apply_fix; reaching here means the target was out of reach.
	 Diagnose an undefined local label specially.  */
      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	/* Any remaining fixup type is assembler-internal and cannot be
	   expressed in the object file; name it in the diagnostic.  */
	const char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
	  default:			   type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* A 32-bit reference to the GOT symbol itself becomes a GOTPC
     reloc, with the reloc's own address as the addend.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
24326
24327 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
24328
24329 void
24330 cons_fix_new_arm (fragS * frag,
24331 int where,
24332 int size,
24333 expressionS * exp,
24334 bfd_reloc_code_real_type reloc)
24335 {
24336 int pcrel = 0;
24337
24338 /* Pick a reloc.
24339 FIXME: @@ Should look at CPU word size. */
24340 switch (size)
24341 {
24342 case 1:
24343 reloc = BFD_RELOC_8;
24344 break;
24345 case 2:
24346 reloc = BFD_RELOC_16;
24347 break;
24348 case 4:
24349 default:
24350 reloc = BFD_RELOC_32;
24351 break;
24352 case 8:
24353 reloc = BFD_RELOC_64;
24354 break;
24355 }
24356
24357 #ifdef TE_PE
24358 if (exp->X_op == O_secrel)
24359 {
24360 exp->X_op = O_symbol;
24361 reloc = BFD_RELOC_32_SECREL;
24362 }
24363 #endif
24364
24365 fix_new_exp (frag, where, size, exp, pcrel, reloc);
24366 }
24367
#if defined (OBJ_COFF)
void
arm_validate_fix (fixS * fixP)
{
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (fixP->fx_r_type != BFD_RELOC_THUMB_PCREL_BRANCH23)
    return;

  if (fixP->fx_addsy == NULL
      || ! S_IS_DEFINED (fixP->fx_addsy)
      || THUMB_IS_FUNC (fixP->fx_addsy))
    return;

  fixP->fx_addsy = find_real_start (fixP->fx_addsy);
}
#endif
24385
24386
24387 int
24388 arm_force_relocation (struct fix * fixp)
24389 {
24390 #if defined (OBJ_COFF) && defined (TE_PE)
24391 if (fixp->fx_r_type == BFD_RELOC_RVA)
24392 return 1;
24393 #endif
24394
24395 /* In case we have a call or a branch to a function in ARM ISA mode from
24396 a thumb function or vice-versa force the relocation. These relocations
24397 are cleared off for some cores that might have blx and simple transformations
24398 are possible. */
24399
24400 #ifdef OBJ_ELF
24401 switch (fixp->fx_r_type)
24402 {
24403 case BFD_RELOC_ARM_PCREL_JUMP:
24404 case BFD_RELOC_ARM_PCREL_CALL:
24405 case BFD_RELOC_THUMB_PCREL_BLX:
24406 if (THUMB_IS_FUNC (fixp->fx_addsy))
24407 return 1;
24408 break;
24409
24410 case BFD_RELOC_ARM_PCREL_BLX:
24411 case BFD_RELOC_THUMB_PCREL_BRANCH25:
24412 case BFD_RELOC_THUMB_PCREL_BRANCH20:
24413 case BFD_RELOC_THUMB_PCREL_BRANCH23:
24414 if (ARM_IS_FUNC (fixp->fx_addsy))
24415 return 1;
24416 break;
24417
24418 default:
24419 break;
24420 }
24421 #endif
24422
24423 /* Resolve these relocations even if the symbol is extern or weak.
24424 Technically this is probably wrong due to symbol preemption.
24425 In practice these relocations do not have enough range to be useful
24426 at dynamic link time, and some code (e.g. in the Linux kernel)
24427 expects these references to be resolved. */
24428 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
24429 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
24430 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
24431 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
24432 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
24433 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
24434 || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
24435 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
24436 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
24437 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
24438 || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
24439 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
24440 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
24441 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
24442 return 0;
24443
24444 /* Always leave these relocations for the linker. */
24445 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
24446 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
24447 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
24448 return 1;
24449
24450 /* Always generate relocations against function symbols. */
24451 if (fixp->fx_r_type == BFD_RELOC_32
24452 && fixp->fx_addsy
24453 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
24454 return 1;
24455
24456 return generic_force_reloc (fixp);
24457 }
24458
24459 #if defined (OBJ_ELF) || defined (OBJ_COFF)
24460 /* Relocations against function names must be left unadjusted,
24461 so that the linker can use this information to generate interworking
24462 stubs. The MIPS version of this function
24463 also prevents relocations that are mips-16 specific, but I do not
24464 know why it does this.
24465
24466 FIXME:
24467 There is one other problem that ought to be addressed here, but
24468 which currently is not: Taking the address of a label (rather
24469 than a function) and then later jumping to that address. Such
24470 addresses also ought to have their bottom bit set (assuming that
24471 they reside in Thumb code), but at the moment they will not. */
24472
24473 bfd_boolean
24474 arm_fix_adjustable (fixS * fixP)
24475 {
24476 if (fixP->fx_addsy == NULL)
24477 return 1;
24478
24479 /* Preserve relocations against symbols with function type. */
24480 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
24481 return FALSE;
24482
24483 if (THUMB_IS_FUNC (fixP->fx_addsy)
24484 && fixP->fx_subsy == NULL)
24485 return FALSE;
24486
24487 /* We need the symbol name for the VTABLE entries. */
24488 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
24489 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
24490 return FALSE;
24491
24492 /* Don't allow symbols to be discarded on GOT related relocs. */
24493 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
24494 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
24495 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
24496 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
24497 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
24498 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
24499 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
24500 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
24501 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
24502 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
24503 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
24504 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
24505 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
24506 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
24507 return FALSE;
24508
24509 /* Similarly for group relocations. */
24510 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
24511 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
24512 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
24513 return FALSE;
24514
24515 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
24516 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
24517 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
24518 || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
24519 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
24520 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
24521 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
24522 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
24523 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
24524 return FALSE;
24525
24526 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
24527 offsets, so keep these symbols. */
24528 if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
24529 && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
24530 return FALSE;
24531
24532 return TRUE;
24533 }
24534 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
24535
24536 #ifdef OBJ_ELF
24537 const char *
24538 elf32_arm_target_format (void)
24539 {
24540 #ifdef TE_SYMBIAN
24541 return (target_big_endian
24542 ? "elf32-bigarm-symbian"
24543 : "elf32-littlearm-symbian");
24544 #elif defined (TE_VXWORKS)
24545 return (target_big_endian
24546 ? "elf32-bigarm-vxworks"
24547 : "elf32-littlearm-vxworks");
24548 #elif defined (TE_NACL)
24549 return (target_big_endian
24550 ? "elf32-bigarm-nacl"
24551 : "elf32-littlearm-nacl");
24552 #else
24553 if (target_big_endian)
24554 return "elf32-bigarm";
24555 else
24556 return "elf32-littlearm";
24557 #endif
24558 }
24559
/* Per-symbol hook invoked during symbol table finalization; simply
   defers to the generic ELF symbol frobbing code.  *PUNTP is set by
   the callee if the symbol should be punted (removed).  */

void
armelf_frob_symbol (symbolS * symp,
		    int * puntp)
{
  elf_frob_symbol (symp, puntp);
}
24566 #endif
24567
24568 /* MD interface: Finalization. */
24569
24570 void
24571 arm_cleanup (void)
24572 {
24573 literal_pool * pool;
24574
24575 /* Ensure that all the IT blocks are properly closed. */
24576 check_it_blocks_finished ();
24577
24578 for (pool = list_of_pools; pool; pool = pool->next)
24579 {
24580 /* Put it at the end of the relevant section. */
24581 subseg_set (pool->section, pool->sub_section);
24582 #ifdef OBJ_ELF
24583 arm_elf_change_section ();
24584 #endif
24585 s_ltorg (0);
24586 }
24587 }
24588
24589 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  Called once per section via bfd_map_over_sections;
   ABFD and DUMMY are unused.  */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Sections with no frag chain (e.g. no contents) need no work.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      /* Candidate for removal: the last mapping symbol in this frag.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* The symbol sits exactly on the boundary with the next frag;
	 scan forward to decide whether it is redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
24654 #endif
24655
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
   ARM ones.  For COFF this is encoded in the symbol's storage class;
   for ELF it is recorded in the symbol's branch type / st_info.  */

void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!  */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      /* Interworking symbols get n_flags = 0xFF in the native COFF
	 symbol entry.  */
      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  /* Mapping symbols ($a, $t, $d, ...) are left untouched.  */
	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
						BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal,
					 ST_BRANCH_TO_THUMB);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
24737
24738 /* MD interface: Initialization. */
24739
24740 static void
24741 set_constant_flonums (void)
24742 {
24743 int i;
24744
24745 for (i = 0; i < NUM_FLOAT_VALS; i++)
24746 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
24747 abort ();
24748 }
24749
/* Auto-select Thumb mode if it's the only available instruction set for the
   given architecture.  */

static void
autoselect_thumb_from_cpu_variant (void)
{
  /* A variant without the arm_ext_v1 feature has no ARM (32-bit)
     instruction set, so Thumb is the only choice; switch the
     assembler into 16-bit mode up front.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    opcode_select (16);
}
24759
/* MD interface hook: one-time initialization run by GAS after option
   parsing, before any input is assembled.  Builds the opcode, operand
   and register hash tables, resolves the CPU/FPU selection into
   cpu_variant, records the object-file private flags and finally the
   BFD machine number.  */
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Allocate all the lookup tables up front; any failure is fatal.  */
  if (	 (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  /* Populate the hash tables from the static description tables.  */
  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
		 (void *) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
		 (void *) (barrier_opt_names + i));
#ifdef OBJ_ELF
  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
    {
      struct reloc_entry * entry = reloc_names + i;

      if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
	/* This makes encode_branch() use the EABI versions of this relocation.  */
	entry->reloc = BFD_RELOC_UNUSED;

      hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
    }
#endif

  set_constant_flonums ();

  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Mixing legacy options with new-style options is faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      mcpu_cpu_opt = legacy_cpu;
    }
  else if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      mfpu_opt = legacy_fpu;
    }
  else if (!mfpu_opt)
    {
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	mfpu_opt = mcpu_fpu_opt;
      else
	mfpu_opt = march_fpu_opt;
#else
      mfpu_opt = &fpu_default;
#endif
    }

  /* Still no FPU?  Fall back to a reasonable default.  */
  if (!mfpu_opt)
    {
      if (mcpu_cpu_opt != NULL)
	mfpu_opt = &fpu_default;
      else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
	mfpu_opt = &fpu_arch_vfp_v2;
      else
	mfpu_opt = &fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (!mcpu_cpu_opt)
    {
      mcpu_cpu_opt = &cpu_default;
      selected_cpu = cpu_default;
    }
  else if (no_cpu_selected ())
    selected_cpu = cpu_default;
#else
  if (mcpu_cpu_opt)
    selected_cpu = *mcpu_cpu_opt;
  else
    mcpu_cpu_opt = &arm_arch_any;
#endif

  /* cpu_variant is the union of the selected CPU and FPU features.  */
  ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;

#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).  */
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	  flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.  */
	break;

      default:
	abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out of flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.  */
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags
	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
	    bfd_set_section_size (stdoutput, sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  Most specific extension first, so
     e.g. an iWMMXt2 CPU is not mis-reported as a plain v5TE.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
24986
24987 /* Command line processing. */
24988
24989 /* md_parse_option
24990 Invocation line includes a switch not recognized by the base assembler.
24991 See if it's a processor-specific option.
24992
24993 This routine is somewhat complicated by the need for backwards
24994 compatibility (since older releases of gcc can't be changed).
24995 The new options try to make the interface as compatible as
24996 possible with GCC.
24997
24998 New options (supported) are:
24999
25000 -mcpu=<cpu name> Assemble for selected processor
25001 -march=<architecture name> Assemble for selected architecture
25002 -mfpu=<fpu architecture> Assemble for selected FPU.
25003 -EB/-mbig-endian Big-endian
25004 -EL/-mlittle-endian Little-endian
25005 -k Generate PIC code
25006 -mthumb Start in Thumb mode
25007 -mthumb-interwork Code supports ARM/Thumb interworking
25008
25009 -m[no-]warn-deprecated Warn about deprecated features
25010 -m[no-]warn-syms Warn when symbols match instructions
25011
25012 For now we will also provide support for:
25013
25014 -mapcs-32 32-bit Program counter
25015 -mapcs-26 26-bit Program counter
   -mapcs-float		     Floats passed in FP registers
25017 -mapcs-reentrant Reentrant code
25018 -matpcs
25019 (sometime these will probably be replaced with -mapcs=<list of options>
25020 and -matpcs=<list of options>)
25021
   The remaining options are only supported for backwards compatibility.
25023 Cpu variants, the arm part is optional:
25024 -m[arm]1 Currently not supported.
25025 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
25026 -m[arm]3 Arm 3 processor
25027 -m[arm]6[xx], Arm 6 processors
25028 -m[arm]7[xx][t][[d]m] Arm 7 processors
25029 -m[arm]8[10] Arm 8 processors
25030 -m[arm]9[20][tdmi] Arm 9 processors
25031 -mstrongarm[110[0]] StrongARM processors
25032 -mxscale XScale processors
25033 -m[arm]v[2345[t[e]]] Arm architectures
25034 -mall All (except the ARM1)
25035 FP variants:
25036 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
25037 -mfpe-old (No float load/store multiples)
25038 -mvfpxd VFP Single precision
25039 -mvfp All VFP
25040 -mno-fpu Disable all floating point instructions
25041
25042 The following CPU names are recognized:
25043 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
25044 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
25045 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
25046 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
25047 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
25048 arm10t arm10e, arm1020t, arm1020e, arm10200e,
25049 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
25050
25051 */
25052
/* Short options recognized by the ARM backend: -m<arg> (takes an
   argument) and -k (generate PIC code, per arm_opts below).  */
const char * md_shortopts = "m:k";
25054
/* Values for the target-specific long options.  OPTION_EB/OPTION_EL
   are only defined when the corresponding endianness is supported by
   this configuration.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
25066
/* Long options recognized by the ARM backend; entries are only
   present for the endiannesses this configuration supports.  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
  {NULL, no_argument, NULL, 0}
};


size_t md_longopts_size = sizeof (md_longopts);
25081
/* Describes a simple on/off command-line option: when OPTION is seen,
   *VAR is set to VALUE (VAR may be NULL for accepted-but-ignored
   options).  */
struct arm_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  const char *deprecated;	/* If non-null, print this message.  */
};
25090
25091 struct arm_option_table arm_opts[] =
25092 {
25093 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
25094 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
25095 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
25096 &support_interwork, 1, NULL},
25097 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
25098 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
25099 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
25100 1, NULL},
25101 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
25102 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
25103 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
25104 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
25105 NULL},
25106
25107 /* These are recognized by the assembler, but have no affect on code. */
25108 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
25109 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
25110
25111 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
25112 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
25113 &warn_on_deprecated, 0, NULL},
25114 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL},
25115 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL},
25116 {NULL, NULL, NULL, 0, NULL}
25117 };
25118
/* Describes a deprecated (legacy) feature-selection option: when
   OPTION is seen, *VAR is pointed at VALUE and DEPRECATED (if
   non-null) is printed to suggest the modern replacement.  */
struct arm_legacy_option_table
{
  const char *option;			/* Option name to match.  */
  const arm_feature_set **var;		/* Variable to change.  */
  const arm_feature_set value;		/* What to change it to.  */
  const char *deprecated;		/* If non-null, print this message.  */
};
25126
/* Legacy -m<cpu>/-m<arch>/-m<fpu> options, each mapped to the feature
   set it implies plus a deprecation message naming the modern
   -mcpu=/-march=/-mfpu= replacement.  */
const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall",	 &legacy_cpu, ARM_ANY,	    N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.  */
  {"mfpe-old",	 &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu",	 &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
25239
/* Entry in the -mcpu= lookup table: maps a CPU name to its feature
   set, its implied default FPU, and optionally the canonical
   spelling of the CPU name.  */
struct arm_cpu_option_table
{
  const char *name;		/* CPU name as accepted on the command line.  */
  size_t name_len;		/* strlen (name), precomputed for matching.  */
  const arm_feature_set	value;	/* Features implied by this CPU.  */
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...	*/
  const arm_feature_set	default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
25252
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
#define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
static const struct arm_cpu_option_table arm_cpus[] =
{
  /* Pre-v5 cores: no VFP existed, so the FPA is the default FPU.  */
  ARM_CPU_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm1",		ARM_ARCH_V1,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm2",		ARM_ARCH_V2,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm250",	ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm3",		ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm6",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm60",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm600",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm610",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm620",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7m",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7d",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7dm",	ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7di",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7dmi",	ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm70",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm700",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm700i",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm710",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm710t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm720",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm720t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm740t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm710c",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7100",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7500",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7500fe",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7tdmi",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7tdmi-s",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm8",		ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm810",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm1",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm1100",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm1110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm9",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm920",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    "ARM920T"),
  ARM_CPU_OPT ("arm920t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm922t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm940t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm9tdmi",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("fa526",		ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("fa626",		ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.	 */
  ARM_CPU_OPT ("arm9e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm9e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm926ej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"),
  ARM_CPU_OPT ("arm926ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"),
  ARM_CPU_OPT ("arm926ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm946e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm946e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM946E-S"),
  ARM_CPU_OPT ("arm946e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm966e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm966e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM966E-S"),
  ARM_CPU_OPT ("arm966e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm968e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm10t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm10tdmi",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm10e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1020",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM1020E"),
  ARM_CPU_OPT ("arm1020t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm1020e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1022e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1026ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2,
								 "ARM1026EJ-S"),
  ARM_CPU_OPT ("arm1026ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa606te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa616te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa626te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fmp626",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa726te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1136js",	ARM_ARCH_V6,	 FPU_NONE,	  "ARM1136J-S"),
  ARM_CPU_OPT ("arm1136j-s",	ARM_ARCH_V6,	 FPU_NONE,	  NULL),
  ARM_CPU_OPT ("arm1136jfs",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2,
								 "ARM1136JF-S"),
  ARM_CPU_OPT ("arm1136jf-s",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("mpcore",	ARM_ARCH_V6K,	 FPU_ARCH_VFP_V2, "MPCore"),
  ARM_CPU_OPT ("mpcorenovfp",	ARM_ARCH_V6K,	 FPU_NONE,	  "MPCore"),
  ARM_CPU_OPT ("arm1156t2-s",	ARM_ARCH_V6T2,	 FPU_NONE,	  NULL),
  ARM_CPU_OPT ("arm1156t2f-s",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1176jz-s",	ARM_ARCH_V6KZ,	 FPU_NONE,	  NULL),
  ARM_CPU_OPT ("arm1176jzf-s",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("cortex-a5",	ARM_ARCH_V7A_MP_SEC,
						 FPU_NONE,	  "Cortex-A5"),
  ARM_CPU_OPT ("cortex-a7",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
								  "Cortex-A7"),
  ARM_CPU_OPT ("cortex-a8",	ARM_ARCH_V7A_SEC,
						 ARM_FEATURE_COPROC (FPU_VFP_V3
							| FPU_NEON_EXT_V1),
								  "Cortex-A8"),
  ARM_CPU_OPT ("cortex-a9",	ARM_ARCH_V7A_MP_SEC,
						 ARM_FEATURE_COPROC (FPU_VFP_V3
							| FPU_NEON_EXT_V1),
								  "Cortex-A9"),
  ARM_CPU_OPT ("cortex-a12",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
								  "Cortex-A12"),
  ARM_CPU_OPT ("cortex-a15",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
								  "Cortex-A15"),
  ARM_CPU_OPT ("cortex-a17",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
								  "Cortex-A17"),
  /* ARMv8-A cores: ARM_ARCH_V8A_CRC enables the CRC32 instructions by
     default, since these implementations include the CRC extension.  */
  ARM_CPU_OPT ("cortex-a32",    ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A32"),
  ARM_CPU_OPT ("cortex-a35",    ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A35"),
  ARM_CPU_OPT ("cortex-a53",    ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A53"),
  ARM_CPU_OPT ("cortex-a57",    ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A57"),
  ARM_CPU_OPT ("cortex-a72",    ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A72"),
  ARM_CPU_OPT ("cortex-a73",    ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A73"),
  ARM_CPU_OPT ("cortex-r4",	ARM_ARCH_V7R,	 FPU_NONE,	  "Cortex-R4"),
  ARM_CPU_OPT ("cortex-r4f",	ARM_ARCH_V7R,	 FPU_ARCH_VFP_V3D16,
								  "Cortex-R4F"),
  ARM_CPU_OPT ("cortex-r5",	ARM_ARCH_V7R_IDIV,
						 FPU_NONE,	  "Cortex-R5"),
  ARM_CPU_OPT ("cortex-r7",	ARM_ARCH_V7R_IDIV,
						 FPU_ARCH_VFP_V3D16,
								  "Cortex-R7"),
  ARM_CPU_OPT ("cortex-r8",	ARM_ARCH_V7R_IDIV,
						 FPU_ARCH_VFP_V3D16,
								  "Cortex-R8"),
  ARM_CPU_OPT ("cortex-m7",	ARM_ARCH_V7EM,	 FPU_NONE,	  "Cortex-M7"),
  ARM_CPU_OPT ("cortex-m4",	ARM_ARCH_V7EM,	 FPU_NONE,	  "Cortex-M4"),
  ARM_CPU_OPT ("cortex-m3",	ARM_ARCH_V7M,	 FPU_NONE,	  "Cortex-M3"),
  ARM_CPU_OPT ("cortex-m1",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M1"),
  ARM_CPU_OPT ("cortex-m0",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M0"),
  ARM_CPU_OPT ("cortex-m0plus",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M0+"),
  ARM_CPU_OPT ("exynos-m1",	ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Samsung " \
								  "Exynos M1"),
  ARM_CPU_OPT ("qdf24xx",	ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Qualcomm "
								  "QDF24XX"),

  /* ??? XSCALE is really an architecture.  */
  ARM_CPU_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
  /* ??? iwmmxt is not a processor.  */
  ARM_CPU_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("i80200",	ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
  /* Maverick */
  ARM_CPU_OPT ("ep9312",	ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
						 FPU_ARCH_MAVERICK, "ARM920T"),
  /* Marvell processors.  */
  ARM_CPU_OPT ("marvell-pj4",   ARM_FEATURE_CORE (ARM_AEXT_V7A | ARM_EXT_MP
						  | ARM_EXT_SEC,
						  ARM_EXT2_V6T2_V8M),
						FPU_ARCH_VFP_V3D16, NULL),
  ARM_CPU_OPT ("marvell-whitney", ARM_FEATURE_CORE (ARM_AEXT_V7A | ARM_EXT_MP
						    | ARM_EXT_SEC,
						    ARM_EXT2_V6T2_V8M),
					       FPU_ARCH_NEON_VFP_V4, NULL),
  /* APM X-Gene family.  */
  ARM_CPU_OPT ("xgene1",        ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	                                                          "APM X-Gene 1"),
  ARM_CPU_OPT ("xgene2",        ARM_ARCH_V8A_CRC, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	                                                          "APM X-Gene 2"),

  /* Sentinel: NULL name terminates the search in arm_parse_cpu.  */
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
};
#undef ARM_CPU_OPT
25425
/* An entry in the -march= option table: maps an architecture name to its
   feature set and the FPU assumed when -mfpu= is not given.  */
struct arm_arch_option_table
{
  const char *name;		/* Architecture name, e.g. "armv7-a".  */
  size_t name_len;		/* Precomputed strlen (name).  */
  const arm_feature_set	value;	/* Features implied by this architecture.  */
  const arm_feature_set	default_fpu;	/* Default FPU for this arch.  */
};
25433
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
static const struct arm_arch_option_table arm_archs[] =
{
  ARM_ARCH_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv1",	ARM_ARCH_V1,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2",	ARM_ARCH_V2,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2a",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2s",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3",	ARM_ARCH_V3,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3m",	ARM_ARCH_V3M,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4",	ARM_ARCH_V4,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4xm",	ARM_ARCH_V4xM,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4txm",	ARM_ARCH_V4TxM,	 FPU_ARCH_FPA),
  /* From v5 on the default FPU is VFP rather than FPA.  */
  ARM_ARCH_OPT ("armv5",	ARM_ARCH_V5,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5txm",	ARM_ARCH_V5TxM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5texp",	ARM_ARCH_V5TExP, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5tej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6j",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6k",	ARM_ARCH_V6K,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6z",	ARM_ARCH_V6Z,	 FPU_ARCH_VFP),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT ("armv6kz",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zk",	ARM_ARCH_V6KZ,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6t2",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6kt2",	ARM_ARCH_V6KT2,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zt2",	ARM_ARCH_V6ZT2,	 FPU_ARCH_VFP),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT ("armv6kzt2",	ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zkt2",	ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6-m",	ARM_ARCH_V6M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6s-m",	ARM_ARCH_V6SM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7",	ARM_ARCH_V7,	 FPU_ARCH_VFP),
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.  */
  ARM_ARCH_OPT ("armv7a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7ve",	ARM_ARCH_V7VE,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7e-m",	ARM_ARCH_V7EM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-m.base",	ARM_ARCH_V8M_BASE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-m.main",	ARM_ARCH_V8M_MAIN, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-a",	ARM_ARCH_V8A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.1-a",	ARM_ARCH_V8_1A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.2-a",	ARM_ARCH_V8_2A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
  /* Sentinel: NULL name terminates the search in arm_parse_arch.  */
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
#undef ARM_ARCH_OPT
25495
/* ISA extensions in the co-processor and main instruction set space.
   One entry per "+ext"/"+noext" suffix accepted by -mcpu=/-march=.  */
struct arm_option_extension_value_table
{
  const char *name;		/* Extension name, e.g. "crc".  */
  size_t name_len;		/* Precomputed strlen (name).  */
  const arm_feature_set merge_value;	/* Features added by "+name".  */
  const arm_feature_set clear_value;	/* Features removed by "+noname".  */
  /* List of architectures for which an extension is available.  ARM_ARCH_NONE
     indicates that an extension is available for all architectures while
     ARM_ANY marks an empty entry.  */
  const arm_feature_set allowed_archs[2];
};
25508
25509 /* The following table must be in alphabetical order with a NULL last entry.
25510 */
25511 #define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
25512 #define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }
25513 static const struct arm_option_extension_value_table arm_extensions[] =
25514 {
25515 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
25516 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
25517 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
25518 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
25519 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
25520 ARM_EXT_OPT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
25521 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
25522 ARM_FEATURE_CORE (ARM_EXT_V7M, ARM_EXT2_V8M)),
25523 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
25524 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
25525 ARM_EXT_OPT ("fp16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
25526 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
25527 ARM_ARCH_V8_2A),
25528 ARM_EXT_OPT2 ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
25529 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
25530 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
25531 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
25532 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
25533 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ARCH_NONE),
25534 ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
25535 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ARCH_NONE),
25536 ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
25537 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ARCH_NONE),
25538 ARM_EXT_OPT2 ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
25539 ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
25540 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
25541 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
25542 ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
25543 ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
25544 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
25545 ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
25546 ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
25547 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
25548 ARM_EXT_OPT ("ras", ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS),
25549 ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_RAS, 0),
25550 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
25551 ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8_1,
25552 ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA),
25553 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
25554 ARM_EXT_OPT2 ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
25555 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
25556 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
25557 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
25558 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8,
25559 ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
25560 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
25561 ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
25562 | ARM_EXT_DIV),
25563 ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
25564 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
25565 ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
25566 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ARCH_NONE),
25567 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, { ARM_ARCH_NONE, ARM_ARCH_NONE } }
25568 };
25569 #undef ARM_EXT_OPT
25570
/* ISA floating-point and Advanced SIMD extensions.
   Maps an -mfpu= name to the feature set it enables.  */
struct arm_option_fpu_value_table
{
  const char *name;		/* FPU name, e.g. "vfpv3-d16".  */
  const arm_feature_set value;	/* Co-processor features it enables.  */
};
25577
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* For backwards compatibility.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
  /* Names of CPUs whose FPU these historically selected.  */
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2},
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {"vfpv4",		FPU_ARCH_VFP_V4},
  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
  {"fpv5-d16",		FPU_ARCH_VFP_V5D16},
  {"fpv5-sp-d16",	FPU_ARCH_VFP_V5_SP_D16},
  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
  {"crypto-neon-fp-armv8",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
  {"neon-fp-armv8.1",	FPU_ARCH_NEON_VFP_ARMV8_1},
  {"crypto-neon-fp-armv8.1",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
  /* Sentinel: NULL name terminates the search in arm_parse_fpu.  */
  {NULL,		ARM_ARCH_NONE}
};
25627
/* Generic name -> integer-value mapping, used for the float-ABI and
   EABI-version option tables below.  */
struct arm_option_value_table
{
  const char *name;	/* Option value as written on the command line.  */
  long value;		/* Corresponding internal constant.  */
};
25633
/* Values accepted by -mfloat-abi=.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}
};
25641
#ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.
   Values accepted by -meabi=.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}
};
#endif
25652
/* An entry in the long (-m<name>=<value>) option table; matched by prefix
   in md_parse_option and dispatched to FUNC with the sub-option text.  */
struct arm_long_option_table
{
  const char * option;			/* Substring to match.  */
  const char * help;			/* Help information.  */
  int (* func) (const char * subopt);	/* Function to decode sub-option.  */
  const char * deprecated;		/* If non-null, print this message.  */
};
25660
/* Parse a "+ext1+ext2+noext3..." extension suffix STR against the
   arm_extensions table, applying each extension to a fresh copy of the
   feature set *OPT_P and pointing *OPT_P at that copy.  Returns TRUE on
   success; on failure reports an error with as_bad and returns FALSE.
   NOTE(review): the XNEW'd copy is not freed on the error paths, and
   *OPT_P may already have been redirected to it — harmless for a
   one-shot command-line parse, but a leak nonetheless.  */
static bfd_boolean
arm_parse_extension (const char *str, const arm_feature_set **opt_p)
{
  arm_feature_set *ext_set = XNEW (arm_feature_set);

  /* We insist on extensions being specified in alphabetical order, and with
     extensions being added before being removed.  We achieve this by having
     the global ARM_EXTENSIONS table in alphabetical order, and using the
     ADDING_VALUE variable to indicate whether we are adding an extension (1)
     or removing it (0) and only allowing it to change in the order
     -1 -> 1 -> 0.  */
  const struct arm_option_extension_value_table * opt = NULL;
  const arm_feature_set arm_any = ARM_ANY;
  int adding_value = -1;

  /* Copy the feature set, so that we can modify it.  */
  *ext_set = **opt_p;
  *opt_p = ext_set;

  while (str != NULL && *str != 0)
    {
      const char *ext;
      size_t len;

      /* Every extension must be introduced by a '+'.  */
      if (*str != '+')
	{
	  as_bad (_("invalid architectural extension"));
	  return FALSE;
	}

      str++;
      ext = strchr (str, '+');

      /* LEN is the length of this extension name only.  */
      if (ext != NULL)
	len = ext - str;
      else
	len = strlen (str);

      /* A "no" prefix switches to removal mode; restart the table scan
	 since "noX" entries need not follow previously matched ones.  */
      if (len >= 2 && strncmp (str, "no", 2) == 0)
	{
	  if (adding_value != 0)
	    {
	      adding_value = 0;
	      opt = arm_extensions;
	    }

	  len -= 2;
	  str += 2;
	}
      else if (len > 0)
	{
	  if (adding_value == -1)
	    {
	      adding_value = 1;
	      opt = arm_extensions;
	    }
	  else if (adding_value != 1)
	    {
	      as_bad (_("must specify extensions to add before specifying "
			"those to remove"));
	      return FALSE;
	    }
	}

      if (len == 0)
	{
	  as_bad (_("missing architectural extension"));
	  return FALSE;
	}

      gas_assert (adding_value != -1);
      gas_assert (opt != NULL);

      /* Scan over the options table trying to find an exact match.  OPT
	 resumes where the previous iteration stopped, which is what
	 enforces the alphabetical-order requirement.  */
      for (; opt->name != NULL; opt++)
	if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	  {
	    int i, nb_allowed_archs =
	      sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
	    /* Check we can apply the extension to this architecture.  */
	    for (i = 0; i < nb_allowed_archs; i++)
	      {
		/* Empty entry.  */
		if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
		  continue;
		if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *ext_set))
		  break;
	      }
	    if (i == nb_allowed_archs)
	      {
		as_bad (_("extension does not apply to the base architecture"));
		return FALSE;
	      }

	    /* Add or remove the extension.  */
	    if (adding_value)
	      ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
	    else
	      ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);

	    break;
	  }

      if (opt->name == NULL)
	{
	  /* Did we fail to find an extension because it wasn't specified in
	     alphabetical order, or because it does not exist?  A full
	     rescan from the start of the table distinguishes the two.  */

	  for (opt = arm_extensions; opt->name != NULL; opt++)
	    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	      break;

	  if (opt->name == NULL)
	    as_bad (_("unknown architectural extension `%s'"), str);
	  else
	    as_bad (_("architectural extensions must be specified in "
		      "alphabetical order"));

	  return FALSE;
	}
      else
	{
	  /* We should skip the extension we've just matched the next time
	     round.  */
	  opt++;
	}

      str = ext;
    };

  return TRUE;
}
25793
25794 static bfd_boolean
25795 arm_parse_cpu (const char *str)
25796 {
25797 const struct arm_cpu_option_table *opt;
25798 const char *ext = strchr (str, '+');
25799 size_t len;
25800
25801 if (ext != NULL)
25802 len = ext - str;
25803 else
25804 len = strlen (str);
25805
25806 if (len == 0)
25807 {
25808 as_bad (_("missing cpu name `%s'"), str);
25809 return FALSE;
25810 }
25811
25812 for (opt = arm_cpus; opt->name != NULL; opt++)
25813 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25814 {
25815 mcpu_cpu_opt = &opt->value;
25816 mcpu_fpu_opt = &opt->default_fpu;
25817 if (opt->canonical_name)
25818 {
25819 gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name));
25820 strcpy (selected_cpu_name, opt->canonical_name);
25821 }
25822 else
25823 {
25824 size_t i;
25825
25826 if (len >= sizeof selected_cpu_name)
25827 len = (sizeof selected_cpu_name) - 1;
25828
25829 for (i = 0; i < len; i++)
25830 selected_cpu_name[i] = TOUPPER (opt->name[i]);
25831 selected_cpu_name[i] = 0;
25832 }
25833
25834 if (ext != NULL)
25835 return arm_parse_extension (ext, &mcpu_cpu_opt);
25836
25837 return TRUE;
25838 }
25839
25840 as_bad (_("unknown cpu `%s'"), str);
25841 return FALSE;
25842 }
25843
25844 static bfd_boolean
25845 arm_parse_arch (const char *str)
25846 {
25847 const struct arm_arch_option_table *opt;
25848 const char *ext = strchr (str, '+');
25849 size_t len;
25850
25851 if (ext != NULL)
25852 len = ext - str;
25853 else
25854 len = strlen (str);
25855
25856 if (len == 0)
25857 {
25858 as_bad (_("missing architecture name `%s'"), str);
25859 return FALSE;
25860 }
25861
25862 for (opt = arm_archs; opt->name != NULL; opt++)
25863 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25864 {
25865 march_cpu_opt = &opt->value;
25866 march_fpu_opt = &opt->default_fpu;
25867 strcpy (selected_cpu_name, opt->name);
25868
25869 if (ext != NULL)
25870 return arm_parse_extension (ext, &march_cpu_opt);
25871
25872 return TRUE;
25873 }
25874
25875 as_bad (_("unknown architecture `%s'\n"), str);
25876 return FALSE;
25877 }
25878
25879 static bfd_boolean
25880 arm_parse_fpu (const char * str)
25881 {
25882 const struct arm_option_fpu_value_table * opt;
25883
25884 for (opt = arm_fpus; opt->name != NULL; opt++)
25885 if (streq (opt->name, str))
25886 {
25887 mfpu_opt = &opt->value;
25888 return TRUE;
25889 }
25890
25891 as_bad (_("unknown floating point format `%s'\n"), str);
25892 return FALSE;
25893 }
25894
25895 static bfd_boolean
25896 arm_parse_float_abi (const char * str)
25897 {
25898 const struct arm_option_value_table * opt;
25899
25900 for (opt = arm_float_abis; opt->name != NULL; opt++)
25901 if (streq (opt->name, str))
25902 {
25903 mfloat_abi_opt = opt->value;
25904 return TRUE;
25905 }
25906
25907 as_bad (_("unknown floating point abi `%s'\n"), str);
25908 return FALSE;
25909 }
25910
#ifdef OBJ_ELF
/* Handle -meabi=<ver>: exact-match lookup in arm_eabis, setting
   meabi_flags.  Returns TRUE on success, FALSE (with an as_bad
   diagnostic) otherwise.  */
static bfd_boolean
arm_parse_eabi (const char * str)
{
  const struct arm_option_value_table *entry;

  for (entry = arm_eabis; entry->name != NULL; entry++)
    {
      if (! streq (entry->name, str))
	continue;

      meabi_flags = entry->value;
      return TRUE;
    }

  as_bad (_("unknown EABI `%s'\n"), str);
  return FALSE;
}
#endif
25927
25928 static bfd_boolean
25929 arm_parse_it_mode (const char * str)
25930 {
25931 bfd_boolean ret = TRUE;
25932
25933 if (streq ("arm", str))
25934 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
25935 else if (streq ("thumb", str))
25936 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
25937 else if (streq ("always", str))
25938 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
25939 else if (streq ("never", str))
25940 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
25941 else
25942 {
25943 as_bad (_("unknown implicit IT mode `%s', should be "\
25944 "arm, thumb, always, or never."), str);
25945 ret = FALSE;
25946 }
25947
25948 return ret;
25949 }
25950
/* Handle -mccs: enable TI CodeComposer Studio syntax compatibility by
   switching the comment character to ';' and disabling the statement
   separator.  Always succeeds.  */
static bfd_boolean
arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED)
{
  codecomposer_syntax = TRUE;
  arm_comment_chars[0] = ';';
  arm_line_separator_chars[0] = 0;
  return TRUE;
}
25959
25960 struct arm_long_option_table arm_long_opts[] =
25961 {
25962 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
25963 arm_parse_cpu, NULL},
25964 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
25965 arm_parse_arch, NULL},
25966 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
25967 arm_parse_fpu, NULL},
25968 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
25969 arm_parse_float_abi, NULL},
25970 #ifdef OBJ_ELF
25971 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
25972 arm_parse_eabi, NULL},
25973 #endif
25974 {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
25975 arm_parse_it_mode, NULL},
25976 {"mccs", N_("\t\t\t TI CodeComposer Studio syntax compatibility mode"),
25977 arm_ccs_mode, NULL},
25978 {NULL, NULL, 0, NULL}
25979 };
25980
/* GAS target hook: handle command-line option C with argument ARG.
   Dispatches, in order, to: the hard-wired cases below, the short-option
   table (arm_opts), the legacy-option table (arm_legacy_opts), then the
   long (-m...=) table (arm_long_opts).  Returns 1 if the option was
   consumed, 0 if it is unknown.  */
int
md_parse_option (int c, const char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.	*/
      return 0;

    default:
      /* Short options: C is the option letter, ARG (if any) must equal
	 the rest of the table entry's option string exactly.  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  Match ARG
	     against the option string minus its first character (which
	     was already matched as C).  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  ARG + strlen (option) - 1
		 skips the matched prefix (the "- 1" accounts for the
		 option letter held in C, not in ARG).  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
26071
26072 void
26073 md_show_usage (FILE * fp)
26074 {
26075 struct arm_option_table *opt;
26076 struct arm_long_option_table *lopt;
26077
26078 fprintf (fp, _(" ARM-specific assembler options:\n"));
26079
26080 for (opt = arm_opts; opt->option != NULL; opt++)
26081 if (opt->help != NULL)
26082 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
26083
26084 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
26085 if (lopt->help != NULL)
26086 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
26087
26088 #ifdef OPTION_EB
26089 fprintf (fp, _("\
26090 -EB assemble code for a big-endian cpu\n"));
26091 #endif
26092
26093 #ifdef OPTION_EL
26094 fprintf (fp, _("\
26095 -EL assemble code for a little-endian cpu\n"));
26096 #endif
26097
26098 fprintf (fp, _("\
26099 --fix-v4bx Allow BX in ARMv4 code\n"));
26100 }
26101
26102
#ifdef OBJ_ELF
/* Pairs an EABI Tag_CPU_arch attribute value with the feature set that
   identifies that architecture.  */
typedef struct
{
  int val;			/* EABI CPU arch attribute value.  */
  arm_feature_set flags;	/* Features of that architecture.  */
} cpu_arch_ver_table;
26109
/* Mapping from CPU features to EABI CPU arch values.  As a general rule, table
   must be sorted least features first but some reordering is needed, eg. for
   Thumb-2 instructions to be detected as coming from ARMv6T2.  (Hence V6K
   appearing before V6Z, and V6T2 after V6M/V6SM, below: the scan in
   aeabi_set_public_attributes keeps the LAST entry that contributes a new
   feature.)  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {1, ARM_ARCH_V4},
    {2, ARM_ARCH_V4T},
    {3, ARM_ARCH_V5},
    {3, ARM_ARCH_V5T},
    {4, ARM_ARCH_V5TE},
    {5, ARM_ARCH_V5TEJ},
    {6, ARM_ARCH_V6},
    {9, ARM_ARCH_V6K},
    {7, ARM_ARCH_V6Z},
    {11, ARM_ARCH_V6M},
    {12, ARM_ARCH_V6SM},
    {8, ARM_ARCH_V6T2},
    {10, ARM_ARCH_V7VE},
    {10, ARM_ARCH_V7R},
    {10, ARM_ARCH_V7M},
    {14, ARM_ARCH_V8A},
    {16, ARM_ARCH_V8M_BASE},
    {17, ARM_ARCH_V8M_MAIN},
    {0, ARM_ARCH_NONE}		/* Sentinel (val == 0 ends the scan).  */
};
26135
/* Set an attribute if it has not already been set by the user.
   TAG is the EABI object-attribute tag, VALUE its integer value; tags the
   user set explicitly (attributes_set_explicitly) are left untouched,
   as are tags outside the known range (tag < 1 guards index 0; tags
   >= NUM_KNOWN_OBJ_ATTRIBUTES have no explicit-set tracking).  */
static void
aeabi_set_attribute_int (int tag, int value)
{
  if (tag < 1
      || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
      || !attributes_set_explicitly[tag])
    bfd_elf_add_proc_attr_int (stdoutput, tag, value);
}
26145
/* String counterpart of aeabi_set_attribute_int: set string attribute TAG
   to VALUE unless the user already set it explicitly.  */
static void
aeabi_set_attribute_string (int tag, const char *value)
{
  if (tag < 1
      || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
      || !attributes_set_explicitly[tag])
    bfd_elf_add_proc_attr_string (stdoutput, tag, value);
}
26154
/* Set the public EABI object attributes (the .ARM.attributes section).
   Computes the architecture/profile/FP/SIMD tags from the union of the
   features actually used, the selected CPU and the selected FPU, then
   records each tag via aeabi_set_attribute_{int,string} (which defer to
   any values the user set explicitly).  Also updates the global
   selected_cpu as a side effect.  */
void
aeabi_set_public_attributes (void)
{
  int arch;			/* Tag_CPU_arch value to emit.  */
  char profile;			/* 'A', 'R', 'M' or '\0' (none).  */
  int virt_sec = 0;		/* Tag_Virtualization_use bitmask.  */
  int fp16_optional = 0;	/* Set when FP16 is an optional extension
				   of the chosen VFP/NEON level.  */
  arm_feature_set arm_arch = ARM_ARCH_NONE;
  arm_feature_set flags;	/* Accumulated feature union.  */
  arm_feature_set tmp;
  arm_feature_set arm_arch_v8m_base = ARM_ARCH_V8M_BASE;
  const cpu_arch_ver_table *p;

  /* Choose the architecture based on the capabilities of the requested cpu
     (if any) and/or the instructions actually used.  */
  ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
  ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);

  /* If any ARM (resp. Thumb) instruction was assembled at all, make sure
     the baseline ARM (resp. Thumb) ISA feature bit is present.  */
  if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
    ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);

  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
    ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);

  selected_cpu = flags;

  /* Allow the user to override the reported architecture.  */
  if (object_arch)
    {
      ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
      ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
    }

  /* We need to make sure that the attributes do not identify us as v6S-M
     when the only v6S-M feature in use is the Operating System Extensions.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_os))
    if (!ARM_CPU_HAS_FEATURE (flags, arm_arch_v6m_only))
      ARM_CLEAR_FEATURE (flags, flags, arm_ext_os);

  /* Scan the feature->arch mapping; the last entry whose feature set is
     fully present wins (the table is ordered for this).  */
  tmp = flags;
  arch = 0;
  for (p = cpu_arch_ver; p->val; p++)
    {
      if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
	{
	  arch = p->val;
	  arm_arch = p->flags;
	  ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
	}
    }

  /* The table lookup above finds the last architecture to contribute
     a new feature.  Unfortunately, Tag13 is a subset of the union of
     v6T2 and v7-M, so it is never seen as contributing a new feature.
     We can not search for the last entry which is entirely used,
     because if no CPU is specified we build up only those flags
     actually used.  Perhaps we should separate out the specified
     and implicit cases.  Avoid taking this path for -march=all by
     checking for contradictory v7-A / v7-M features.  */
  if (arch == TAG_CPU_ARCH_V7
      && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp))
    {
      arch = TAG_CPU_ARCH_V7E_M;
      arm_arch = (arm_feature_set) ARM_ARCH_V7EM;
    }

  /* If anything beyond v8-M Baseline is in use, promote Baseline to
     Mainline.  */
  ARM_CLEAR_FEATURE (tmp, flags, arm_arch_v8m_base);
  if (arch == TAG_CPU_ARCH_V8M_BASE && ARM_CPU_HAS_FEATURE (tmp, arm_arch_any))
    {
      arch = TAG_CPU_ARCH_V8M_MAIN;
      arm_arch = (arm_feature_set) ARM_ARCH_V8M_MAIN;
    }

  /* In cpu_arch_ver ARMv8-A is before ARMv8-M for atomics to be detected as
     coming from ARMv8-A.  However, since ARMv8-A has more instructions than
     ARMv8-M, -march=all must be detected as ARMv8-A.  */
  if (arch == TAG_CPU_ARCH_V8M_MAIN
      && ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    {
      arch = TAG_CPU_ARCH_V8;
      arm_arch = (arm_feature_set) ARM_ARCH_V8A;
    }

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      /* Architecture names ("armv...") are reported with the "armv"
	 prefix stripped and the rest upper-cased (modifies the global
	 selected_cpu_name in place).  */
      if (strncmp (q, "armv", 4) == 0)
	{
	  int i;

	  q += 4;
	  for (i = 0; q[i]; i++)
	    q[i] = TOUPPER (q[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || (ARM_CPU_HAS_FEATURE (flags, arm_ext_atomics)
	  && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only)))
    profile = 'A';
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
    profile = 'R';
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
    profile = 'M';
  else
    profile = '\0';

  if (profile != '\0')
    aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);

  /* Tag_DSP_extension: only set when DSP instructions are used beyond
     what the reported architecture itself provides.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_dsp))
    {
      arm_feature_set ext;

      /* DSP instructions not in architecture.  */
      ARM_CLEAR_FEATURE (ext, flags, arm_arch);
      if (ARM_CPU_HAS_FEATURE (ext, arm_ext_dsp))
	aeabi_set_attribute_int (Tag_DSP_extension, 1);
    }

  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || arch == 0)
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use: 3 = v8-M only, 2 = Thumb-2, 1 = Thumb-1.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || arch == 0)
    {
      int thumb_isa_use;

      if (!ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
	  && ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only))
	thumb_isa_use = 3;
      else if (ARM_CPU_HAS_FEATURE (flags, arm_arch_t2))
	thumb_isa_use = 2;
      else
	thumb_isa_use = 1;
      aeabi_set_attribute_int (Tag_THUMB_ISA_use, thumb_isa_use);
    }

  /* Tag_VFP_arch: pick the highest matching VFP level, most capable
     first; the D16/D32 distinction selects between the value pairs.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 7 : 8);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    {
      fp16_optional = 1;
      aeabi_set_attribute_int (Tag_VFP_arch, 3);
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    {
      aeabi_set_attribute_int (Tag_VFP_arch, 4);
      fp16_optional = 1;
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_ABI_HardFP_use: single-precision-only VFP (v1xd without v1).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v8_1))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 4);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    {
      if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
	}
      else
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
	  fp16_optional = 1;
	}
    }

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);

  /* Tag_DIV_use.

     We set Tag_DIV_use to two when integer divide instructions have been used
     in ARM state, or when Thumb integer divide instructions have been used,
     but we have no architecture profile set, nor have we any ARM instructions.

     For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
     by the base architecture.

     For new architectures we will have to check these tests.  */
  gas_assert (arch <= TAG_CPU_ARCH_V8
	      || (arch >= TAG_CPU_ARCH_V8M_BASE
		  && arch <= TAG_CPU_ARCH_V8M_MAIN));
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
    aeabi_set_attribute_int (Tag_DIV_use, 0);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
	   || (profile == '\0'
	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
    aeabi_set_attribute_int (Tag_DIV_use, 2);

  /* Tag_MP_extension_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
    aeabi_set_attribute_int (Tag_MPextension_use, 1);

  /* Tag Virtualization_use: bit 0 = Security Extensions, bit 1 =
     Virtualization Extensions.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
    virt_sec |= 1;
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
    virt_sec |= 2;
  if (virt_sec != 0)
    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
}
26402
26403 /* Add the default contents for the .ARM.attributes section. */
26404 void
26405 arm_md_end (void)
26406 {
26407 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
26408 return;
26409
26410 aeabi_set_public_attributes ();
26411 }
26412 #endif /* OBJ_ELF */
26413
26414
26415 /* Parse a .cpu directive. */
26416
26417 static void
26418 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
26419 {
26420 const struct arm_cpu_option_table *opt;
26421 char *name;
26422 char saved_char;
26423
26424 name = input_line_pointer;
26425 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26426 input_line_pointer++;
26427 saved_char = *input_line_pointer;
26428 *input_line_pointer = 0;
26429
26430 /* Skip the first "all" entry. */
26431 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
26432 if (streq (opt->name, name))
26433 {
26434 mcpu_cpu_opt = &opt->value;
26435 selected_cpu = opt->value;
26436 if (opt->canonical_name)
26437 strcpy (selected_cpu_name, opt->canonical_name);
26438 else
26439 {
26440 int i;
26441 for (i = 0; opt->name[i]; i++)
26442 selected_cpu_name[i] = TOUPPER (opt->name[i]);
26443
26444 selected_cpu_name[i] = 0;
26445 }
26446 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26447 *input_line_pointer = saved_char;
26448 demand_empty_rest_of_line ();
26449 return;
26450 }
26451 as_bad (_("unknown cpu `%s'"), name);
26452 *input_line_pointer = saved_char;
26453 ignore_rest_of_line ();
26454 }
26455
26456
26457 /* Parse a .arch directive. */
26458
26459 static void
26460 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
26461 {
26462 const struct arm_arch_option_table *opt;
26463 char saved_char;
26464 char *name;
26465
26466 name = input_line_pointer;
26467 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26468 input_line_pointer++;
26469 saved_char = *input_line_pointer;
26470 *input_line_pointer = 0;
26471
26472 /* Skip the first "all" entry. */
26473 for (opt = arm_archs + 1; opt->name != NULL; opt++)
26474 if (streq (opt->name, name))
26475 {
26476 mcpu_cpu_opt = &opt->value;
26477 selected_cpu = opt->value;
26478 strcpy (selected_cpu_name, opt->name);
26479 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26480 *input_line_pointer = saved_char;
26481 demand_empty_rest_of_line ();
26482 return;
26483 }
26484
26485 as_bad (_("unknown architecture `%s'\n"), name);
26486 *input_line_pointer = saved_char;
26487 ignore_rest_of_line ();
26488 }
26489
26490
26491 /* Parse a .object_arch directive. */
26492
26493 static void
26494 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
26495 {
26496 const struct arm_arch_option_table *opt;
26497 char saved_char;
26498 char *name;
26499
26500 name = input_line_pointer;
26501 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26502 input_line_pointer++;
26503 saved_char = *input_line_pointer;
26504 *input_line_pointer = 0;
26505
26506 /* Skip the first "all" entry. */
26507 for (opt = arm_archs + 1; opt->name != NULL; opt++)
26508 if (streq (opt->name, name))
26509 {
26510 object_arch = &opt->value;
26511 *input_line_pointer = saved_char;
26512 demand_empty_rest_of_line ();
26513 return;
26514 }
26515
26516 as_bad (_("unknown architecture `%s'\n"), name);
26517 *input_line_pointer = saved_char;
26518 ignore_rest_of_line ();
26519 }
26520
26521 /* Parse a .arch_extension directive. */
26522
26523 static void
26524 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
26525 {
26526 const struct arm_option_extension_value_table *opt;
26527 const arm_feature_set arm_any = ARM_ANY;
26528 char saved_char;
26529 char *name;
26530 int adding_value = 1;
26531
26532 name = input_line_pointer;
26533 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26534 input_line_pointer++;
26535 saved_char = *input_line_pointer;
26536 *input_line_pointer = 0;
26537
26538 if (strlen (name) >= 2
26539 && strncmp (name, "no", 2) == 0)
26540 {
26541 adding_value = 0;
26542 name += 2;
26543 }
26544
26545 for (opt = arm_extensions; opt->name != NULL; opt++)
26546 if (streq (opt->name, name))
26547 {
26548 int i, nb_allowed_archs =
26549 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[i]);
26550 for (i = 0; i < nb_allowed_archs; i++)
26551 {
26552 /* Empty entry. */
26553 if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
26554 continue;
26555 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *mcpu_cpu_opt))
26556 break;
26557 }
26558
26559 if (i == nb_allowed_archs)
26560 {
26561 as_bad (_("architectural extension `%s' is not allowed for the "
26562 "current base architecture"), name);
26563 break;
26564 }
26565
26566 if (adding_value)
26567 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_cpu,
26568 opt->merge_value);
26569 else
26570 ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, opt->clear_value);
26571
26572 mcpu_cpu_opt = &selected_cpu;
26573 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26574 *input_line_pointer = saved_char;
26575 demand_empty_rest_of_line ();
26576 return;
26577 }
26578
26579 if (opt->name == NULL)
26580 as_bad (_("unknown architecture extension `%s'\n"), name);
26581
26582 *input_line_pointer = saved_char;
26583 ignore_rest_of_line ();
26584 }
26585
26586 /* Parse a .fpu directive. */
26587
26588 static void
26589 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
26590 {
26591 const struct arm_option_fpu_value_table *opt;
26592 char saved_char;
26593 char *name;
26594
26595 name = input_line_pointer;
26596 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26597 input_line_pointer++;
26598 saved_char = *input_line_pointer;
26599 *input_line_pointer = 0;
26600
26601 for (opt = arm_fpus; opt->name != NULL; opt++)
26602 if (streq (opt->name, name))
26603 {
26604 mfpu_opt = &opt->value;
26605 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26606 *input_line_pointer = saved_char;
26607 demand_empty_rest_of_line ();
26608 return;
26609 }
26610
26611 as_bad (_("unknown floating point format `%s'\n"), name);
26612 *input_line_pointer = saved_char;
26613 ignore_rest_of_line ();
26614 }
26615
/* Copy symbol information.  Propagates the ARM-private flag word
   (accessed via ARM_GET_FLAG) from SRC to DEST so DEST inherits SRC's
   target-specific annotations.  */

void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
26623
26624 #ifdef OBJ_ELF
26625 /* Given a symbolic attribute NAME, return the proper integer value.
26626 Returns -1 if the attribute is not known. */
26627
26628 int
26629 arm_convert_symbolic_attribute (const char *name)
26630 {
26631 static const struct
26632 {
26633 const char * name;
26634 const int tag;
26635 }
26636 attribute_table[] =
26637 {
26638 /* When you modify this table you should
26639 also modify the list in doc/c-arm.texi. */
26640 #define T(tag) {#tag, tag}
26641 T (Tag_CPU_raw_name),
26642 T (Tag_CPU_name),
26643 T (Tag_CPU_arch),
26644 T (Tag_CPU_arch_profile),
26645 T (Tag_ARM_ISA_use),
26646 T (Tag_THUMB_ISA_use),
26647 T (Tag_FP_arch),
26648 T (Tag_VFP_arch),
26649 T (Tag_WMMX_arch),
26650 T (Tag_Advanced_SIMD_arch),
26651 T (Tag_PCS_config),
26652 T (Tag_ABI_PCS_R9_use),
26653 T (Tag_ABI_PCS_RW_data),
26654 T (Tag_ABI_PCS_RO_data),
26655 T (Tag_ABI_PCS_GOT_use),
26656 T (Tag_ABI_PCS_wchar_t),
26657 T (Tag_ABI_FP_rounding),
26658 T (Tag_ABI_FP_denormal),
26659 T (Tag_ABI_FP_exceptions),
26660 T (Tag_ABI_FP_user_exceptions),
26661 T (Tag_ABI_FP_number_model),
26662 T (Tag_ABI_align_needed),
26663 T (Tag_ABI_align8_needed),
26664 T (Tag_ABI_align_preserved),
26665 T (Tag_ABI_align8_preserved),
26666 T (Tag_ABI_enum_size),
26667 T (Tag_ABI_HardFP_use),
26668 T (Tag_ABI_VFP_args),
26669 T (Tag_ABI_WMMX_args),
26670 T (Tag_ABI_optimization_goals),
26671 T (Tag_ABI_FP_optimization_goals),
26672 T (Tag_compatibility),
26673 T (Tag_CPU_unaligned_access),
26674 T (Tag_FP_HP_extension),
26675 T (Tag_VFP_HP_extension),
26676 T (Tag_ABI_FP_16bit_format),
26677 T (Tag_MPextension_use),
26678 T (Tag_DIV_use),
26679 T (Tag_nodefaults),
26680 T (Tag_also_compatible_with),
26681 T (Tag_conformance),
26682 T (Tag_T2EE_use),
26683 T (Tag_Virtualization_use),
26684 T (Tag_DSP_extension),
26685 /* We deliberately do not include Tag_MPextension_use_legacy. */
26686 #undef T
26687 };
26688 unsigned int i;
26689
26690 if (name == NULL)
26691 return -1;
26692
26693 for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
26694 if (streq (name, attribute_table[i].name))
26695 return attribute_table[i].tag;
26696
26697 return -1;
26698 }
26699
26700
26701 /* Apply sym value for relocations only in the case that they are for
26702 local symbols in the same segment as the fixup and you have the
26703 respective architectural feature for blx and simple switches. */
26704 int
26705 arm_apply_sym_value (struct fix * fixP, segT this_seg)
26706 {
26707 if (fixP->fx_addsy
26708 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
26709 /* PR 17444: If the local symbol is in a different section then a reloc
26710 will always be generated for it, so applying the symbol value now
26711 will result in a double offset being stored in the relocation. */
26712 && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
26713 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
26714 {
26715 switch (fixP->fx_r_type)
26716 {
26717 case BFD_RELOC_ARM_PCREL_BLX:
26718 case BFD_RELOC_THUMB_PCREL_BRANCH23:
26719 if (ARM_IS_FUNC (fixP->fx_addsy))
26720 return 1;
26721 break;
26722
26723 case BFD_RELOC_ARM_PCREL_CALL:
26724 case BFD_RELOC_THUMB_PCREL_BLX:
26725 if (THUMB_IS_FUNC (fixP->fx_addsy))
26726 return 1;
26727 break;
26728
26729 default:
26730 break;
26731 }
26732
26733 }
26734 return 0;
26735 }
26736 #endif /* OBJ_ELF */