[ARM] Positively emit symbols for alignment
[binutils-gdb.git] / gas / config / tc-arm.c
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2015 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8
9 This file is part of GAS, the GNU Assembler.
10
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
14 any later version.
15
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24 02110-1301, USA. */
25
26 #include "as.h"
27 #include <limits.h>
28 #include <stdarg.h>
29 #define NO_RELOC 0
30 #include "safe-ctype.h"
31 #include "subsegs.h"
32 #include "obstack.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
35
36 #ifdef OBJ_ELF
37 #include "elf/arm.h"
38 #include "dw2gencfi.h"
39 #endif
40
41 #include "dwarf2dbg.h"
42
43 #ifdef OBJ_ELF
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
46
/* This structure holds the unwinding state.  */

static struct
{
  /* Label marking the start of the function currently being unwound.  */
  symbolS * proc_start;
  /* Symbol for this function's entry in the unwind index table.  */
  symbolS * table_entry;
  /* Personality routine symbol, when one is named explicitly.  */
  symbolS * personality_routine;
  /* Selector for a predefined personality routine; exact value
     semantics are defined by the unwinding directives that set it.  */
  int personality_index;
  /* The segment containing the function.  */
  segT saved_seg;
  subsegT saved_subseg;
  /* Opcodes generated from this function.  */
  unsigned char * opcodes;
  /* Number of opcode bytes currently stored in OPCODES.  */
  int opcode_count;
  /* Allocated capacity of the OPCODES buffer.  */
  int opcode_alloc;
  /* The number of bytes pushed to the stack.  */
  offsetT frame_size;
  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.  */
  offsetT fp_offset;
  int fp_reg;
  /* Nonzero if an unwind_setfp directive has been seen.  */
  unsigned fp_used:1;
  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned sp_restored:1;
} unwind;
77
78 #endif /* OBJ_ELF */
79
/* Results from operand parsing worker functions.  */

typedef enum
{
  /* Operand parsed successfully.  */
  PARSE_OPERAND_SUCCESS,
  /* Operand did not parse.  */
  PARSE_OPERAND_FAIL,
  /* Operand did not parse, and — as the name suggests — the caller
     should not retry an alternative interpretation (TODO: confirm
     against the parsers that return this).  */
  PARSE_OPERAND_FAIL_NO_BACKTRACK
} parse_operand_result;
88
/* Floating-point ABI variants; presumably selected via a float-ABI
   command-line option and recorded in mfloat_abi_opt (-1 = unset) —
   confirm against the option-parsing code.  */
enum arm_float_abi
{
  ARM_FLOAT_ABI_HARD,
  ARM_FLOAT_ABI_SOFTFP,
  ARM_FLOAT_ABI_SOFT
};
95
96 /* Types of processor to assemble for. */
97 #ifndef CPU_DEFAULT
98 /* The code that was here used to select a default CPU depending on compiler
99 pre-defines which were only present when doing native builds, thus
100 changing gas' default behaviour depending upon the build host.
101
102 If you have a target that requires a default CPU option then the you
103 should define CPU_DEFAULT here. */
104 #endif
105
106 #ifndef FPU_DEFAULT
107 # ifdef TE_LINUX
108 # define FPU_DEFAULT FPU_ARCH_FPA
109 # elif defined (TE_NetBSD)
110 # ifdef OBJ_ELF
111 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
112 # else
113 /* Legacy a.out format. */
114 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
115 # endif
116 # elif defined (TE_VXWORKS)
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
118 # else
119 /* For backwards compatibility, default to FPA. */
120 # define FPU_DEFAULT FPU_ARCH_FPA
121 # endif
122 #endif /* ifndef FPU_DEFAULT */
123
124 #define streq(a, b) (strcmp (a, b) == 0)
125
126 static arm_feature_set cpu_variant;
127 static arm_feature_set arm_arch_used;
128 static arm_feature_set thumb_arch_used;
129
130 /* Flags stored in private area of BFD structure. */
131 static int uses_apcs_26 = FALSE;
132 static int atpcs = FALSE;
133 static int support_interwork = FALSE;
134 static int uses_apcs_float = FALSE;
135 static int pic_code = FALSE;
136 static int fix_v4bx = FALSE;
137 /* Warn on using deprecated features. */
138 static int warn_on_deprecated = TRUE;
139
140 /* Understand CodeComposer Studio assembly syntax. */
141 bfd_boolean codecomposer_syntax = FALSE;
142
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
145 assembly flags. */
146 static const arm_feature_set *legacy_cpu = NULL;
147 static const arm_feature_set *legacy_fpu = NULL;
148
149 static const arm_feature_set *mcpu_cpu_opt = NULL;
150 static const arm_feature_set *mcpu_fpu_opt = NULL;
151 static const arm_feature_set *march_cpu_opt = NULL;
152 static const arm_feature_set *march_fpu_opt = NULL;
153 static const arm_feature_set *mfpu_opt = NULL;
154 static const arm_feature_set *object_arch = NULL;
155
156 /* Constants for known architecture features. */
157 static const arm_feature_set fpu_default = FPU_DEFAULT;
158 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
159 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
160 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
161 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
162 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
163 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
164 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
165 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
166
167 #ifdef CPU_DEFAULT
168 static const arm_feature_set cpu_default = CPU_DEFAULT;
169 #endif
170
171 static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
172 static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
173 static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
174 static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
175 static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
176 static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
177 static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
178 static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
179 static const arm_feature_set arm_ext_v4t_5 =
180 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
181 static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
182 static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
183 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
184 static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
185 static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
186 static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
187 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
188 static const arm_feature_set arm_ext_v6m = ARM_FEATURE_CORE_LOW (ARM_EXT_V6M);
189 static const arm_feature_set arm_ext_v6_notm =
190 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
191 static const arm_feature_set arm_ext_v6_dsp =
192 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
193 static const arm_feature_set arm_ext_barrier =
194 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
195 static const arm_feature_set arm_ext_msr =
196 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
197 static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
198 static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
199 static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
200 static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
201 static const arm_feature_set arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
202 static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
203 static const arm_feature_set arm_ext_m =
204 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M | ARM_EXT_OS | ARM_EXT_V7M);
205 static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
206 static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
207 static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
208 static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
209 static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
210
211 static const arm_feature_set arm_arch_any = ARM_ANY;
212 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1, -1);
213 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
214 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
215 static const arm_feature_set arm_arch_v6m_only = ARM_ARCH_V6M_ONLY;
216
217 static const arm_feature_set arm_cext_iwmmxt2 =
218 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
219 static const arm_feature_set arm_cext_iwmmxt =
220 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
221 static const arm_feature_set arm_cext_xscale =
222 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
223 static const arm_feature_set arm_cext_maverick =
224 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
225 static const arm_feature_set fpu_fpa_ext_v1 =
226 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
227 static const arm_feature_set fpu_fpa_ext_v2 =
228 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
229 static const arm_feature_set fpu_vfp_ext_v1xd =
230 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
231 static const arm_feature_set fpu_vfp_ext_v1 =
232 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
233 static const arm_feature_set fpu_vfp_ext_v2 =
234 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
235 static const arm_feature_set fpu_vfp_ext_v3xd =
236 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
237 static const arm_feature_set fpu_vfp_ext_v3 =
238 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
239 static const arm_feature_set fpu_vfp_ext_d32 =
240 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
241 static const arm_feature_set fpu_neon_ext_v1 =
242 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
243 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
244 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
245 static const arm_feature_set fpu_vfp_fp16 =
246 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
247 static const arm_feature_set fpu_neon_ext_fma =
248 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
249 static const arm_feature_set fpu_vfp_ext_fma =
250 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
251 static const arm_feature_set fpu_vfp_ext_armv8 =
252 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
253 static const arm_feature_set fpu_vfp_ext_armv8xd =
254 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
255 static const arm_feature_set fpu_neon_ext_armv8 =
256 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
257 static const arm_feature_set fpu_crypto_ext_armv8 =
258 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
259 static const arm_feature_set crc_ext_armv8 =
260 ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
261
262 static int mfloat_abi_opt = -1;
263 /* Record user cpu selection for object attributes. */
264 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
265 /* Must be long enough to hold any of the names in arm_cpus. */
266 static char selected_cpu_name[16];
267
268 extern FLONUM_TYPE generic_floating_point_number;
269
270 /* Return if no cpu was selected on command-line. */
271 static bfd_boolean
272 no_cpu_selected (void)
273 {
274 return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
275 }
276
277 #ifdef OBJ_ELF
278 # ifdef EABI_DEFAULT
279 static int meabi_flags = EABI_DEFAULT;
280 # else
281 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
282 # endif
283
284 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
285
286 bfd_boolean
287 arm_is_eabi (void)
288 {
289 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
290 }
291 #endif
292
293 #ifdef OBJ_ELF
294 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
295 symbolS * GOT_symbol;
296 #endif
297
298 /* 0: assemble for ARM,
299 1: assemble for Thumb,
300 2: assemble for Thumb even though target CPU does not support thumb
301 instructions. */
302 static int thumb_mode = 0;
303 /* A value distinct from the possible values for thumb_mode that we
304 can use to record whether thumb_mode has been copied into the
305 tc_frag_data field of a frag. */
306 #define MODE_RECORDED (1 << 4)
307
308 /* Specifies the intrinsic IT insn behavior mode. */
309 enum implicit_it_mode
310 {
311 IMPLICIT_IT_MODE_NEVER = 0x00,
312 IMPLICIT_IT_MODE_ARM = 0x01,
313 IMPLICIT_IT_MODE_THUMB = 0x02,
314 IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
315 };
316 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
317
318 /* If unified_syntax is true, we are processing the new unified
319 ARM/Thumb syntax. Important differences from the old ARM mode:
320
321 - Immediate operands do not require a # prefix.
322 - Conditional affixes always appear at the end of the
323 instruction. (For backward compatibility, those instructions
324 that formerly had them in the middle, continue to accept them
325 there.)
326 - The IT instruction may appear, and if it does is validated
327 against subsequent conditional affixes. It does not generate
328 machine code.
329
330 Important differences from the old Thumb mode:
331
332 - Immediate operands do not require a # prefix.
333 - Most of the V6T2 instructions are only available in unified mode.
334 - The .N and .W suffixes are recognized and honored (it is an error
335 if they cannot be honored).
336 - All instructions set the flags if and only if they have an 's' affix.
337 - Conditional affixes may be used. They are validated against
338 preceding IT instructions. Unlike ARM mode, you cannot use a
339 conditional affix except in the scope of an IT instruction. */
340
341 static bfd_boolean unified_syntax = FALSE;
342
343 /* An immediate operand can start with #, and ld*, st*, pld operands
344 can contain [ and ]. We need to tell APP not to elide whitespace
345 before a [, which can appear as the first operand for pld.
346 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
347 const char arm_symbol_chars[] = "#[]{}";
348
349 enum neon_el_type
350 {
351 NT_invtype,
352 NT_untyped,
353 NT_integer,
354 NT_float,
355 NT_poly,
356 NT_signed,
357 NT_unsigned
358 };
359
360 struct neon_type_el
361 {
362 enum neon_el_type type;
363 unsigned size;
364 };
365
366 #define NEON_MAX_TYPE_ELS 4
367
368 struct neon_type
369 {
370 struct neon_type_el el[NEON_MAX_TYPE_ELS];
371 unsigned elems;
372 };
373
374 enum it_instruction_type
375 {
376 OUTSIDE_IT_INSN,
377 INSIDE_IT_INSN,
378 INSIDE_IT_LAST_INSN,
379 IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
380 if inside, should be the last one. */
381 NEUTRAL_IT_INSN, /* This could be either inside or outside,
382 i.e. BKPT and NOP. */
383 IT_INSN /* The IT insn has been parsed. */
384 };
385
386 /* The maximum number of operands we need. */
387 #define ARM_IT_MAX_OPERANDS 6
388
389 struct arm_it
390 {
391 const char * error;
392 unsigned long instruction;
393 int size;
394 int size_req;
395 int cond;
396 /* "uncond_value" is set to the value in place of the conditional field in
397 unconditional versions of the instruction, or -1 if nothing is
398 appropriate. */
399 int uncond_value;
400 struct neon_type vectype;
401 /* This does not indicate an actual NEON instruction, only that
402 the mnemonic accepts neon-style type suffixes. */
403 int is_neon;
404 /* Set to the opcode if the instruction needs relaxation.
405 Zero if the instruction is not relaxed. */
406 unsigned long relax;
407 struct
408 {
409 bfd_reloc_code_real_type type;
410 expressionS exp;
411 int pc_rel;
412 } reloc;
413
414 enum it_instruction_type it_insn_type;
415
416 struct
417 {
418 unsigned reg;
419 signed int imm;
420 struct neon_type_el vectype;
421 unsigned present : 1; /* Operand present. */
422 unsigned isreg : 1; /* Operand was a register. */
423 unsigned immisreg : 1; /* .imm field is a second register. */
424 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
425 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
426 unsigned immisfloat : 1; /* Immediate was parsed as a float. */
427 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
428 instructions. This allows us to disambiguate ARM <-> vector insns. */
429 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
430 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
431 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
432 unsigned issingle : 1; /* Operand is VFP single-precision register. */
433 unsigned hasreloc : 1; /* Operand has relocation suffix. */
434 unsigned writeback : 1; /* Operand has trailing ! */
435 unsigned preind : 1; /* Preindexed address. */
436 unsigned postind : 1; /* Postindexed address. */
437 unsigned negative : 1; /* Index register was negated. */
438 unsigned shifted : 1; /* Shift applied to operation. */
439 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
440 } operands[ARM_IT_MAX_OPERANDS];
441 };
442
443 static struct arm_it inst;
444
445 #define NUM_FLOAT_VALS 8
446
447 const char * fp_const[] =
448 {
449 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
450 };
451
452 /* Number of littlenums required to hold an extended precision number. */
453 #define MAX_LITTLENUMS 6
454
455 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
456
457 #define FAIL (-1)
458 #define SUCCESS (0)
459
460 #define SUFF_S 1
461 #define SUFF_D 2
462 #define SUFF_E 3
463 #define SUFF_P 4
464
465 #define CP_T_X 0x00008000
466 #define CP_T_Y 0x00400000
467
468 #define CONDS_BIT 0x00100000
469 #define LOAD_BIT 0x00100000
470
471 #define DOUBLE_LOAD_FLAG 0x00000001
472
473 struct asm_cond
474 {
475 const char * template_name;
476 unsigned long value;
477 };
478
479 #define COND_ALWAYS 0xE
480
481 struct asm_psr
482 {
483 const char * template_name;
484 unsigned long field;
485 };
486
487 struct asm_barrier_opt
488 {
489 const char * template_name;
490 unsigned long value;
491 const arm_feature_set arch;
492 };
493
494 /* The bit that distinguishes CPSR and SPSR. */
495 #define SPSR_BIT (1 << 22)
496
497 /* The individual PSR flag bits. */
498 #define PSR_c (1 << 16)
499 #define PSR_x (1 << 17)
500 #define PSR_s (1 << 18)
501 #define PSR_f (1 << 19)
502
503 struct reloc_entry
504 {
505 char * name;
506 bfd_reloc_code_real_type reloc;
507 };
508
509 enum vfp_reg_pos
510 {
511 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
512 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
513 };
514
515 enum vfp_ldstm_type
516 {
517 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
518 };
519
520 /* Bits for DEFINED field in neon_typed_alias. */
521 #define NTA_HASTYPE 1
522 #define NTA_HASINDEX 2
523
524 struct neon_typed_alias
525 {
526 unsigned char defined;
527 unsigned char index;
528 struct neon_type_el eltype;
529 };
530
531 /* ARM register categories. This includes coprocessor numbers and various
532 architecture extensions' registers. */
533 enum arm_reg_type
534 {
535 REG_TYPE_RN,
536 REG_TYPE_CP,
537 REG_TYPE_CN,
538 REG_TYPE_FN,
539 REG_TYPE_VFS,
540 REG_TYPE_VFD,
541 REG_TYPE_NQ,
542 REG_TYPE_VFSD,
543 REG_TYPE_NDQ,
544 REG_TYPE_NSDQ,
545 REG_TYPE_VFC,
546 REG_TYPE_MVF,
547 REG_TYPE_MVD,
548 REG_TYPE_MVFX,
549 REG_TYPE_MVDX,
550 REG_TYPE_MVAX,
551 REG_TYPE_DSPSC,
552 REG_TYPE_MMXWR,
553 REG_TYPE_MMXWC,
554 REG_TYPE_MMXWCG,
555 REG_TYPE_XSCALE,
556 REG_TYPE_RNB
557 };
558
559 /* Structure for a hash table entry for a register.
560 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
561 information which states whether a vector type or index is specified (for a
562 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
563 struct reg_entry
564 {
565 const char * name;
566 unsigned int number;
567 unsigned char type;
568 unsigned char builtin;
569 struct neon_typed_alias * neon;
570 };
571
572 /* Diagnostics used when we don't get a register of the expected type. */
573 const char * const reg_expected_msgs[] =
574 {
575 N_("ARM register expected"),
576 N_("bad or missing co-processor number"),
577 N_("co-processor register expected"),
578 N_("FPA register expected"),
579 N_("VFP single precision register expected"),
580 N_("VFP/Neon double precision register expected"),
581 N_("Neon quad precision register expected"),
582 N_("VFP single or double precision register expected"),
583 N_("Neon double or quad precision register expected"),
584 N_("VFP single, double or Neon quad precision register expected"),
585 N_("VFP system register expected"),
586 N_("Maverick MVF register expected"),
587 N_("Maverick MVD register expected"),
588 N_("Maverick MVFX register expected"),
589 N_("Maverick MVDX register expected"),
590 N_("Maverick MVAX register expected"),
591 N_("Maverick DSPSC register expected"),
592 N_("iWMMXt data register expected"),
593 N_("iWMMXt control register expected"),
594 N_("iWMMXt scalar register expected"),
595 N_("XScale accumulator register expected"),
596 };
597
598 /* Some well known registers that we refer to directly elsewhere. */
599 #define REG_R12 12
600 #define REG_SP 13
601 #define REG_LR 14
602 #define REG_PC 15
603
604 /* ARM instructions take 4bytes in the object file, Thumb instructions
605 take 2: */
606 #define INSN_SIZE 4
607
608 struct asm_opcode
609 {
610 /* Basic string to match. */
611 const char * template_name;
612
613 /* Parameters to instruction. */
614 unsigned int operands[8];
615
616 /* Conditional tag - see opcode_lookup. */
617 unsigned int tag : 4;
618
619 /* Basic instruction code. */
620 unsigned int avalue : 28;
621
622 /* Thumb-format instruction code. */
623 unsigned int tvalue;
624
625 /* Which architecture variant provides this instruction. */
626 const arm_feature_set * avariant;
627 const arm_feature_set * tvariant;
628
629 /* Function to call to encode instruction in ARM format. */
630 void (* aencode) (void);
631
632 /* Function to call to encode instruction in Thumb format. */
633 void (* tencode) (void);
634 };
635
636 /* Defines for various bits that we will want to toggle. */
637 #define INST_IMMEDIATE 0x02000000
638 #define OFFSET_REG 0x02000000
639 #define HWOFFSET_IMM 0x00400000
640 #define SHIFT_BY_REG 0x00000010
641 #define PRE_INDEX 0x01000000
642 #define INDEX_UP 0x00800000
643 #define WRITE_BACK 0x00200000
644 #define LDM_TYPE_2_OR_3 0x00400000
645 #define CPSI_MMOD 0x00020000
646
647 #define LITERAL_MASK 0xf000f000
648 #define OPCODE_MASK 0xfe1fffff
649 #define V4_STR_BIT 0x00000020
650 #define VLDR_VMOV_SAME 0x0040f000
651
652 #define T2_SUBS_PC_LR 0xf3de8f00
653
654 #define DATA_OP_SHIFT 21
655
656 #define T2_OPCODE_MASK 0xfe1fffff
657 #define T2_DATA_OP_SHIFT 21
658
659 #define A_COND_MASK 0xf0000000
660 #define A_PUSH_POP_OP_MASK 0x0fff0000
661
662 /* Opcodes for pushing/poping registers to/from the stack. */
663 #define A1_OPCODE_PUSH 0x092d0000
664 #define A2_OPCODE_PUSH 0x052d0004
665 #define A2_OPCODE_POP 0x049d0004
666
667 /* Codes to distinguish the arithmetic instructions. */
668 #define OPCODE_AND 0
669 #define OPCODE_EOR 1
670 #define OPCODE_SUB 2
671 #define OPCODE_RSB 3
672 #define OPCODE_ADD 4
673 #define OPCODE_ADC 5
674 #define OPCODE_SBC 6
675 #define OPCODE_RSC 7
676 #define OPCODE_TST 8
677 #define OPCODE_TEQ 9
678 #define OPCODE_CMP 10
679 #define OPCODE_CMN 11
680 #define OPCODE_ORR 12
681 #define OPCODE_MOV 13
682 #define OPCODE_BIC 14
683 #define OPCODE_MVN 15
684
685 #define T2_OPCODE_AND 0
686 #define T2_OPCODE_BIC 1
687 #define T2_OPCODE_ORR 2
688 #define T2_OPCODE_ORN 3
689 #define T2_OPCODE_EOR 4
690 #define T2_OPCODE_ADD 8
691 #define T2_OPCODE_ADC 10
692 #define T2_OPCODE_SBC 11
693 #define T2_OPCODE_SUB 13
694 #define T2_OPCODE_RSB 14
695
696 #define T_OPCODE_MUL 0x4340
697 #define T_OPCODE_TST 0x4200
698 #define T_OPCODE_CMN 0x42c0
699 #define T_OPCODE_NEG 0x4240
700 #define T_OPCODE_MVN 0x43c0
701
702 #define T_OPCODE_ADD_R3 0x1800
703 #define T_OPCODE_SUB_R3 0x1a00
704 #define T_OPCODE_ADD_HI 0x4400
705 #define T_OPCODE_ADD_ST 0xb000
706 #define T_OPCODE_SUB_ST 0xb080
707 #define T_OPCODE_ADD_SP 0xa800
708 #define T_OPCODE_ADD_PC 0xa000
709 #define T_OPCODE_ADD_I8 0x3000
710 #define T_OPCODE_SUB_I8 0x3800
711 #define T_OPCODE_ADD_I3 0x1c00
712 #define T_OPCODE_SUB_I3 0x1e00
713
714 #define T_OPCODE_ASR_R 0x4100
715 #define T_OPCODE_LSL_R 0x4080
716 #define T_OPCODE_LSR_R 0x40c0
717 #define T_OPCODE_ROR_R 0x41c0
718 #define T_OPCODE_ASR_I 0x1000
719 #define T_OPCODE_LSL_I 0x0000
720 #define T_OPCODE_LSR_I 0x0800
721
722 #define T_OPCODE_MOV_I8 0x2000
723 #define T_OPCODE_CMP_I8 0x2800
724 #define T_OPCODE_CMP_LR 0x4280
725 #define T_OPCODE_MOV_HR 0x4600
726 #define T_OPCODE_CMP_HR 0x4500
727
728 #define T_OPCODE_LDR_PC 0x4800
729 #define T_OPCODE_LDR_SP 0x9800
730 #define T_OPCODE_STR_SP 0x9000
731 #define T_OPCODE_LDR_IW 0x6800
732 #define T_OPCODE_STR_IW 0x6000
733 #define T_OPCODE_LDR_IH 0x8800
734 #define T_OPCODE_STR_IH 0x8000
735 #define T_OPCODE_LDR_IB 0x7800
736 #define T_OPCODE_STR_IB 0x7000
737 #define T_OPCODE_LDR_RW 0x5800
738 #define T_OPCODE_STR_RW 0x5000
739 #define T_OPCODE_LDR_RH 0x5a00
740 #define T_OPCODE_STR_RH 0x5200
741 #define T_OPCODE_LDR_RB 0x5c00
742 #define T_OPCODE_STR_RB 0x5400
743
744 #define T_OPCODE_PUSH 0xb400
745 #define T_OPCODE_POP 0xbc00
746
747 #define T_OPCODE_BRANCH 0xe000
748
749 #define THUMB_SIZE 2 /* Size of thumb instruction. */
750 #define THUMB_PP_PC_LR 0x0100
751 #define THUMB_LOAD_BIT 0x0800
752 #define THUMB2_LOAD_BIT 0x00100000
753
/* Diagnostic message strings used when rejecting malformed
   instructions.  Each macro expands to a translatable string
   expression; none may carry a trailing semicolon, since that would
   inject a stray statement terminator (or a syntax error in
   expression context) at every use site.  The original BAD_ADDR_MODE
   definition had exactly that stray semicolon — removed here.  */
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_SP		_("r13 not allowed here")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
#define BAD_ADDR_MODE	_("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
#define BAD_OUT_IT	_("thumb conditional instruction should be in IT block")
#define BAD_IT_COND	_("incorrect condition in IT block")
#define BAD_IT_IT	_("IT falling in the range of a previous IT block")
#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE	_("branch out of range")
#define UNPRED_REG(R)	_("using " R " results in unpredictable behaviour")
775
776 static struct hash_control * arm_ops_hsh;
777 static struct hash_control * arm_cond_hsh;
778 static struct hash_control * arm_shift_hsh;
779 static struct hash_control * arm_psr_hsh;
780 static struct hash_control * arm_v7m_psr_hsh;
781 static struct hash_control * arm_reg_hsh;
782 static struct hash_control * arm_reloc_hsh;
783 static struct hash_control * arm_barrier_opt_hsh;
784
785 /* Stuff needed to resolve the label ambiguity
786 As:
787 ...
788 label: <insn>
789 may differ from:
790 ...
791 label:
792 <insn> */
793
794 symbolS * last_label_seen;
795 static int label_is_thumb_function_name = FALSE;
796
/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_pool
{
  /* The constants collected for this pool.  */
  expressionS literals [MAX_LITERAL_POOL_SIZE];
  /* Index of the next unused slot in LITERALS.  */
  unsigned int next_free_entry;
  /* Identifier of this pool.  */
  unsigned int id;
  /* Symbol placed at the pool, used to reference its contents.  */
  symbolS * symbol;
  /* Section and sub-section this pool belongs to.  */
  segT section;
  subsegT sub_section;
#ifdef OBJ_ELF
  /* Source locations recorded per literal, parallel to LITERALS.  */
  struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
#endif
  /* Next pool in the list_of_pools linked list.  */
  struct literal_pool * next;
  /* Required alignment of the pool.  */
  unsigned int alignment;
} literal_pool;
815
816 /* Pointer to a linked list of literal pools. */
817 literal_pool * list_of_pools = NULL;
818
819 typedef enum asmfunc_states
820 {
821 OUTSIDE_ASMFUNC,
822 WAITING_ASMFUNC_NAME,
823 WAITING_ENDASMFUNC
824 } asmfunc_states;
825
826 static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
827
828 #ifdef OBJ_ELF
829 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
830 #else
831 static struct current_it now_it;
832 #endif
833
834 static inline int
835 now_it_compatible (int cond)
836 {
837 return (cond & ~1) == (now_it.cc & ~1);
838 }
839
840 static inline int
841 conditional_insn (void)
842 {
843 return inst.cond != COND_ALWAYS;
844 }
845
846 static int in_it_block (void);
847
848 static int handle_it_state (void);
849
850 static void force_automatic_it_block_close (void);
851
852 static void it_fsm_post_encode (void);
853
854 #define set_it_insn_type(type) \
855 do \
856 { \
857 inst.it_insn_type = type; \
858 if (handle_it_state () == FAIL) \
859 return; \
860 } \
861 while (0)
862
863 #define set_it_insn_type_nonvoid(type, failret) \
864 do \
865 { \
866 inst.it_insn_type = type; \
867 if (handle_it_state () == FAIL) \
868 return failret; \
869 } \
870 while(0)
871
872 #define set_it_insn_type_last() \
873 do \
874 { \
875 if (inst.cond == COND_ALWAYS) \
876 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
877 else \
878 set_it_insn_type (INSIDE_IT_LAST_INSN); \
879 } \
880 while (0)
881
882 /* Pure syntax. */
883
884 /* This array holds the chars that always start a comment. If the
885 pre-processor is disabled, these aren't very useful. */
886 char arm_comment_chars[] = "@";
887
888 /* This array holds the chars that only start a comment at the beginning of
889 a line. If the line seems to have the form '# 123 filename'
890 .line and .file directives will appear in the pre-processed output. */
891 /* Note that input_file.c hand checks for '#' at the beginning of the
892 first line of the input file. This is because the compiler outputs
893 #NO_APP at the beginning of its output. */
894 /* Also note that comments like this one will always work. */
895 const char line_comment_chars[] = "#";
896
897 char arm_line_separator_chars[] = ";";
898
899 /* Chars that can be used to separate mant
900 from exp in floating point numbers. */
901 const char EXP_CHARS[] = "eE";
902
903 /* Chars that mean this number is a floating point constant. */
904 /* As in 0f12.456 */
905 /* or 0d1.2345e12 */
906
907 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
908
909 /* Prefix characters that indicate the start of an immediate
910 value. */
911 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
912
913 /* Separator character handling. */
914
915 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
916
917 static inline int
918 skip_past_char (char ** str, char c)
919 {
920 /* PR gas/14987: Allow for whitespace before the expected character. */
921 skip_whitespace (*str);
922
923 if (**str == c)
924 {
925 (*str)++;
926 return SUCCESS;
927 }
928 else
929 return FAIL;
930 }
931
932 #define skip_past_comma(str) skip_past_char (str, ',')
933
934 /* Arithmetic expressions (possibly involving symbols). */
935
936 /* Return TRUE if anything in the expression is a bignum. */
937
938 static int
939 walk_no_bignums (symbolS * sp)
940 {
941 if (symbol_get_value_expression (sp)->X_op == O_big)
942 return 1;
943
944 if (symbol_get_value_expression (sp)->X_add_symbol)
945 {
946 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
947 || (symbol_get_value_expression (sp)->X_op_symbol
948 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
949 }
950
951 return 0;
952 }
953
954 static int in_my_get_expression = 0;
955
956 /* Third argument to my_get_expression. */
957 #define GE_NO_PREFIX 0
958 #define GE_IMM_PREFIX 1
959 #define GE_OPT_PREFIX 2
960 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
961 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
962 #define GE_OPT_PREFIX_BIG 3
963
/* Parse an arithmetic expression from *STR into EP, honouring PREFIX_MODE
   (one of the GE_* values above).  On success returns 0 and advances *STR
   past the expression.  On failure returns nonzero with inst.error set and
   *STR pointing at the offending text; input_line_pointer is restored on
   every path.  NOTE(review): one failure path returns FAIL rather than 1;
   callers appear to treat any nonzero result as failure -- confirm.  */

static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;
  segT seg;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
		  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default: abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Run the generic expression parser over *STR by temporarily
     redirecting input_line_pointer; md_operand () checks
     in_my_get_expression to know the call came from here.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = 1;
  seg = expression (ep);
  in_my_get_expression = 0;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }
#else
  (void) seg;
#endif

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.  Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return 0;
}
1049
1050 /* Turn a string in input_line_pointer into a floating point constant
1051 of type TYPE, and store the appropriate bytes in *LITP. The number
1052 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1053 returned, or NULL on OK.
1054
1055 Note that fp constants aren't represent in the normal way on the ARM.
1056 In big endian mode, things are as expected. However, in little endian
1057 mode fp constants are big-endian word-wise, and little-endian byte-wise
1058 within the words. For example, (double) 1.1 in big endian mode is
1059 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1060 the byte sequence 99 99 f1 3f 9a 99 99 99.
1061
1062 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1063
1064 char *
1065 md_atof (int type, char * litP, int * sizeP)
1066 {
1067 int prec;
1068 LITTLENUM_TYPE words[MAX_LITTLENUMS];
1069 char *t;
1070 int i;
1071
1072 switch (type)
1073 {
1074 case 'f':
1075 case 'F':
1076 case 's':
1077 case 'S':
1078 prec = 2;
1079 break;
1080
1081 case 'd':
1082 case 'D':
1083 case 'r':
1084 case 'R':
1085 prec = 4;
1086 break;
1087
1088 case 'x':
1089 case 'X':
1090 prec = 5;
1091 break;
1092
1093 case 'p':
1094 case 'P':
1095 prec = 5;
1096 break;
1097
1098 default:
1099 *sizeP = 0;
1100 return _("Unrecognized or unsupported floating point constant");
1101 }
1102
1103 t = atof_ieee (input_line_pointer, type, words);
1104 if (t)
1105 input_line_pointer = t;
1106 *sizeP = prec * sizeof (LITTLENUM_TYPE);
1107
1108 if (target_big_endian)
1109 {
1110 for (i = 0; i < prec; i++)
1111 {
1112 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1113 litP += sizeof (LITTLENUM_TYPE);
1114 }
1115 }
1116 else
1117 {
1118 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
1119 for (i = prec - 1; i >= 0; i--)
1120 {
1121 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1122 litP += sizeof (LITTLENUM_TYPE);
1123 }
1124 else
1125 /* For a 4 byte float the order of elements in `words' is 1 0.
1126 For an 8 byte float the order is 1 0 3 2. */
1127 for (i = 0; i < prec; i += 2)
1128 {
1129 md_number_to_chars (litP, (valueT) words[i + 1],
1130 sizeof (LITTLENUM_TYPE));
1131 md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
1132 (valueT) words[i], sizeof (LITTLENUM_TYPE));
1133 litP += 2 * sizeof (LITTLENUM_TYPE);
1134 }
1135 }
1136
1137 return NULL;
1138 }
1139
1140 /* We handle all bad expressions here, so that we can report the faulty
1141 instruction in the error message. */
1142 void
1143 md_operand (expressionS * exp)
1144 {
1145 if (in_my_get_expression)
1146 exp->X_op = O_illegal;
1147 }
1148
1149 /* Immediate values. */
1150
1151 /* Generic immediate-value read function for use in directives.
1152 Accepts anything that 'expression' can fold to a constant.
1153 *val receives the number. */
1154 #ifdef OBJ_ELF
1155 static int
1156 immediate_for_directive (int *val)
1157 {
1158 expressionS exp;
1159 exp.X_op = O_illegal;
1160
1161 if (is_immediate_prefix (*input_line_pointer))
1162 {
1163 input_line_pointer++;
1164 expression (&exp);
1165 }
1166
1167 if (exp.X_op != O_constant)
1168 {
1169 as_bad (_("expected #constant"));
1170 ignore_rest_of_line ();
1171 return FAIL;
1172 }
1173 *val = exp.X_add_number;
1174 return SUCCESS;
1175 }
1176 #endif
1177
1178 /* Register parsing. */
1179
1180 /* Generic register parser. CCP points to what should be the
1181 beginning of a register name. If it is indeed a valid register
1182 name, advance CCP over it and return the reg_entry structure;
1183 otherwise return NULL. Does not issue diagnostics. */
1184
1185 static struct reg_entry *
1186 arm_reg_parse_multi (char **ccp)
1187 {
1188 char *start = *ccp;
1189 char *p;
1190 struct reg_entry *reg;
1191
1192 skip_whitespace (start);
1193
1194 #ifdef REGISTER_PREFIX
1195 if (*start != REGISTER_PREFIX)
1196 return NULL;
1197 start++;
1198 #endif
1199 #ifdef OPTIONAL_REGISTER_PREFIX
1200 if (*start == OPTIONAL_REGISTER_PREFIX)
1201 start++;
1202 #endif
1203
1204 p = start;
1205 if (!ISALPHA (*p) || !is_name_beginner (*p))
1206 return NULL;
1207
1208 do
1209 p++;
1210 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1211
1212 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1213
1214 if (!reg)
1215 return NULL;
1216
1217 *ccp = p;
1218 return reg;
1219 }
1220
/* Check for an alternative (non-canonical) spelling of a register of
   class TYPE.  START is the unparsed text, *CCP the current parse
   position, and REG the entry found by arm_reg_parse_multi (may be
   NULL).  Returns the register number, or FAIL if no alternative
   syntax applies.  */

static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
		    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes.  */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)
	return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here.  */
      {
	unsigned long processor = strtoul (start, ccp, 10);
	if (*ccp != start && processor <= 15)
	  return processor;
      }
      /* NOTE(review): there is no break here, so control falls into the
	 MMXWC case and a WCG register name is also accepted for
	 REG_TYPE_CP.  Looks unintentional -- confirm before adding a
	 break, since existing sources may rely on it.  */

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
	 instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
	return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}
1258
1259 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1260 return value is the register number or FAIL. */
1261
1262 static int
1263 arm_reg_parse (char **ccp, enum arm_reg_type type)
1264 {
1265 char *start = *ccp;
1266 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1267 int ret;
1268
1269 /* Do not allow a scalar (reg+index) to parse as a register. */
1270 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1271 return FAIL;
1272
1273 if (reg && reg->type == type)
1274 return reg->number;
1275
1276 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1277 return ret;
1278
1279 *ccp = start;
1280 return FAIL;
1281 }
1282
1283 /* Parse a Neon type specifier. *STR should point at the leading '.'
1284 character. Does no verification at this stage that the type fits the opcode
1285 properly. E.g.,
1286
1287 .i32.i32.s16
1288 .s32.f32
1289 .u16
1290
1291 Can all be legally parsed by this function.
1292
1293 Fills in neon_type struct pointer with parsed information, and updates STR
1294 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1295 type, FAIL if not. */
1296
1297 static int
1298 parse_neon_type (struct neon_type *type, char **str)
1299 {
1300 char *ptr = *str;
1301
1302 if (type)
1303 type->elems = 0;
1304
1305 while (type->elems < NEON_MAX_TYPE_ELS)
1306 {
1307 enum neon_el_type thistype = NT_untyped;
1308 unsigned thissize = -1u;
1309
1310 if (*ptr != '.')
1311 break;
1312
1313 ptr++;
1314
1315 /* Just a size without an explicit type. */
1316 if (ISDIGIT (*ptr))
1317 goto parsesize;
1318
1319 switch (TOLOWER (*ptr))
1320 {
1321 case 'i': thistype = NT_integer; break;
1322 case 'f': thistype = NT_float; break;
1323 case 'p': thistype = NT_poly; break;
1324 case 's': thistype = NT_signed; break;
1325 case 'u': thistype = NT_unsigned; break;
1326 case 'd':
1327 thistype = NT_float;
1328 thissize = 64;
1329 ptr++;
1330 goto done;
1331 default:
1332 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1333 return FAIL;
1334 }
1335
1336 ptr++;
1337
1338 /* .f is an abbreviation for .f32. */
1339 if (thistype == NT_float && !ISDIGIT (*ptr))
1340 thissize = 32;
1341 else
1342 {
1343 parsesize:
1344 thissize = strtoul (ptr, &ptr, 10);
1345
1346 if (thissize != 8 && thissize != 16 && thissize != 32
1347 && thissize != 64)
1348 {
1349 as_bad (_("bad size %d in type specifier"), thissize);
1350 return FAIL;
1351 }
1352 }
1353
1354 done:
1355 if (type)
1356 {
1357 type->el[type->elems].type = thistype;
1358 type->el[type->elems].size = thissize;
1359 type->elems++;
1360 }
1361 }
1362
1363 /* Empty/missing type is not a successful parse. */
1364 if (type->elems == 0)
1365 return FAIL;
1366
1367 *str = ptr;
1368
1369 return SUCCESS;
1370 }
1371
1372 /* Errors may be set multiple times during parsing or bit encoding
1373 (particularly in the Neon bits), but usually the earliest error which is set
1374 will be the most meaningful. Avoid overwriting it with later (cascading)
1375 errors by calling this function. */
1376
1377 static void
1378 first_error (const char *err)
1379 {
1380 if (!inst.error)
1381 inst.error = err;
1382 }
1383
1384 /* Parse a single type, e.g. ".s32", leading period included. */
1385 static int
1386 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1387 {
1388 char *str = *ccp;
1389 struct neon_type optype;
1390
1391 if (*str == '.')
1392 {
1393 if (parse_neon_type (&optype, &str) == SUCCESS)
1394 {
1395 if (optype.elems == 1)
1396 *vectype = optype.el[0];
1397 else
1398 {
1399 first_error (_("only one type should be specified for operand"));
1400 return FAIL;
1401 }
1402 }
1403 else
1404 {
1405 first_error (_("vector type expected"));
1406 return FAIL;
1407 }
1408 }
1409 else
1410 return FAIL;
1411
1412 *ccp = str;
1413
1414 return SUCCESS;
1415 }
1416
1417 /* Special meanings for indices (which have a range of 0-7), which will fit into
1418 a 4-bit integer. */
1419
1420 #define NEON_ALL_LANES 15
1421 #define NEON_INTERLEAVE_LANES 14
1422
/* Parse either a register or a scalar, with an optional type.  Return the
   register number, and optionally fill in the actual type of the register
   when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
   type/index information in *TYPEINFO.  Returns FAIL (usually with an
   error recorded via first_error) on any parse problem.  */

static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with "no type, no index".  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  if (type != reg->type)
    return FAIL;

  /* An alias created by .dn/.qn may carry its own type/index info.  */
  if (reg->neon)
    atype = *reg->neon;

  /* An explicit ".type" suffix; it may only be given once.  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Optional "[index]" or "[]" (all lanes) makes this a scalar.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD)
	{
	  first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1531
/* Like arm_reg_parse, but allow the following extra features:
1533 - If RTYPE is non-zero, return the (possibly restricted) type of the
1534 register (e.g. Neon double or quad reg when either has been requested).
1535 - If this is a Neon vector type with additional type information, fill
1536 in the struct pointed to by VECTYPE (if non-NULL).
1537 This function will fault on encountering a scalar. */
1538
1539 static int
1540 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1541 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1542 {
1543 struct neon_typed_alias atype;
1544 char *str = *ccp;
1545 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1546
1547 if (reg == FAIL)
1548 return FAIL;
1549
1550 /* Do not allow regname(... to parse as a register. */
1551 if (*str == '(')
1552 return FAIL;
1553
1554 /* Do not allow a scalar (reg+index) to parse as a register. */
1555 if ((atype.defined & NTA_HASINDEX) != 0)
1556 {
1557 first_error (_("register operand expected, but got scalar"));
1558 return FAIL;
1559 }
1560
1561 if (vectype)
1562 *vectype = atype.eltype;
1563
1564 *ccp = str;
1565
1566 return reg;
1567 }
1568
1569 #define NEON_SCALAR_REG(X) ((X) >> 4)
1570 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1571
1572 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1573 have enough information to be able to do a good job bounds-checking. So, we
1574 just do easy checks here, and do further checks later. */
1575
1576 static int
1577 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1578 {
1579 int reg;
1580 char *str = *ccp;
1581 struct neon_typed_alias atype;
1582
1583 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1584
1585 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1586 return FAIL;
1587
1588 if (atype.index == NEON_ALL_LANES)
1589 {
1590 first_error (_("scalar must have an index"));
1591 return FAIL;
1592 }
1593 else if (atype.index >= 64 / elsize)
1594 {
1595 first_error (_("scalar index out of range"));
1596 return FAIL;
1597 }
1598
1599 if (type)
1600 *type = atype.eltype;
1601
1602 *ccp = str;
1603
1604 return reg * 16 + atype.index;
1605 }
1606
1607 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1608
1609 static long
1610 parse_reg_list (char ** strp)
1611 {
1612 char * str = * strp;
1613 long range = 0;
1614 int another_range;
1615
1616 /* We come back here if we get ranges concatenated by '+' or '|'. */
1617 do
1618 {
1619 skip_whitespace (str);
1620
1621 another_range = 0;
1622
1623 if (*str == '{')
1624 {
1625 int in_range = 0;
1626 int cur_reg = -1;
1627
1628 str++;
1629 do
1630 {
1631 int reg;
1632
1633 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1634 {
1635 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1636 return FAIL;
1637 }
1638
1639 if (in_range)
1640 {
1641 int i;
1642
1643 if (reg <= cur_reg)
1644 {
1645 first_error (_("bad range in register list"));
1646 return FAIL;
1647 }
1648
1649 for (i = cur_reg + 1; i < reg; i++)
1650 {
1651 if (range & (1 << i))
1652 as_tsktsk
1653 (_("Warning: duplicated register (r%d) in register list"),
1654 i);
1655 else
1656 range |= 1 << i;
1657 }
1658 in_range = 0;
1659 }
1660
1661 if (range & (1 << reg))
1662 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1663 reg);
1664 else if (reg <= cur_reg)
1665 as_tsktsk (_("Warning: register range not in ascending order"));
1666
1667 range |= 1 << reg;
1668 cur_reg = reg;
1669 }
1670 while (skip_past_comma (&str) != FAIL
1671 || (in_range = 1, *str++ == '-'));
1672 str--;
1673
1674 if (skip_past_char (&str, '}') == FAIL)
1675 {
1676 first_error (_("missing `}'"));
1677 return FAIL;
1678 }
1679 }
1680 else
1681 {
1682 expressionS exp;
1683
1684 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1685 return FAIL;
1686
1687 if (exp.X_op == O_constant)
1688 {
1689 if (exp.X_add_number
1690 != (exp.X_add_number & 0x0000ffff))
1691 {
1692 inst.error = _("invalid register mask");
1693 return FAIL;
1694 }
1695
1696 if ((range & exp.X_add_number) != 0)
1697 {
1698 int regno = range & exp.X_add_number;
1699
1700 regno &= -regno;
1701 regno = (1 << regno) - 1;
1702 as_tsktsk
1703 (_("Warning: duplicated register (r%d) in register list"),
1704 regno);
1705 }
1706
1707 range |= exp.X_add_number;
1708 }
1709 else
1710 {
1711 if (inst.reloc.type != 0)
1712 {
1713 inst.error = _("expression too complex");
1714 return FAIL;
1715 }
1716
1717 memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
1718 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1719 inst.reloc.pc_rel = 0;
1720 }
1721 }
1722
1723 if (*str == '|' || *str == '+')
1724 {
1725 str++;
1726 another_range = 1;
1727 }
1728 }
1729 while (another_range);
1730
1731 *strp = str;
1732 return range;
1733 }
1734
/* Types of registers in a list.  */

enum reg_list_els
{
  REGLIST_VFP_S,	/* Single-precision (S) registers.  */
  REGLIST_VFP_D,	/* Double-precision (D) registers.  */
  REGLIST_NEON_D	/* Neon D registers, with syntax extensions.  */
};
1743
/* Parse a VFP register list.  If the string is invalid return FAIL.
   Otherwise return the number of registers, and set PBASE to the first
   register.  Parses registers of type ETYPE.
   If REGLIST_NEON_D is used, several syntax enhancements are enabled:
     - Q registers can be used to specify pairs of D registers
     - { } can be omitted from around a singleton register list
	 FIXME: This is not implemented, as it would require backtracking in
	 some cases, e.g.:
	   vtbl.8 d3,d4,d5
	 This could be done (the meaning isn't really ambiguous), but doesn't
	 fit in well with the current parsing framework.
     - 32 D registers may be used (also true for VFPv3).
   FIXME: Types are ignored in these register lists, which is probably a
   bug.  */

static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
{
  char *str = *ccp;
  int base_reg;
  int new_base;
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
  int max_regs = 0;
  int count = 0;
  int warned = 0;
  unsigned long mask = 0;
  int i;

  if (skip_past_char (&str, '{') == FAIL)
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  /* Pick the register class.  MAX_REGS for the D-register lists is
     filled in below because it depends on the FPU variant.  */
  switch (etype)
    {
    case REGLIST_VFP_S:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;
    }

  if (etype != REGLIST_VFP_S)
    {
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  max_regs = 32;
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	max_regs = 16;
    }

  /* Track the lowest register seen; start above the valid range.  */
  base_reg = max_regs;

  do
    {
      /* SETMASK/ADDREGS are per-entry: a Q register occupies two D
	 registers, so it sets two mask bits and counts as two.  */
      int setmask = 1, addregs = 1;

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)
	{
	  setmask = 3;
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      if ((mask >> new_base) != 0 && ! warned)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = gettext (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  /* For Q registers the upper bound covers both D halves.  */
	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  /* Mark every register in the range.  */
	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  /* NOTE(review): this assumes the next character is the closing '}' and
     steps over it without checking -- confirm whether malformed input
     (a missing brace) can reach this point.  */
  str++;

  /* Sanity check -- should have raised a parse error above.  */
  if (count == 0 || count > max_regs)
    abort ();

  *pbase = base_reg;

  /* Final test -- the registers must be consecutive.  */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }

  *ccp = str;

  return count;
}
1922
1923 /* True if two alias types are the same. */
1924
1925 static bfd_boolean
1926 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1927 {
1928 if (!a && !b)
1929 return TRUE;
1930
1931 if (!a || !b)
1932 return FALSE;
1933
1934 if (a->defined != b->defined)
1935 return FALSE;
1936
1937 if ((a->defined & NTA_HASTYPE) != 0
1938 && (a->eltype.type != b->eltype.type
1939 || a->eltype.size != b->eltype.size))
1940 return FALSE;
1941
1942 if ((a->defined & NTA_HASINDEX) != 0
1943 && (a->index != b->index))
1944 return FALSE;
1945
1946 return TRUE;
1947 }
1948
/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
   The base register is put in *PBASE.
   The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
   the return value.
   The register stride (minus one) is put in bit 4 of the return value.
   Bits [6:5] encode the list length (minus one).
   The type of the list elements is put in *ELTYPE, if non-NULL.  */

#define NEON_LANE(X)		((X) & 0xf)
#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)
#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)

static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;		/* First D register in the list.  */
  int reg_incr = -1;		/* Register stride; -1 until known.  */
  int count = 0;		/* D registers accumulated so far.  */
  int lane = -1;		/* Lane index, or NEON_*_LANES.  */
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;

  /* Braces are optional around a singleton list (checked below).  */
  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First entry: remember the base register and the type every
	     later entry must match.  Q registers force unit stride.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second entry fixes the stride.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  first_error (_(incr_error));
	  return FAIL;
	}

      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax.  Can only be used with non-indexed list
	 modes.  */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Count every D register in the range (Q registers are two).  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* Every entry must use the same lane index.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  /* A singleton defaults to unit stride.  */
  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Pack lane, stride and length as described in the header comment.  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2113
2114 /* Parse an explicit relocation suffix on an expression. This is
2115 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2116 arm_reloc_hsh contains no entries, so this function can only
2117 succeed if there is no () after the word. Returns -1 on error,
2118 BFD_RELOC_UNUSED if there wasn't any suffix. */
2119
2120 static int
2121 parse_reloc (char **str)
2122 {
2123 struct reloc_entry *r;
2124 char *p, *q;
2125
2126 if (**str != '(')
2127 return BFD_RELOC_UNUSED;
2128
2129 p = *str + 1;
2130 q = p;
2131
2132 while (*q && *q != ')' && *q != ',')
2133 q++;
2134 if (*q != ')')
2135 return -1;
2136
2137 if ((r = (struct reloc_entry *)
2138 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2139 return -1;
2140
2141 *str = q + 1;
2142 return r->reloc;
2143 }
2144
2145 /* Directives: register aliases. */
2146
2147 static struct reg_entry *
2148 insert_reg_alias (char *str, unsigned number, int type)
2149 {
2150 struct reg_entry *new_reg;
2151 const char *name;
2152
2153 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2154 {
2155 if (new_reg->builtin)
2156 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2157
2158 /* Only warn about a redefinition if it's not defined as the
2159 same register. */
2160 else if (new_reg->number != number || new_reg->type != type)
2161 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2162
2163 return NULL;
2164 }
2165
2166 name = xstrdup (str);
2167 new_reg = (struct reg_entry *) xmalloc (sizeof (struct reg_entry));
2168
2169 new_reg->name = name;
2170 new_reg->number = number;
2171 new_reg->type = type;
2172 new_reg->builtin = FALSE;
2173 new_reg->neon = NULL;
2174
2175 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2176 abort ();
2177
2178 return new_reg;
2179 }
2180
2181 static void
2182 insert_neon_reg_alias (char *str, int number, int type,
2183 struct neon_typed_alias *atype)
2184 {
2185 struct reg_entry *reg = insert_reg_alias (str, number, type);
2186
2187 if (!reg)
2188 {
2189 first_error (_("attempt to redefine typed alias"));
2190 return;
2191 }
2192
2193 if (atype)
2194 {
2195 reg->neon = (struct neon_typed_alias *)
2196 xmalloc (sizeof (struct neon_typed_alias));
2197 *reg->neon = *atype;
2198 }
2199 }
2200
2201 /* Look for the .req directive. This is of the form:
2202
2203 new_register_name .req existing_register_name
2204
2205 If we find one, or if it looks sufficiently like one that we want to
2206 handle any error here, return TRUE. Otherwise return FALSE. */
2207
static bfd_boolean
create_register_alias (char * newname, char *p)
{
  struct reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  /* The right-hand side must already name a register or alias.  */
  old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  /* Stack copy only: insert_reg_alias makes its own copy of the name.  */
  nbuf = (char *) alloca (nlen + 1);
  memcpy (nbuf, newname, nlen);
  nbuf[nlen] = '\0';

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    return TRUE;
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  return TRUE;
}
2278
2279 /* Create a Neon typed/indexed register alias using directives, e.g.:
2280 X .dn d5.s32[1]
2281 Y .qn 6.s16
2282 Z .dn d7
2283 T .dn Z[0]
2284 These typed registers can be used instead of the types specified after the
2285 Neon mnemonic, so long as all operands given have types. Types can also be
2286 specified directly, e.g.:
2287 vadd d0.s32, d1.s32, d2.s32 */
2288
static bfd_boolean
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend ATTRIBUTE_UNUSED;
  int namelen;

  /* Start with neither a type nor a scalar index recorded.  */
  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  nameend = p;

  /* .dn defines a D-register (64-bit) alias, .qn a Q-register (128-bit)
     alias; anything else is not ours to handle.  */
  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;
  else
    return FALSE;

  p += 5;

  if (*p == '\0')
    return FALSE;

  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return FALSE;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return FALSE;
	}
      basereg = &mybasereg;
      /* Q registers are numbered as pairs of D registers, hence * 2.  */
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* Inherit any type/index information already attached to the base.  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return FALSE;
	}
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index.  */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return FALSE;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return FALSE;
	}
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  namelen = nameend - newname;
#else
  newname = original_case_string;
  namelen = strlen (newname);
#endif

  namebuf = (char *) alloca (namelen + 1);
  strncpy (namebuf, newname, namelen);
  namebuf[namelen] = '\0';

  /* As for .req: record the alias as stated, then in all-uppercase and
     all-lowercase variants where those differ from the original.  */
  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  return TRUE;
}
2428
2429 /* Should never be called, as .req goes between the alias and the
2430 register name, not at the beginning of the line. */
2431
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* Reached only when .req appears at the start of a statement
     instead of after an alias name.  */
  as_bad (_("invalid syntax for .req directive"));
}
2437
static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  /* As for s_req: .dn must follow the alias name, not start the line.  */
  as_bad (_("invalid syntax for .dn directive"));
}
2443
static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  /* As for s_req: .qn must follow the alias name, not start the line.  */
  as_bad (_("invalid syntax for .qn directive"));
}
2449
2450 /* The .unreq directive deletes an alias which was previously defined
2451 by .req. For example:
2452
2453 my_alias .req r11
2454 .unreq my_alias */
2455
2456 static void
2457 s_unreq (int a ATTRIBUTE_UNUSED)
2458 {
2459 char * name;
2460 char saved_char;
2461
2462 name = input_line_pointer;
2463
2464 while (*input_line_pointer != 0
2465 && *input_line_pointer != ' '
2466 && *input_line_pointer != '\n')
2467 ++input_line_pointer;
2468
2469 saved_char = *input_line_pointer;
2470 *input_line_pointer = 0;
2471
2472 if (!*name)
2473 as_bad (_("invalid syntax for .unreq directive"));
2474 else
2475 {
2476 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2477 name);
2478
2479 if (!reg)
2480 as_bad (_("unknown register alias '%s'"), name);
2481 else if (reg->builtin)
2482 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2483 name);
2484 else
2485 {
2486 char * p;
2487 char * nbuf;
2488
2489 hash_delete (arm_reg_hsh, name, FALSE);
2490 free ((char *) reg->name);
2491 if (reg->neon)
2492 free (reg->neon);
2493 free (reg);
2494
2495 /* Also locate the all upper case and all lower case versions.
2496 Do not complain if we cannot find one or the other as it
2497 was probably deleted above. */
2498
2499 nbuf = strdup (name);
2500 for (p = nbuf; *p; p++)
2501 *p = TOUPPER (*p);
2502 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2503 if (reg)
2504 {
2505 hash_delete (arm_reg_hsh, nbuf, FALSE);
2506 free ((char *) reg->name);
2507 if (reg->neon)
2508 free (reg->neon);
2509 free (reg);
2510 }
2511
2512 for (p = nbuf; *p; p++)
2513 *p = TOLOWER (*p);
2514 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2515 if (reg)
2516 {
2517 hash_delete (arm_reg_hsh, nbuf, FALSE);
2518 free ((char *) reg->name);
2519 if (reg->neon)
2520 free (reg->neon);
2521 free (reg);
2522 }
2523
2524 free (nbuf);
2525 }
2526 }
2527
2528 *input_line_pointer = saved_char;
2529 demand_empty_rest_of_line ();
2530 }
2531
2532 /* Directives: Instruction set selection. */
2533
2534 #ifdef OBJ_ELF
2535 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2536 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
   Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
   and $d had type STT_OBJECT (BSF_OBJECT flag).  Now all three are untyped.  */
2539
2540 /* Create a new mapping symbol for the transition to STATE. */
2541
static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  /* Choose the canonical mapping symbol name for STATE: $d for data,
     $a for ARM code, $t for Thumb code (see the AAELF reference in the
     comment above).  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Record the ARM/Thumb and interworking attributes on the symbol.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      /* An equal address means the previous symbol is superseded.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
2615
2616 /* We must sometimes convert a region marked as code to data during
2617 code alignment, if an odd number of bytes have to be padded. The
2618 code mapping symbol is pushed to an aligned address. */
2619
static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS *frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the BYTES of padding at VALUE as data, then resume STATE
     immediately after it.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
2642
2643 static void mapping_state_2 (enum mstate state, int max_chars);
2644
2645 /* Set the mapping state to STATE. Only call this when about to
2646 emit some STATE bytes to the file. */
2647
2648 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (state == MAP_ARM || state == MAP_THUMB)
    /* PR gas/12931
       All ARM instructions require 4-byte alignment.
       (Almost) all Thumb instructions require 2-byte alignment.

       When emitting instructions into any section, mark the section
       appropriately.

       Some Thumb instructions are alignment-sensitive modulo 4 bytes,
       but themselves require 2-byte alignment; this applies to some
       PC- relative forms.  However, these cases will involve implicit
       literal pool generation or an explicit .align >=2, both of
       which will cause the section to be marked with sufficient
       alignment.  Thus, we don't handle those cases here.  */
    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);

  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later.  */
    return;

  mapping_state_2 (state, 0);
}
2681
2682 /* Same as mapping_state, but MAX_CHARS bytes have already been
2683 allocated. Put the mapping symbol that far back. */
2684
static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Only ordinary sections get mapping symbols.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  /* First code in this section: if any bytes were emitted before it,
     retroactively mark them as data at the section start.  */
  if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
      || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
    {
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }

  /* Record the new state and place its mapping symbol MAX_CHARS bytes
     back from the current output position.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
2711 #undef TRANSITION
2712 #else
2713 #define mapping_state(x) ((void)0)
2714 #define mapping_state_2(x, y) ((void)0)
2715 #endif
2716
2717 /* Find the real, Thumb encoded start of a Thumb function. */
2718
2719 #ifdef OBJ_COFF
static symbolS *
find_real_start (symbolS * symbolP)
{
  char * real_start;
  const char * name = S_GET_NAME (symbolP);
  symbolS * new_target;

  /* This definition must agree with the one in gcc/config/arm/thumb.c.  */
#define STUB_NAME ".real_start_of"

  if (name == NULL)
    abort ();

  /* The compiler may generate BL instructions to local labels because
     it needs to perform a branch to a far away location.  These labels
     do not have a corresponding ".real_start_of" label.  We check
     both for S_IS_LOCAL and for a leading dot, to give a way to bypass
     the ".real_start_of" convention for nonlocal branches.  */
  if (S_IS_LOCAL (symbolP) || name[0] == '.')
    return symbolP;

  /* Look up the ".real_start_of<name>" stub symbol.  */
  real_start = ACONCAT ((STUB_NAME, name, NULL));
  new_target = symbol_find (real_start);

  /* Fall back to the symbol itself when no stub exists.  */
  if (new_target == NULL)
    {
      as_warn (_("Failed to find real start of function: %s\n"), name);
      new_target = symbolP;
    }

  return new_target;
}
2752 #endif
2753
2754 static void
2755 opcode_select (int width)
2756 {
2757 switch (width)
2758 {
2759 case 16:
2760 if (! thumb_mode)
2761 {
2762 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2763 as_bad (_("selected processor does not support THUMB opcodes"));
2764
2765 thumb_mode = 1;
2766 /* No need to force the alignment, since we will have been
2767 coming from ARM mode, which is word-aligned. */
2768 record_alignment (now_seg, 1);
2769 }
2770 break;
2771
2772 case 32:
2773 if (thumb_mode)
2774 {
2775 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2776 as_bad (_("selected processor does not support ARM opcodes"));
2777
2778 thumb_mode = 0;
2779
2780 if (!need_pass_2)
2781 frag_align (2, 0, 0);
2782
2783 record_alignment (now_seg, 1);
2784 }
2785 break;
2786
2787 default:
2788 as_bad (_("invalid instruction size selected (%d)"), width);
2789 }
2790 }
2791
static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  /* Switch to ARM (32-bit) instruction encoding.  */
  opcode_select (32);
  demand_empty_rest_of_line ();
}
2798
static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* Switch to Thumb (16-bit) instruction encoding.  */
  opcode_select (16);
  demand_empty_rest_of_line ();
}
2805
2806 static void
2807 s_code (int unused ATTRIBUTE_UNUSED)
2808 {
2809 int temp;
2810
2811 temp = get_absolute_expression ();
2812 switch (temp)
2813 {
2814 case 16:
2815 case 32:
2816 opcode_select (temp);
2817 break;
2818
2819 default:
2820 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2821 }
2822 }
2823
static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.  */
  if (! thumb_mode)
    {
      /* NOTE(review): 2 here (rather than the 1 used by opcode_select)
	 presumably marks "forced" Thumb mode -- confirm against the
	 thumb_mode consumers elsewhere in this file.  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
2840
static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  /* Switch to Thumb encoding first, as for .thumb.  */
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
}
2850
2851 /* Perform a .set directive, but also mark the alias as
2852 being a thumb function. */
2853
static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  name = input_line_pointer;
  delim = get_symbol_end ();
  end_name = input_line_pointer;
  *end_name = delim;

  /* The symbol name must be followed by a comma and then a value.  */
  if (*input_line_pointer != ',')
    {
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  /* Create the symbol if it does not exist yet.  */
  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  * end_name = delim;

  /* For the "equiv" flavour, redefinition of a defined symbol is an
     error (register aliases excepted).  */
  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  /* Assign the value from the rest of the line to the symbol.  */
  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  /* Mark the symbol as naming Thumb code, for interworking support.  */
  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
2940
2941 /* Directives: Mode selection. */
2942
2943 /* .syntax [unified|divided] - choose the new unified syntax
2944 (same for Arm and Thumb encoding, modulo slight differences in what
2945 can be represented) or the old divergent syntax for each mode. */
static void
s_syntax (int unused ATTRIBUTE_UNUSED)
{
  char *name, delim;

  name = input_line_pointer;
  delim = get_symbol_end ();

  if (!strcasecmp (name, "unified"))
    unified_syntax = TRUE;
  else if (!strcasecmp (name, "divided"))
    unified_syntax = FALSE;
  else
    {
      as_bad (_("unrecognized syntax mode \"%s\""), name);
      /* NOTE(review): the delimiter clobbered by get_symbol_end is not
	 restored on this error path and the rest of the line is not
	 consumed -- presumably harmless after as_bad; confirm.  */
      return;
    }
  /* Restore the character get_symbol_end replaced with NUL.  */
  *input_line_pointer = delim;
  demand_empty_rest_of_line ();
}
2966
2967 /* Directives: sectioning and alignment. */
2968
2969 /* Same as s_align_ptwo but align 0 => align 2. */
2970
2971 static void
2972 s_align (int unused ATTRIBUTE_UNUSED)
2973 {
2974 int temp;
2975 bfd_boolean fill_p;
2976 long temp_fill;
2977 long max_alignment = 15;
2978
2979 temp = get_absolute_expression ();
2980 if (temp > max_alignment)
2981 as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2982 else if (temp < 0)
2983 {
2984 as_bad (_("alignment negative. 0 assumed."));
2985 temp = 0;
2986 }
2987
2988 if (*input_line_pointer == ',')
2989 {
2990 input_line_pointer++;
2991 temp_fill = get_absolute_expression ();
2992 fill_p = TRUE;
2993 }
2994 else
2995 {
2996 fill_p = FALSE;
2997 temp_fill = 0;
2998 }
2999
3000 if (!temp)
3001 temp = 2;
3002
3003 /* Only make a frag if we HAVE to. */
3004 if (temp && !need_pass_2)
3005 {
3006 if (!fill_p && subseg_text_p (now_seg))
3007 frag_align_code (temp, 0);
3008 else
3009 frag_align (temp, (int) temp_fill, 0);
3010 }
3011 demand_empty_rest_of_line ();
3012
3013 record_alignment (now_seg, temp);
3014 }
3015
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  md_elf_section_change_hook ();
#endif
}
3028
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Pad the current section to a 2-byte boundary.  */
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
3040
3041 /* Directives: CodeComposer Studio. */
3042
3043 /* .ref (for CodeComposer Studio syntax only). */
static void
s_ccs_ref (int unused ATTRIBUTE_UNUSED)
{
  /* .ref is accepted (and ignored) only under -mccs.  */
  if (codecomposer_syntax)
    ignore_rest_of_line ();
  else
    as_bad (_(".ref pseudo-op only available with -mccs flag."));
}
3052
/* If name is not NULL, then it is used for marking the beginning of a
   function, whereas if it is NULL then it means the function end.  */
3055 static void
3056 asmfunc_debug (const char * name)
3057 {
3058 static const char * last_name = NULL;
3059
3060 if (name != NULL)
3061 {
3062 gas_assert (last_name == NULL);
3063 last_name = name;
3064
3065 if (debug_type == DEBUG_STABS)
3066 stabs_generate_asm_func (name, name);
3067 }
3068 else
3069 {
3070 gas_assert (last_name != NULL);
3071
3072 if (debug_type == DEBUG_STABS)
3073 stabs_generate_asm_endfunc (last_name, last_name);
3074
3075 last_name = NULL;
3076 }
3077 }
3078
static void
s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
{
  /* CodeComposer Studio .asmfunc: announce that the next line label
     names the function being opened.  */
  if (codecomposer_syntax)
    {
      switch (asmfunc_state)
	{
	case OUTSIDE_ASMFUNC:
	  /* Normal case: wait for the label naming the function.  */
	  asmfunc_state = WAITING_ASMFUNC_NAME;
	  break;

	case WAITING_ASMFUNC_NAME:
	  as_bad (_(".asmfunc repeated."));
	  break;

	case WAITING_ENDASMFUNC:
	  as_bad (_(".asmfunc without function."));
	  break;
	}
      demand_empty_rest_of_line ();
    }
  else
    as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
}
3103
static void
s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
{
  /* CodeComposer Studio .endasmfunc: close the currently open function.  */
  if (codecomposer_syntax)
    {
      switch (asmfunc_state)
	{
	case OUTSIDE_ASMFUNC:
	  as_bad (_(".endasmfunc without a .asmfunc."));
	  break;

	case WAITING_ASMFUNC_NAME:
	  as_bad (_(".endasmfunc without function."));
	  break;

	case WAITING_ENDASMFUNC:
	  /* Normal case: emit end-of-function debug info and reset.  */
	  asmfunc_state = OUTSIDE_ASMFUNC;
	  asmfunc_debug (NULL);
	  break;
	}
      demand_empty_rest_of_line ();
    }
  else
    as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
}
3129
static void
s_ccs_def (int name)
{
  /* CodeComposer Studio .def: treated like .global under -mccs.  */
  if (codecomposer_syntax)
    s_globl (name);
  else
    as_bad (_(".def pseudo-op only available with -mccs flag."));
}
3138
3139 /* Directives: Literal pools. */
3140
3141 static literal_pool *
3142 find_literal_pool (void)
3143 {
3144 literal_pool * pool;
3145
3146 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3147 {
3148 if (pool->section == now_seg
3149 && pool->sub_section == now_subseg)
3150 break;
3151 }
3152
3153 return pool;
3154 }
3155
3156 static literal_pool *
3157 find_or_make_literal_pool (void)
3158 {
3159 /* Next literal pool ID number. */
3160 static unsigned int latest_pool_num = 1;
3161 literal_pool * pool;
3162
3163 pool = find_literal_pool ();
3164
3165 if (pool == NULL)
3166 {
3167 /* Create a new pool. */
3168 pool = (literal_pool *) xmalloc (sizeof (* pool));
3169 if (! pool)
3170 return NULL;
3171
3172 pool->next_free_entry = 0;
3173 pool->section = now_seg;
3174 pool->sub_section = now_subseg;
3175 pool->next = list_of_pools;
3176 pool->symbol = NULL;
3177 pool->alignment = 2;
3178
3179 /* Add it to the list. */
3180 list_of_pools = pool;
3181 }
3182
3183 /* New pools, and emptied pools, will have a NULL symbol. */
3184 if (pool->symbol == NULL)
3185 {
3186 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3187 (valueT) 0, &zero_address_frag);
3188 pool->id = latest_pool_num ++;
3189 }
3190
3191 /* Done. */
3192 return pool;
3193 }
3194
3195 /* Add the literal in the global 'inst'
3196 structure to the relevant literal pool. */
3197
static int
add_to_lit_pool (unsigned int nbytes)
{
#define PADDING_SLOT 0x1
#define LIT_ENTRY_SIZE_MASK 0xFF
  literal_pool * pool;
  unsigned int entry, pool_size = 0;
  bfd_boolean padding_slot_p = FALSE;
  unsigned imm1 = 0;
  unsigned imm2 = 0;

  /* An 8-byte literal is handled as two 4-byte halves; extract them
     from the instruction operands, swapping for big-endian targets.  */
  if (nbytes == 8)
    {
      imm1 = inst.operands[1].imm;
      imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
	      : inst.reloc.exp.X_unsigned ? 0
	      : ((bfd_int64_t) inst.operands[1].imm) >> 32);
      if (target_big_endian)
	{
	  imm1 = imm2;
	  imm2 = inst.operands[1].imm;
	}
    }

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if (nbytes == 4)
	{
	  /* 4-byte constant: match on value, size and signedness.  */
	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	      && (inst.reloc.exp.X_op == O_constant)
	      && (pool->literals[entry].X_add_number
		  == inst.reloc.exp.X_add_number)
	      && (pool->literals[entry].X_md == nbytes)
	      && (pool->literals[entry].X_unsigned
		  == inst.reloc.exp.X_unsigned))
	    break;

	  /* 4-byte symbolic value: match on the whole expression.  */
	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	      && (inst.reloc.exp.X_op == O_symbol)
	      && (pool->literals[entry].X_add_number
		  == inst.reloc.exp.X_add_number)
	      && (pool->literals[entry].X_add_symbol
		  == inst.reloc.exp.X_add_symbol)
	      && (pool->literals[entry].X_op_symbol
		  == inst.reloc.exp.X_op_symbol)
	      && (pool->literals[entry].X_md == nbytes))
	    break;
	}
      /* 8-byte value: match an 8-byte-aligned pair of entries.  */
      else if ((nbytes == 8)
	       && !(pool_size & 0x7)
	       && ((entry + 1) != pool->next_free_entry)
	       && (pool->literals[entry].X_op == O_constant)
	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
	       && (pool->literals[entry].X_unsigned
		   == inst.reloc.exp.X_unsigned)
	       && (pool->literals[entry + 1].X_op == O_constant)
	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
	       && (pool->literals[entry + 1].X_unsigned
		   == inst.reloc.exp.X_unsigned))
	break;

      /* A padding slot previously inserted for 8-byte alignment can be
	 reused for a 4-byte literal.  */
      padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
      if (padding_slot_p && (nbytes == 4))
	break;

      pool_size += 4;
    }

  /* Do we need to create a new entry?	*/
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      if (nbytes == 8)
	{
	  /* For 8-byte entries, we align to an 8-byte boundary,
	     and split it into two 4-byte entries, because on 32-bit
	     host, 8-byte constants are treated as big num, thus
	     saved in "generic_bignum" which will be overwritten
	     by later assignments.

	     We also need to make sure there is enough space for
	     the split.

	     We also check to make sure the literal operand is a
	     constant number.  */
	  if (!(inst.reloc.exp.X_op == O_constant
		|| inst.reloc.exp.X_op == O_big))
	    {
	      inst.error = _("invalid type for literal pool");
	      return FAIL;
	    }
	  else if (pool_size & 0x7)
	    {
	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
		{
		  inst.error = _("literal pool overflow");
		  return FAIL;
		}

	      /* Emit a 4-byte padding slot, tagged so that a later
		 4-byte literal can reuse it (see padding_slot_p).  */
	      pool->literals[entry] = inst.reloc.exp;
	      pool->literals[entry].X_add_number = 0;
	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
	      pool->next_free_entry += 1;
	      pool_size += 4;
	    }
	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
	    {
	      inst.error = _("literal pool overflow");
	      return FAIL;
	    }

	  /* Store the two 4-byte halves as consecutive entries.  */
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm1;
	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
	  pool->literals[entry++].X_md = 4;
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm2;
	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
	  pool->literals[entry].X_md = 4;
	  /* An 8-byte literal forces the whole pool to 2^3 alignment.  */
	  pool->alignment = 3;
	  pool->next_free_entry += 1;
	}
      else
	{
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_md = 4;
	}

#ifdef OBJ_ELF
      /* PR ld/12974: Record the location of the first source line to reference
	 this entry in the literal pool.  If it turns out during linking that the
	 symbol does not exist we will be able to give an accurate line number for
	 the (first use of the) missing reference.  */
      if (debug_type == DEBUG_DWARF2)
	dwarf2_where (pool->locs + entry);
#endif
      pool->next_free_entry += 1;
    }
  else if (padding_slot_p)
    {
      /* Reuse the padding slot found above for this 4-byte literal.  */
      pool->literals[entry] = inst.reloc.exp;
      pool->literals[entry].X_md = nbytes;
    }

  /* Rewrite the instruction's operand as pool-symbol + byte offset;
     later fix-up processing resolves this to a PC-relative reference.  */
  inst.reloc.exp.X_op = O_symbol;
  inst.reloc.exp.X_add_number = pool_size;
  inst.reloc.exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
3358
bfd_boolean
tc_start_label_without_colon (char unused1 ATTRIBUTE_UNUSED, const char * rest)
{
  bfd_boolean ret = TRUE;

  /* Under -mccs, a label at the start of a line after .asmfunc names
     the function being opened.  */
  if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
    {
      const char *label = rest;

      /* Scan backwards from REST to the start of the line to recover
	 the label text itself.  */
      while (!is_end_of_line[(int) label[-1]])
	--label;

      if (*label == '.')
	{
	  as_bad (_("Invalid label '%s'"), label);
	  ret = FALSE;
	}

      asmfunc_debug (label);

      asmfunc_state = WAITING_ENDASMFUNC;
    }

  return ret;
}
3384
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.

   Fill in the pre-allocated symbol SYMBOLP: copy NAME into the notes
   obstack, set its segment, value and owning fragment, and append it
   to the end of the global symbol chain.  */

static void
symbol_locate (symbolS * symbolP,
	       const char * name,	/* It is copied, the caller can modify.	 */
	       segT segment,		/* Segment identifier (SEG_<something>).  */
	       valueT valu,		/* Symbol value.  */
	       fragS * frag)		/* Associated fragment.	 */
{
  size_t name_length;
  char * preserved_copy_of_name;

  /* Copy the name (including the trailing NUL) into the notes obstack
     so it outlives the caller's buffer.  */
  name_length = strlen (name) + 1;   /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = (char *) obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  Appending is only legal while the
     symbol table is still mutable.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
3435
/* Implement the .ltorg/.pool directives: dump the current literal pool
   at this point in the output.  Aligns the pool, names it via the
   deferred pool symbol, and emits every pending literal.  Does nothing
   if there is no pool or it is empty.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (pool->alignment, 0, 0);

  record_alignment (now_seg, 2);

#ifdef OBJ_ELF
  /* Literals are data, so switch the mapping state and emit a $d
     mapping symbol here.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
#endif
  /* NOTE: the embedded \002 byte keeps the generated name distinct
     from anything a user could write in source.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  /* Resolve the deferred pool symbol to the current location.  */
  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
#ifdef OBJ_ELF
      if (debug_type == DEBUG_DWARF2)
	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
#endif
      /* First output the expression in the instruction to the pool.  */
      emit_expr (&(pool->literals[entry]),
		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
    }

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3487
3488 #ifdef OBJ_ELF
3489 /* Forward declarations for functions below, in the MD interface
3490 section. */
3491 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3492 static valueT create_unwind_entry (int);
3493 static void start_unwind_section (const segT, int);
3494 static void add_unwind_opcode (valueT, int);
3495 static void flush_pending_unwind (void);
3496
3497 /* Directives: Data. */
3498
3499 static void
3500 s_arm_elf_cons (int nbytes)
3501 {
3502 expressionS exp;
3503
3504 #ifdef md_flush_pending_output
3505 md_flush_pending_output ();
3506 #endif
3507
3508 if (is_it_end_of_statement ())
3509 {
3510 demand_empty_rest_of_line ();
3511 return;
3512 }
3513
3514 #ifdef md_cons_align
3515 md_cons_align (nbytes);
3516 #endif
3517
3518 mapping_state (MAP_DATA);
3519 do
3520 {
3521 int reloc;
3522 char *base = input_line_pointer;
3523
3524 expression (& exp);
3525
3526 if (exp.X_op != O_symbol)
3527 emit_expr (&exp, (unsigned int) nbytes);
3528 else
3529 {
3530 char *before_reloc = input_line_pointer;
3531 reloc = parse_reloc (&input_line_pointer);
3532 if (reloc == -1)
3533 {
3534 as_bad (_("unrecognized relocation suffix"));
3535 ignore_rest_of_line ();
3536 return;
3537 }
3538 else if (reloc == BFD_RELOC_UNUSED)
3539 emit_expr (&exp, (unsigned int) nbytes);
3540 else
3541 {
3542 reloc_howto_type *howto = (reloc_howto_type *)
3543 bfd_reloc_type_lookup (stdoutput,
3544 (bfd_reloc_code_real_type) reloc);
3545 int size = bfd_get_reloc_size (howto);
3546
3547 if (reloc == BFD_RELOC_ARM_PLT32)
3548 {
3549 as_bad (_("(plt) is only valid on branch targets"));
3550 reloc = BFD_RELOC_UNUSED;
3551 size = 0;
3552 }
3553
3554 if (size > nbytes)
3555 as_bad (_("%s relocations do not fit in %d bytes"),
3556 howto->name, nbytes);
3557 else
3558 {
3559 /* We've parsed an expression stopping at O_symbol.
3560 But there may be more expression left now that we
3561 have parsed the relocation marker. Parse it again.
3562 XXX Surely there is a cleaner way to do this. */
3563 char *p = input_line_pointer;
3564 int offset;
3565 char *save_buf = (char *) alloca (input_line_pointer - base);
3566 memcpy (save_buf, base, input_line_pointer - base);
3567 memmove (base + (input_line_pointer - before_reloc),
3568 base, before_reloc - base);
3569
3570 input_line_pointer = base + (input_line_pointer-before_reloc);
3571 expression (&exp);
3572 memcpy (base, save_buf, p - base);
3573
3574 offset = nbytes - size;
3575 p = frag_more (nbytes);
3576 memset (p, 0, nbytes);
3577 fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
3578 size, &exp, 0, (enum bfd_reloc_code_real) reloc);
3579 }
3580 }
3581 }
3582 }
3583 while (*input_line_pointer++ == ',');
3584
3585 /* Put terminator back into stream. */
3586 input_line_pointer --;
3587 demand_empty_rest_of_line ();
3588 }
3589
3590 /* Emit an expression containing a 32-bit thumb instruction.
3591 Implementation based on put_thumb32_insn. */
3592
3593 static void
3594 emit_thumb32_expr (expressionS * exp)
3595 {
3596 expressionS exp_high = *exp;
3597
3598 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3599 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3600 exp->X_add_number &= 0xffff;
3601 emit_expr (exp, (unsigned int) THUMB_SIZE);
3602 }
3603
/* Guess the instruction size based on the opcode.  Returns 2 or 4 when
   the width is unambiguous, 0 when it cannot be determined.  */

static int
thumb_insn_size (int opcode)
{
  unsigned int value = (unsigned int) opcode;

  if (value >= 0xe8000000u)
    return 4;
  if (value < 0xe800u)
    return 2;
  return 0;
}
3616
/* Emit the constant expression EXP as one instruction of NBYTES bytes
   (2, 4, or 0 meaning "deduce the width from the opcode value").
   Keeps the IT-block state machine in step with the emitted value.
   Returns TRUE on success, FALSE (after diagnosing) otherwise.  */

static bfd_boolean
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      /* Bare .inst: guess the width from the opcode.  */
      if (size == 0)
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  /* A 16-bit encoding cannot hold more than 16 bits.  */
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "\
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      /* Update the IT tracking before emitting.  */
	      if (now_it.state == AUTOMATIC_IT_BLOCK)
		set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
	      else
		set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      /* On little-endian targets a 32-bit Thumb value must be
		 emitted as two separate halfwords.  */
	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. " \
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
3661
3662 /* Like s_arm_elf_cons but do not use md_cons_align and
3663 set the mapping state to MAP_ARM/MAP_THUMB. */
3664
3665 static void
3666 s_arm_elf_inst (int nbytes)
3667 {
3668 if (is_it_end_of_statement ())
3669 {
3670 demand_empty_rest_of_line ();
3671 return;
3672 }
3673
3674 /* Calling mapping_state () here will not change ARM/THUMB,
3675 but will ensure not to be in DATA state. */
3676
3677 if (thumb_mode)
3678 mapping_state (MAP_THUMB);
3679 else
3680 {
3681 if (nbytes != 0)
3682 {
3683 as_bad (_("width suffixes are invalid in ARM mode"));
3684 ignore_rest_of_line ();
3685 return;
3686 }
3687
3688 nbytes = 4;
3689
3690 mapping_state (MAP_ARM);
3691 }
3692
3693 do
3694 {
3695 expressionS exp;
3696
3697 expression (& exp);
3698
3699 if (! emit_insn (& exp, nbytes))
3700 {
3701 ignore_rest_of_line ();
3702 return;
3703 }
3704 }
3705 while (*input_line_pointer++ == ',');
3706
3707 /* Put terminator back into stream. */
3708 input_line_pointer --;
3709 demand_empty_rest_of_line ();
3710 }
3711
3712 /* Parse a .rel31 directive. */
3713
3714 static void
3715 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
3716 {
3717 expressionS exp;
3718 char *p;
3719 valueT highbit;
3720
3721 highbit = 0;
3722 if (*input_line_pointer == '1')
3723 highbit = 0x80000000;
3724 else if (*input_line_pointer != '0')
3725 as_bad (_("expected 0 or 1"));
3726
3727 input_line_pointer++;
3728 if (*input_line_pointer != ',')
3729 as_bad (_("missing comma"));
3730 input_line_pointer++;
3731
3732 #ifdef md_flush_pending_output
3733 md_flush_pending_output ();
3734 #endif
3735
3736 #ifdef md_cons_align
3737 md_cons_align (4);
3738 #endif
3739
3740 mapping_state (MAP_DATA);
3741
3742 expression (&exp);
3743
3744 p = frag_more (4);
3745 md_number_to_chars (p, highbit, 4);
3746 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3747 BFD_RELOC_ARM_PREL31);
3748
3749 demand_empty_rest_of_line ();
3750 }
3751
3752 /* Directives: AEABI stack-unwind tables. */
3753
3754 /* Parse an unwind_fnstart directive. Simply records the current location. */
3755
3756 static void
3757 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3758 {
3759 demand_empty_rest_of_line ();
3760 if (unwind.proc_start)
3761 {
3762 as_bad (_("duplicate .fnstart directive"));
3763 return;
3764 }
3765
3766 /* Mark the start of the function. */
3767 unwind.proc_start = expr_build_dot ();
3768
3769 /* Reset the rest of the unwind info. */
3770 unwind.opcode_count = 0;
3771 unwind.table_entry = NULL;
3772 unwind.personality_routine = NULL;
3773 unwind.personality_index = -1;
3774 unwind.frame_size = 0;
3775 unwind.fp_offset = 0;
3776 unwind.fp_reg = REG_SP;
3777 unwind.fp_used = 0;
3778 unwind.sp_restored = 0;
3779 }
3780
3781
3782 /* Parse a handlerdata directive. Creates the exception handling table entry
3783 for the function. */
3784
3785 static void
3786 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3787 {
3788 demand_empty_rest_of_line ();
3789 if (!unwind.proc_start)
3790 as_bad (MISSING_FNSTART);
3791
3792 if (unwind.table_entry)
3793 as_bad (_("duplicate .handlerdata directive"));
3794
3795 create_unwind_entry (1);
3796 }
3797
/* Parse an unwind_fnend directive.  Generates the index table entry:
   two words in the unwind index section, the first pointing back at
   the function start, the second either an inline table entry or a
   pointer to the out-of-line one.  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  VAL is non-zero only when the entry is small
     enough to be placed inline in the index.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  memset (ptr, 0, 8);
  /* WHERE is the offset of the two-word entry just reserved.  */
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      /* A zero-sized BFD_RELOC_NONE fixup references the routine
	 without changing any output bytes.  */
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  unwind.proc_start = NULL;
}
3867
3868
3869 /* Parse an unwind_cantunwind directive. */
3870
3871 static void
3872 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3873 {
3874 demand_empty_rest_of_line ();
3875 if (!unwind.proc_start)
3876 as_bad (MISSING_FNSTART);
3877
3878 if (unwind.personality_routine || unwind.personality_index != -1)
3879 as_bad (_("personality routine specified for cantunwind frame"));
3880
3881 unwind.personality_index = -2;
3882 }
3883
3884
3885 /* Parse a personalityindex directive. */
3886
3887 static void
3888 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3889 {
3890 expressionS exp;
3891
3892 if (!unwind.proc_start)
3893 as_bad (MISSING_FNSTART);
3894
3895 if (unwind.personality_routine || unwind.personality_index != -1)
3896 as_bad (_("duplicate .personalityindex directive"));
3897
3898 expression (&exp);
3899
3900 if (exp.X_op != O_constant
3901 || exp.X_add_number < 0 || exp.X_add_number > 15)
3902 {
3903 as_bad (_("bad personality routine number"));
3904 ignore_rest_of_line ();
3905 return;
3906 }
3907
3908 unwind.personality_index = exp.X_add_number;
3909
3910 demand_empty_rest_of_line ();
3911 }
3912
3913
/* Parse a personality directive.  Records the named personality
   routine for the current function's unwind information.  */

static void
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
{
  char *name, *p, c;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personality directive"));

  /* get_symbol_end temporarily NUL-terminates the name in the input
     buffer; restore the saved character once the symbol is made.  */
  name = input_line_pointer;
  c = get_symbol_end ();
  p = input_line_pointer;
  unwind.personality_routine = symbol_find_or_make (name);
  *p = c;
  demand_empty_rest_of_line ();
}
3934
3935
/* Parse a directive saving core registers.  RANGE is a bit mask of
   the registers in the .save list; the matching unwind pop opcodes
   are emitted and the frame size updated.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;
  int n;

  range = parse_reg_list (&input_line_pointer);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      /* Swap the ip bit (0x1000) for the sp bit (0x2000).  */
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.  */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.  */
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.  */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.  */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
4011
4012
4013 /* Parse a directive saving FPA registers. */
4014
4015 static void
4016 s_arm_unwind_save_fpa (int reg)
4017 {
4018 expressionS exp;
4019 int num_regs;
4020 valueT op;
4021
4022 /* Get Number of registers to transfer. */
4023 if (skip_past_comma (&input_line_pointer) != FAIL)
4024 expression (&exp);
4025 else
4026 exp.X_op = O_illegal;
4027
4028 if (exp.X_op != O_constant)
4029 {
4030 as_bad (_("expected , <constant>"));
4031 ignore_rest_of_line ();
4032 return;
4033 }
4034
4035 num_regs = exp.X_add_number;
4036
4037 if (num_regs < 1 || num_regs > 4)
4038 {
4039 as_bad (_("number of registers must be in the range [1:4]"));
4040 ignore_rest_of_line ();
4041 return;
4042 }
4043
4044 demand_empty_rest_of_line ();
4045
4046 if (reg == 4)
4047 {
4048 /* Short form. */
4049 op = 0xb4 | (num_regs - 1);
4050 add_unwind_opcode (op, 1);
4051 }
4052 else
4053 {
4054 /* Long form. */
4055 op = 0xc800 | (reg << 4) | (num_regs - 1);
4056 add_unwind_opcode (op, 2);
4057 }
4058 unwind.frame_size += num_regs * 12;
4059 }
4060
4061
/* Parse a directive saving VFP registers for ARMv6 and above.  The
   register list may span the d16 boundary, in which case it is split
   into a VFPv3 opcode (d16-d31) and a plain one (d0-d15).  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      /* The opcode encodes the first register relative to d16.  */
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* Each D register occupies eight bytes of frame.  */
  unwind.frame_size += count * 8;
}
4110
4111
4112 /* Parse a directive saving VFP registers for pre-ARMv6. */
4113
4114 static void
4115 s_arm_unwind_save_vfp (void)
4116 {
4117 int count;
4118 unsigned int reg;
4119 valueT op;
4120
4121 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
4122 if (count == FAIL)
4123 {
4124 as_bad (_("expected register list"));
4125 ignore_rest_of_line ();
4126 return;
4127 }
4128
4129 demand_empty_rest_of_line ();
4130
4131 if (reg == 8)
4132 {
4133 /* Short form. */
4134 op = 0xb8 | (count - 1);
4135 add_unwind_opcode (op, 1);
4136 }
4137 else
4138 {
4139 /* Long form. */
4140 op = 0xb300 | (reg << 4) | (count - 1);
4141 add_unwind_opcode (op, 2);
4142 }
4143 unwind.frame_size += count * 8 + 4;
4144 }
4145
4146
4147 /* Parse a directive saving iWMMXt data registers. */
4148
4149 static void
4150 s_arm_unwind_save_mmxwr (void)
4151 {
4152 int reg;
4153 int hi_reg;
4154 int i;
4155 unsigned mask = 0;
4156 valueT op;
4157
4158 if (*input_line_pointer == '{')
4159 input_line_pointer++;
4160
4161 do
4162 {
4163 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4164
4165 if (reg == FAIL)
4166 {
4167 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4168 goto error;
4169 }
4170
4171 if (mask >> reg)
4172 as_tsktsk (_("register list not in ascending order"));
4173 mask |= 1 << reg;
4174
4175 if (*input_line_pointer == '-')
4176 {
4177 input_line_pointer++;
4178 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4179 if (hi_reg == FAIL)
4180 {
4181 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4182 goto error;
4183 }
4184 else if (reg >= hi_reg)
4185 {
4186 as_bad (_("bad register range"));
4187 goto error;
4188 }
4189 for (; reg < hi_reg; reg++)
4190 mask |= 1 << reg;
4191 }
4192 }
4193 while (skip_past_comma (&input_line_pointer) != FAIL);
4194
4195 skip_past_char (&input_line_pointer, '}');
4196
4197 demand_empty_rest_of_line ();
4198
4199 /* Generate any deferred opcodes because we're going to be looking at
4200 the list. */
4201 flush_pending_unwind ();
4202
4203 for (i = 0; i < 16; i++)
4204 {
4205 if (mask & (1 << i))
4206 unwind.frame_size += 8;
4207 }
4208
4209 /* Attempt to combine with a previous opcode. We do this because gcc
4210 likes to output separate unwind directives for a single block of
4211 registers. */
4212 if (unwind.opcode_count > 0)
4213 {
4214 i = unwind.opcodes[unwind.opcode_count - 1];
4215 if ((i & 0xf8) == 0xc0)
4216 {
4217 i &= 7;
4218 /* Only merge if the blocks are contiguous. */
4219 if (i < 6)
4220 {
4221 if ((mask & 0xfe00) == (1 << 9))
4222 {
4223 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
4224 unwind.opcode_count--;
4225 }
4226 }
4227 else if (i == 6 && unwind.opcode_count >= 2)
4228 {
4229 i = unwind.opcodes[unwind.opcode_count - 2];
4230 reg = i >> 4;
4231 i &= 0xf;
4232
4233 op = 0xffff << (reg - 1);
4234 if (reg > 0
4235 && ((mask & op) == (1u << (reg - 1))))
4236 {
4237 op = (1 << (reg + i + 1)) - 1;
4238 op &= ~((1 << reg) - 1);
4239 mask |= op;
4240 unwind.opcode_count -= 2;
4241 }
4242 }
4243 }
4244 }
4245
4246 hi_reg = 15;
4247 /* We want to generate opcodes in the order the registers have been
4248 saved, ie. descending order. */
4249 for (reg = 15; reg >= -1; reg--)
4250 {
4251 /* Save registers in blocks. */
4252 if (reg < 0
4253 || !(mask & (1 << reg)))
4254 {
4255 /* We found an unsaved reg. Generate opcodes to save the
4256 preceding block. */
4257 if (reg != hi_reg)
4258 {
4259 if (reg == 9)
4260 {
4261 /* Short form. */
4262 op = 0xc0 | (hi_reg - 10);
4263 add_unwind_opcode (op, 1);
4264 }
4265 else
4266 {
4267 /* Long form. */
4268 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
4269 add_unwind_opcode (op, 2);
4270 }
4271 }
4272 hi_reg = reg - 1;
4273 }
4274 }
4275
4276 return;
4277 error:
4278 ignore_rest_of_line ();
4279 }
4280
4281 static void
4282 s_arm_unwind_save_mmxwcg (void)
4283 {
4284 int reg;
4285 int hi_reg;
4286 unsigned mask = 0;
4287 valueT op;
4288
4289 if (*input_line_pointer == '{')
4290 input_line_pointer++;
4291
4292 skip_whitespace (input_line_pointer);
4293
4294 do
4295 {
4296 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4297
4298 if (reg == FAIL)
4299 {
4300 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4301 goto error;
4302 }
4303
4304 reg -= 8;
4305 if (mask >> reg)
4306 as_tsktsk (_("register list not in ascending order"));
4307 mask |= 1 << reg;
4308
4309 if (*input_line_pointer == '-')
4310 {
4311 input_line_pointer++;
4312 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4313 if (hi_reg == FAIL)
4314 {
4315 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4316 goto error;
4317 }
4318 else if (reg >= hi_reg)
4319 {
4320 as_bad (_("bad register range"));
4321 goto error;
4322 }
4323 for (; reg < hi_reg; reg++)
4324 mask |= 1 << reg;
4325 }
4326 }
4327 while (skip_past_comma (&input_line_pointer) != FAIL);
4328
4329 skip_past_char (&input_line_pointer, '}');
4330
4331 demand_empty_rest_of_line ();
4332
4333 /* Generate any deferred opcodes because we're going to be looking at
4334 the list. */
4335 flush_pending_unwind ();
4336
4337 for (reg = 0; reg < 16; reg++)
4338 {
4339 if (mask & (1 << reg))
4340 unwind.frame_size += 4;
4341 }
4342 op = 0xc700 | mask;
4343 add_unwind_opcode (op, 2);
4344 return;
4345 error:
4346 ignore_rest_of_line ();
4347 }
4348
4349
4350 /* Parse an unwind_save directive.
4351 If the argument is non-zero, this is a .vsave directive. */
4352
4353 static void
4354 s_arm_unwind_save (int arch_v6)
4355 {
4356 char *peek;
4357 struct reg_entry *reg;
4358 bfd_boolean had_brace = FALSE;
4359
4360 if (!unwind.proc_start)
4361 as_bad (MISSING_FNSTART);
4362
4363 /* Figure out what sort of save we have. */
4364 peek = input_line_pointer;
4365
4366 if (*peek == '{')
4367 {
4368 had_brace = TRUE;
4369 peek++;
4370 }
4371
4372 reg = arm_reg_parse_multi (&peek);
4373
4374 if (!reg)
4375 {
4376 as_bad (_("register expected"));
4377 ignore_rest_of_line ();
4378 return;
4379 }
4380
4381 switch (reg->type)
4382 {
4383 case REG_TYPE_FN:
4384 if (had_brace)
4385 {
4386 as_bad (_("FPA .unwind_save does not take a register list"));
4387 ignore_rest_of_line ();
4388 return;
4389 }
4390 input_line_pointer = peek;
4391 s_arm_unwind_save_fpa (reg->number);
4392 return;
4393
4394 case REG_TYPE_RN:
4395 s_arm_unwind_save_core ();
4396 return;
4397
4398 case REG_TYPE_VFD:
4399 if (arch_v6)
4400 s_arm_unwind_save_vfp_armv6 ();
4401 else
4402 s_arm_unwind_save_vfp ();
4403 return;
4404
4405 case REG_TYPE_MMXWR:
4406 s_arm_unwind_save_mmxwr ();
4407 return;
4408
4409 case REG_TYPE_MMXWCG:
4410 s_arm_unwind_save_mmxwcg ();
4411 return;
4412
4413 default:
4414 as_bad (_(".unwind_save does not support this kind of register"));
4415 ignore_rest_of_line ();
4416 }
4417 }
4418
4419
4420 /* Parse an unwind_movsp directive. */
4421
4422 static void
4423 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4424 {
4425 int reg;
4426 valueT op;
4427 int offset;
4428
4429 if (!unwind.proc_start)
4430 as_bad (MISSING_FNSTART);
4431
4432 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4433 if (reg == FAIL)
4434 {
4435 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4436 ignore_rest_of_line ();
4437 return;
4438 }
4439
4440 /* Optional constant. */
4441 if (skip_past_comma (&input_line_pointer) != FAIL)
4442 {
4443 if (immediate_for_directive (&offset) == FAIL)
4444 return;
4445 }
4446 else
4447 offset = 0;
4448
4449 demand_empty_rest_of_line ();
4450
4451 if (reg == REG_SP || reg == REG_PC)
4452 {
4453 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4454 return;
4455 }
4456
4457 if (unwind.fp_reg != REG_SP)
4458 as_bad (_("unexpected .unwind_movsp directive"));
4459
4460 /* Generate opcode to restore the value. */
4461 op = 0x90 | reg;
4462 add_unwind_opcode (op, 1);
4463
4464 /* Record the information for later. */
4465 unwind.fp_reg = reg;
4466 unwind.fp_offset = unwind.frame_size - offset;
4467 unwind.sp_restored = 1;
4468 }
4469
4470 /* Parse an unwind_pad directive. */
4471
4472 static void
4473 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4474 {
4475 int offset;
4476
4477 if (!unwind.proc_start)
4478 as_bad (MISSING_FNSTART);
4479
4480 if (immediate_for_directive (&offset) == FAIL)
4481 return;
4482
4483 if (offset & 3)
4484 {
4485 as_bad (_("stack increment must be multiple of 4"));
4486 ignore_rest_of_line ();
4487 return;
4488 }
4489
4490 /* Don't generate any opcodes, just record the details for later. */
4491 unwind.frame_size += offset;
4492 unwind.pending_offset += offset;
4493
4494 demand_empty_rest_of_line ();
4495 }
4496
4497 /* Parse an unwind_setfp directive. */
4498
4499 static void
4500 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4501 {
4502 int sp_reg;
4503 int fp_reg;
4504 int offset;
4505
4506 if (!unwind.proc_start)
4507 as_bad (MISSING_FNSTART);
4508
4509 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4510 if (skip_past_comma (&input_line_pointer) == FAIL)
4511 sp_reg = FAIL;
4512 else
4513 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4514
4515 if (fp_reg == FAIL || sp_reg == FAIL)
4516 {
4517 as_bad (_("expected <reg>, <reg>"));
4518 ignore_rest_of_line ();
4519 return;
4520 }
4521
4522 /* Optional constant. */
4523 if (skip_past_comma (&input_line_pointer) != FAIL)
4524 {
4525 if (immediate_for_directive (&offset) == FAIL)
4526 return;
4527 }
4528 else
4529 offset = 0;
4530
4531 demand_empty_rest_of_line ();
4532
4533 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4534 {
4535 as_bad (_("register must be either sp or set by a previous"
4536 "unwind_movsp directive"));
4537 return;
4538 }
4539
4540 /* Don't generate any opcodes, just record the information for later. */
4541 unwind.fp_reg = fp_reg;
4542 unwind.fp_used = 1;
4543 if (sp_reg == REG_SP)
4544 unwind.fp_offset = unwind.frame_size - offset;
4545 else
4546 unwind.fp_offset -= offset;
4547 }
4548
4549 /* Parse an unwind_raw directive. */
4550
4551 static void
4552 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4553 {
4554 expressionS exp;
4555 /* This is an arbitrary limit. */
4556 unsigned char op[16];
4557 int count;
4558
4559 if (!unwind.proc_start)
4560 as_bad (MISSING_FNSTART);
4561
4562 expression (&exp);
4563 if (exp.X_op == O_constant
4564 && skip_past_comma (&input_line_pointer) != FAIL)
4565 {
4566 unwind.frame_size += exp.X_add_number;
4567 expression (&exp);
4568 }
4569 else
4570 exp.X_op = O_illegal;
4571
4572 if (exp.X_op != O_constant)
4573 {
4574 as_bad (_("expected <offset>, <opcode>"));
4575 ignore_rest_of_line ();
4576 return;
4577 }
4578
4579 count = 0;
4580
4581 /* Parse the opcode. */
4582 for (;;)
4583 {
4584 if (count >= 16)
4585 {
4586 as_bad (_("unwind opcode too long"));
4587 ignore_rest_of_line ();
4588 }
4589 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4590 {
4591 as_bad (_("invalid unwind opcode"));
4592 ignore_rest_of_line ();
4593 return;
4594 }
4595 op[count++] = exp.X_add_number;
4596
4597 /* Parse the next byte. */
4598 if (skip_past_comma (&input_line_pointer) == FAIL)
4599 break;
4600
4601 expression (&exp);
4602 }
4603
4604 /* Add the opcode bytes in reverse order. */
4605 while (count--)
4606 add_unwind_opcode (op[count], 1);
4607
4608 demand_empty_rest_of_line ();
4609 }
4610
4611
4612 /* Parse a .eabi_attribute directive. */
4613
4614 static void
4615 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4616 {
4617 int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4618
4619 if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4620 attributes_set_explicitly[tag] = 1;
4621 }
4622
/* Emit a tls fix for the symbol: attach a TLS_DESCSEQ relocation at
   the current output position without emitting any bytes.  */

static void
s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
{
  char *p;
  expressionS exp;
#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* obstack_next_free points at the next byte the current frag will
     use; the fixup covers the code about to be assembled here.  */
  p = obstack_next_free (&frchain_now->frch_obstack);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
	       : BFD_RELOC_ARM_TLS_DESCSEQ);
}
4646 #endif /* OBJ_ELF */
4647
4648 static void s_arm_arch (int);
4649 static void s_arm_object_arch (int);
4650 static void s_arm_cpu (int);
4651 static void s_arm_fpu (int);
4652 static void s_arm_arch_extension (int);
4653
4654 #ifdef TE_PE
4655
4656 static void
4657 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4658 {
4659 expressionS exp;
4660
4661 do
4662 {
4663 expression (&exp);
4664 if (exp.X_op == O_symbol)
4665 exp.X_op = O_secrel;
4666
4667 emit_expr (&exp, 4);
4668 }
4669 while (*input_line_pointer++ == ',');
4670
4671 input_line_pointer--;
4672 demand_empty_rest_of_line ();
4673 }
4674 #endif /* TE_PE */
4675
4676 /* This table describes all the machine specific pseudo-ops the assembler
4677 has to support. The fields are:
4678 pseudo-op name without dot
4679 function to call to execute this pseudo-op
4680 Integer arg to pass to the function. */
4681
const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.	 */
  { "req", s_req, 0 },
  /* Following two are likewise never called.  */
  { "dn", s_dn, 0 },
  { "qn", s_qn, 0 },
  { "unreq", s_unreq, 0 },
  { "bss", s_bss, 0 },
  { "align", s_align, 0 },
  { "arm", s_arm, 0 },
  { "thumb", s_thumb, 0 },
  { "code", s_code, 0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func", s_thumb_func, 0 },
  { "thumb_set", s_thumb_set, 0 },
  { "even", s_even, 0 },
  { "ltorg", s_ltorg, 0 },
  { "pool", s_ltorg, 0 },
  { "syntax", s_syntax, 0 },
  { "cpu", s_arm_cpu, 0 },
  { "arch", s_arm_arch, 0 },
  { "object_arch", s_arm_object_arch, 0 },
  { "fpu", s_arm_fpu, 0 },
  { "arch_extension", s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  { "word", s_arm_elf_cons, 4 },
  { "long", s_arm_elf_cons, 4 },
  { "inst.n", s_arm_elf_inst, 2 },
  { "inst.w", s_arm_elf_inst, 4 },
  { "inst", s_arm_elf_inst, 0 },
  { "rel31", s_arm_rel31, 0 },
  /* Exception-table (EHABI) unwinding directives.  */
  { "fnstart", s_arm_unwind_fnstart, 0 },
  { "fnend", s_arm_unwind_fnend, 0 },
  { "cantunwind", s_arm_unwind_cantunwind, 0 },
  { "personality", s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata", s_arm_unwind_handlerdata, 0 },
  { "save", s_arm_unwind_save, 0 },
  { "vsave", s_arm_unwind_save, 1 },
  { "movsp", s_arm_unwind_movsp, 0 },
  { "pad", s_arm_unwind_pad, 0 },
  { "setfp", s_arm_unwind_setfp, 0 },
  { "unwind_raw", s_arm_unwind_raw, 0 },
  { "eabi_attribute", s_arm_eabi_attribute, 0 },
  { "tlsdescseq", s_arm_tls_descseq, 0 },
#else
  { "word", cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
  { "loc", dwarf2_directive_loc, 0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend", float_cons, 'x' },
  { "ldouble", float_cons, 'x' },
  { "packed", float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif

  /* These are for compatibility with CodeComposer Studio.  */
  {"ref", s_ccs_ref, 0},
  {"def", s_ccs_def, 0},
  {"asmfunc", s_ccs_asmfunc, 0},
  {"endasmfunc", s_ccs_endasmfunc, 0},

  /* Sentinel terminating the table.  */
  { 0, 0, 0 }
};
4755 \f
4756 /* Parser functions used exclusively in instruction operands. */
4757
4758 /* Generic immediate-value read function for use in insn parsing.
4759 STR points to the beginning of the immediate (the leading #);
4760 VAL receives the value; if the value is outside [MIN, MAX]
4761 issue an error. PREFIX_OPT is true if the immediate prefix is
4762 optional. */
4763
4764 static int
4765 parse_immediate (char **str, int *val, int min, int max,
4766 bfd_boolean prefix_opt)
4767 {
4768 expressionS exp;
4769 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4770 if (exp.X_op != O_constant)
4771 {
4772 inst.error = _("constant expression required");
4773 return FAIL;
4774 }
4775
4776 if (exp.X_add_number < min || exp.X_add_number > max)
4777 {
4778 inst.error = _("immediate value out of range");
4779 return FAIL;
4780 }
4781
4782 *val = exp.X_add_number;
4783 return SUCCESS;
4784 }
4785
4786 /* Less-generic immediate-value read function with the possibility of loading a
4787 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4788 instructions. Puts the result directly in inst.operands[i]. */
4789
static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bfd_boolean allow_symbol_p)
{
  expressionS exp;
  /* Parse into the caller-supplied expression when given, otherwise use
     a local scratch expression.  */
  expressionS *exp_p = in_exp ? in_exp : &exp;
  char *ptr = *str;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
    {
      /* Low 32 bits of the constant go in .imm.  */
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
				  & 0xffffffff);
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
    {
      /* For O_big, X_add_number is the littlenum count.  PARTS is the
	 number of littlenums making up 32 bits.  */
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0]. Make sure we put 32 bits in imm and
	 32 bits in reg, in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Assemble the low 32 bits into .imm ...  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      /* ... and the next 32 bits into .reg.  */
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
4858
4859 /* Returns the pseudo-register number of an FPA immediate constant,
4860 or FAIL if there isn't a valid constant here. */
4861
static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char * save_in;
  expressionS exp;
  int i;
  int j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    /* FPA immediate constants are encoded as pseudo-registers
	       starting at 8, hence i + 8.  */
	    return i + 8;
	  /* Not a full match: back up and try the next candidate.  */
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Compare the parsed littlenums against each known FPA constant.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
      if (gen_to_words (words, 5, (long) 15) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  /* Restore input_line_pointer before returning.  */
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  /* No match: restore input_line_pointer and report failure.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
4949
4950 /* Returns 1 if a number has "quarter-precision" float format
4951 0baBbbbbbc defgh000 00000000 00000000. */
4952
4953 static int
4954 is_quarter_float (unsigned imm)
4955 {
4956 int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
4957 return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
4958 }
4959
4960
4961 /* Detect the presence of a floating point or integer zero constant,
4962 i.e. #0.0 or #0. */
4963
4964 static bfd_boolean
4965 parse_ifimm_zero (char **in)
4966 {
4967 int error_code;
4968
4969 if (!is_immediate_prefix (**in))
4970 return FALSE;
4971
4972 ++*in;
4973
4974 /* Accept #0x0 as a synonym for #0. */
4975 if (strncmp (*in, "0x", 2) == 0)
4976 {
4977 int val;
4978 if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
4979 return FALSE;
4980 return TRUE;
4981 }
4982
4983 error_code = atof_generic (in, ".", EXP_CHARS,
4984 &generic_floating_point_number);
4985
4986 if (!error_code
4987 && generic_floating_point_number.sign == '+'
4988 && (generic_floating_point_number.low
4989 > generic_floating_point_number.leader))
4990 return TRUE;
4991
4992 return FALSE;
4993 }
4994
4995 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4996 0baBbbbbbc defgh000 00000000 00000000.
4997 The zero and minus-zero cases need special handling, since they can't be
4998 encoded in the "quarter-precision" float format, but can nonetheless be
4999 loaded as integer constants. */
5000
static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    return FAIL;
  else
    {
      /* Scan up to the end of the token for a '.' or exponent marker;
	 without one this is an integer, not a float.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Accept quarter-precision encodable values, plus +0.0/-0.0
	 (which cannot be quarter-precision encoded but can be loaded
	 as integer constants).  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}
5058
/* Shift operands.  */
/* The five shift operations an operand may specify; RRX is rotate
   right with extend.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

/* Maps a textual shift name onto its kind; entries of this type are
   looked up via hash in parse_shift.  */
struct asm_shift_name
{
  const char *name;
  enum shift_kind kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.  */
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.  */
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.  */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.  */
};
5080
5081 /* Parse a <shift> specifier on an ARM data processing instruction.
5082 This has three forms:
5083
5084 (LSL|LSR|ASL|ASR|ROR) Rs
5085 (LSL|LSR|ASL|ASR|ROR) #imm
5086 RRX
5087
5088 Note that ASL is assimilated to LSL in the instruction encoding, and
5089 RRX to ROR #0 (which cannot be written as such). */
5090
5091 static int
5092 parse_shift (char **str, int i, enum parse_shift_mode mode)
5093 {
5094 const struct asm_shift_name *shift_name;
5095 enum shift_kind shift;
5096 char *s = *str;
5097 char *p = s;
5098 int reg;
5099
5100 for (p = *str; ISALPHA (*p); p++)
5101 ;
5102
5103 if (p == *str)
5104 {
5105 inst.error = _("shift expression expected");
5106 return FAIL;
5107 }
5108
5109 shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
5110 p - *str);
5111
5112 if (shift_name == NULL)
5113 {
5114 inst.error = _("shift expression expected");
5115 return FAIL;
5116 }
5117
5118 shift = shift_name->kind;
5119
5120 switch (mode)
5121 {
5122 case NO_SHIFT_RESTRICT:
5123 case SHIFT_IMMEDIATE: break;
5124
5125 case SHIFT_LSL_OR_ASR_IMMEDIATE:
5126 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
5127 {
5128 inst.error = _("'LSL' or 'ASR' required");
5129 return FAIL;
5130 }
5131 break;
5132
5133 case SHIFT_LSL_IMMEDIATE:
5134 if (shift != SHIFT_LSL)
5135 {
5136 inst.error = _("'LSL' required");
5137 return FAIL;
5138 }
5139 break;
5140
5141 case SHIFT_ASR_IMMEDIATE:
5142 if (shift != SHIFT_ASR)
5143 {
5144 inst.error = _("'ASR' required");
5145 return FAIL;
5146 }
5147 break;
5148
5149 default: abort ();
5150 }
5151
5152 if (shift != SHIFT_RRX)
5153 {
5154 /* Whitespace can appear here if the next thing is a bare digit. */
5155 skip_whitespace (p);
5156
5157 if (mode == NO_SHIFT_RESTRICT
5158 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5159 {
5160 inst.operands[i].imm = reg;
5161 inst.operands[i].immisreg = 1;
5162 }
5163 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5164 return FAIL;
5165 }
5166 inst.operands[i].shift_kind = shift;
5167 inst.operands[i].shifted = 1;
5168 *str = p;
5169 return SUCCESS;
5170 }
5171
5172 /* Parse a <shifter_operand> for an ARM data processing instruction:
5173
5174 #<immediate>
5175 #<immediate>, <rotate>
5176 <Rm>
5177 <Rm>, <shift>
5178
5179 where <shift> is defined by parse_shift above, and <rotate> is a
5180 multiple of 2 between 0 and 30. Validation of immediate operands
5181 is deferred to md_apply_fix. */
5182
static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS exp;

  /* Register form: <Rm> optionally followed by a shift.  */
  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  /* Immediate form: parse the constant into inst.reloc.exp.  */
  if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&exp, str, GE_NO_PREFIX))
	return FAIL;

      if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      /* The rotation must be an even value in [0, 30].  */
      value = exp.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      /* With an explicit rotation the constant itself must fit in
	 8 bits.  */
      if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Encode as specified.  */
      inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
      return SUCCESS;
    }

  /* Plain immediate: leave validation to md_apply_fix.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 0;
  return SUCCESS;
}
5241
/* Group relocation information.  Each entry in the table contains the
   textual name of the relocation as may appear in assembler source
   and must end with a colon.
   Along with this textual name are the relocation codes to be used if
   the corresponding instruction is an ALU instruction (ADD or SUB only),
   an LDR, an LDRS, or an LDC.  */

struct group_reloc_table_entry
{
  const char *name;
  int alu_code;
  int ldr_code;
  int ldrs_code;
  int ldc_code;
};

typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC
} group_reloc_type;

/* A zero code below means the relocation variety does not exist for
   that entry (the _nc "no check" groups are ALU-only).  */
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 } };	/* LDC */
5320
5321 /* Given the address of a pointer pointing to the textual name of a group
5322 relocation as may appear in assembler source, attempt to find its details
5323 in group_reloc_table. The pointer will be updated to the character after
5324 the trailing colon. On failure, FAIL will be returned; SUCCESS
5325 otherwise. On success, *entry will be updated to point at the relevant
5326 group_reloc_table entry. */
5327
5328 static int
5329 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5330 {
5331 unsigned int i;
5332 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5333 {
5334 int length = strlen (group_reloc_table[i].name);
5335
5336 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5337 && (*str)[length] == ':')
5338 {
5339 *out = &group_reloc_table[i];
5340 *str += (length + 1);
5341 return SUCCESS;
5342 }
5343 }
5344
5345 return FAIL;
5346 }
5347
5348 /* Parse a <shifter_operand> for an ARM data processing instruction
5349 (as for parse_shifter_operand) where group relocations are allowed:
5350
5351 #<immediate>
5352 #<immediate>, <rotate>
5353 #:<group_reloc>:<expression>
5354 <Rm>
5355 <Rm>, <shift>
5356
5357 where <group_reloc> is one of the strings defined in group_reloc_table.
5358 The hashes are optional.
5359
5360 Everything else is as for parse_shifter_operand. */
5361
5362 static parse_operand_result
5363 parse_shifter_operand_group_reloc (char **str, int i)
5364 {
5365 /* Determine if we have the sequence of characters #: or just :
5366 coming next. If we do, then we check for a group relocation.
5367 If we don't, punt the whole lot to parse_shifter_operand. */
5368
5369 if (((*str)[0] == '#' && (*str)[1] == ':')
5370 || (*str)[0] == ':')
5371 {
5372 struct group_reloc_table_entry *entry;
5373
5374 if ((*str)[0] == '#')
5375 (*str) += 2;
5376 else
5377 (*str)++;
5378
5379 /* Try to parse a group relocation. Anything else is an error. */
5380 if (find_group_reloc_table_entry (str, &entry) == FAIL)
5381 {
5382 inst.error = _("unknown group relocation");
5383 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5384 }
5385
5386 /* We now have the group relocation table entry corresponding to
5387 the name in the assembler source. Next, we parse the expression. */
5388 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
5389 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5390
5391 /* Record the relocation type (always the ALU variant here). */
5392 inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
5393 gas_assert (inst.reloc.type != 0);
5394
5395 return PARSE_OPERAND_SUCCESS;
5396 }
5397 else
5398 return parse_shifter_operand (str, i) == SUCCESS
5399 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5400
5401 /* Never reached. */
5402 }
5403
5404 /* Parse a Neon alignment expression. Information is written to
5405 inst.operands[i]. We assume the initial ':' has been skipped.
5406
5407 align .imm = align << 8, .immisalign=1, .preind=0 */
5408 static parse_operand_result
5409 parse_neon_alignment (char **str, int i)
5410 {
5411 char *p = *str;
5412 expressionS exp;
5413
5414 my_get_expression (&exp, &p, GE_NO_PREFIX);
5415
5416 if (exp.X_op != O_constant)
5417 {
5418 inst.error = _("alignment must be constant");
5419 return PARSE_OPERAND_FAIL;
5420 }
5421
5422 inst.operands[i].imm = exp.X_add_number << 8;
5423 inst.operands[i].immisalign = 1;
5424 /* Alignments are not pre-indexes. */
5425 inst.operands[i].preind = 0;
5426
5427 *str = p;
5428 return PARSE_OPERAND_SUCCESS;
5429 }
5430
5431 /* Parse all forms of an ARM address expression. Information is written
5432 to inst.operands[i] and/or inst.reloc.
5433
5434 Preindexed addressing (.preind=1):
5435
5436 [Rn, #offset] .reg=Rn .reloc.exp=offset
5437 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5438 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5439 .shift_kind=shift .reloc.exp=shift_imm
5440
5441 These three may have a trailing ! which causes .writeback to be set also.
5442
5443 Postindexed addressing (.postind=1, .writeback=1):
5444
5445 [Rn], #offset .reg=Rn .reloc.exp=offset
5446 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5447 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5448 .shift_kind=shift .reloc.exp=shift_imm
5449
5450 Unindexed addressing (.preind=0, .postind=0):
5451
5452 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5453
5454 Other:
5455
5456 [Rn]{!} shorthand for [Rn,#0]{!}
5457 =immediate .isreg=0 .reloc.exp=immediate
5458 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5459
5460 It is the caller's responsibility to check for addressing modes not
5461 supported by the instruction, and to set inst.reloc.type. */
5462
static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  /* No '[': either "=immediate" or a bare label/address.  */
  if (skip_past_char (&p, '[') == FAIL)
    {
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.reloc.pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;

	  if (my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX_BIG))
	    return PARSE_OPERAND_FAIL;
	}
      else if (parse_big_immediate (&p, i, &inst.reloc.exp,
				    /*allow_symbol_p=*/TRUE))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* PR gas/14887: Allow for whitespace after the opening bracket.  */
  skip_whitespace (p);

  /* The base register is mandatory inside the brackets.  */
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  /* A comma introduces a pre-indexed offset: register, Neon alignment,
     group relocation or immediate.  */
  if (skip_past_comma (&p) == SUCCESS)
    {
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  /* Register offset, possibly with a shift.  */
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here. This may be subject to
	     change.  */
	  parse_operand_result result = parse_neon_alignment (&p, i);

	  if (result != PARSE_OPERAND_SUCCESS)
	    return result;
	}
      else
	{
	  /* Not a register: undo the sign-consumption above so the
	     expression parser sees any '-' itself.  */
	  if (inst.operands[i].negative)
	    {
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		  case GROUP_LDR:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
		    break;

		  case GROUP_LDRS:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
		    break;

		  case GROUP_LDC:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
		    break;

		  default:
		    gas_assert (0);
		}

	      /* A zero code in the table means this group relocation has
		 no variant for this instruction class.  */
	      if (inst.reloc.type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    {
	      char *q = p;
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }
  else if (skip_past_char (&p, ':') == SUCCESS)
    {
      /* FIXME: '@' should be used here, but it's filtered out by generic code
	 before we get to see it here. This may be subject to change.  */
      parse_operand_result result = parse_neon_alignment (&p, i);

      if (result != PARSE_OPERAND_SUCCESS)
	return result;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  /* '!' after the bracket requests writeback.  */
  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  /* A comma after the bracket introduces a post-indexed offset or an
     unindexed {option} field.  */
  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already. If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      char *q = p;
	      /* As in the pre-indexed case, undo sign consumption before
		 handing the text to the expression parser.  */
	      if (inst.operands[i].negative)
		{
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
5714
5715 static int
5716 parse_address (char **str, int i)
5717 {
5718 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5719 ? SUCCESS : FAIL;
5720 }
5721
5722 static parse_operand_result
5723 parse_address_group_reloc (char **str, int i, group_reloc_type type)
5724 {
5725 return parse_address_main (str, i, 1, type);
5726 }
5727
5728 /* Parse an operand for a MOVW or MOVT instruction. */
static int
parse_half (char **str)
{
  char * p;

  p = *str;
  skip_past_char (&p, '#');
  /* NOTE(review): this relies on inst.reloc.type being BFD_RELOC_UNUSED
     on entry -- confirm against the operand-parsing caller.  */
  if (strncasecmp (p, ":lower16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVW;
  else if (strncasecmp (p, ":upper16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVT;

  /* If one of the prefixes matched, skip its nine characters.  */
  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      p += 9;
      skip_whitespace (p);
    }

  if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
    return FAIL;

  /* Without :lower16:/:upper16: the operand must be a constant fitting
     in 16 bits.  */
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      if (inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0
	  || inst.reloc.exp.X_add_number > 0xffff)
	{
	  inst.error = _("immediate value out of range");
	  return FAIL;
	}
    }
  *str = p;
  return SUCCESS;
}
5767
5768 /* Miscellaneous. */
5769
/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
   or a bitmask suitable to be or-ed into the ARM msr instruction.
   LHS is TRUE when the PSR is the destination of an MSR, i.e. is being
   written (see the unadorned-APSR handling below).  */
static int
parse_psr (char **str, bfd_boolean lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bfd_boolean is_apsr = FALSE;
  /* M-profile cores use their own table of named special registers.  */
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698:  If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    m_profile = FALSE;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */
      is_apsr = TRUE;
      psr_field = 0;
    }
  else if (m_profile)
    {
      /* Scan the whole register name (letters, digits, underscores).  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* For names containing "psr", look up only the part up to and
	 including the first 'r'/'R', leaving any suffix for the
	 check_suffix code below.  */
      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified.  Note that
	 APSR itself is handled above.  */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = TRUE;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  /* Step over the four-character SPSR/CPSR/APSR name just matched.  */
  p += 4;
check_suffix:
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  /* Each flag may be given at most once: a repeat sets the 0x20
	     marker (0x2 for 'g'), which the validation below rejects.  */
	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  if (g_bit == 0x1)
	    {
	      /* The GE bits ('g') are only writable on cores with the
		 DSP extension.  */
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* The nzcvq flags must be given either all together or not at
	     all; duplicates of any flag are also rejected here.  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  /* A-profile suffix: look up a named field such as _c or _f.  */
	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						      p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;  /* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here).  */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
5966
5967 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5968 value suitable for splatting into the AIF field of the instruction. */
5969
5970 static int
5971 parse_cps_flags (char **str)
5972 {
5973 int val = 0;
5974 int saw_a_flag = 0;
5975 char *s = *str;
5976
5977 for (;;)
5978 switch (*s++)
5979 {
5980 case '\0': case ',':
5981 goto done;
5982
5983 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
5984 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
5985 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
5986
5987 default:
5988 inst.error = _("unrecognized CPS flag");
5989 return FAIL;
5990 }
5991
5992 done:
5993 if (saw_a_flag == 0)
5994 {
5995 inst.error = _("missing CPS flags");
5996 return FAIL;
5997 }
5998
5999 *str = s - 1;
6000 return val;
6001 }
6002
6003 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6004 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6005
6006 static int
6007 parse_endian_specifier (char **str)
6008 {
6009 int little_endian;
6010 char *s = *str;
6011
6012 if (strncasecmp (s, "BE", 2))
6013 little_endian = 0;
6014 else if (strncasecmp (s, "LE", 2))
6015 little_endian = 1;
6016 else
6017 {
6018 inst.error = _("valid endian specifiers are be or le");
6019 return FAIL;
6020 }
6021
6022 if (ISALNUM (s[2]) || s[2] == '_')
6023 {
6024 inst.error = _("valid endian specifiers are be or le");
6025 return FAIL;
6026 }
6027
6028 *str = s + 2;
6029 return little_endian;
6030 }
6031
6032 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6033 value suitable for poking into the rotate field of an sxt or sxta
6034 instruction, or FAIL on error. */
6035
6036 static int
6037 parse_ror (char **str)
6038 {
6039 int rot;
6040 char *s = *str;
6041
6042 if (strncasecmp (s, "ROR", 3) == 0)
6043 s += 3;
6044 else
6045 {
6046 inst.error = _("missing rotation field after comma");
6047 return FAIL;
6048 }
6049
6050 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6051 return FAIL;
6052
6053 switch (rot)
6054 {
6055 case 0: *str = s; return 0x0;
6056 case 8: *str = s; return 0x1;
6057 case 16: *str = s; return 0x2;
6058 case 24: *str = s; return 0x3;
6059
6060 default:
6061 inst.error = _("rotation can only be 0, 8, 16, or 24");
6062 return FAIL;
6063 }
6064 }
6065
6066 /* Parse a conditional code (from conds[] below). The value returned is in the
6067 range 0 .. 14, or FAIL. */
6068 static int
6069 parse_cond (char **str)
6070 {
6071 char *q;
6072 const struct asm_cond *c;
6073 int n;
6074 /* Condition codes are always 2 characters, so matching up to
6075 3 characters is sufficient. */
6076 char cond[3];
6077
6078 q = *str;
6079 n = 0;
6080 while (ISALPHA (*q) && n < 3)
6081 {
6082 cond[n] = TOLOWER (*q);
6083 q++;
6084 n++;
6085 }
6086
6087 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6088 if (!c)
6089 {
6090 inst.error = _("condition required");
6091 return FAIL;
6092 }
6093
6094 *str = q;
6095 return c->value;
6096 }
6097
/* If the given feature is available in the selected CPU, mark it as used.
   Returns TRUE iff the feature is available.  */
static bfd_boolean
mark_feature_used (const arm_feature_set *feature)
{
  /* Ensure the option is valid on the current architecture.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
    return FALSE;

  /* Add the appropriate architecture feature for the barrier option used.
     The feature is merged into thumb_arch_used when assembling Thumb
     code, otherwise into arm_arch_used.  */
  if (thumb_mode)
    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
  else
    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);

  return TRUE;
}
6116
6117 /* Parse an option for a barrier instruction. Returns the encoding for the
6118 option, or FAIL. */
6119 static int
6120 parse_barrier (char **str)
6121 {
6122 char *p, *q;
6123 const struct asm_barrier_opt *o;
6124
6125 p = q = *str;
6126 while (ISALPHA (*q))
6127 q++;
6128
6129 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6130 q - p);
6131 if (!o)
6132 return FAIL;
6133
6134 if (!mark_feature_used (&o->arch))
6135 return FAIL;
6136
6137 *str = q;
6138 return o->value;
6139 }
6140
6141 /* Parse the operands of a table branch instruction. Similar to a memory
6142 operand. */
6143 static int
6144 parse_tb (char **str)
6145 {
6146 char * p = *str;
6147 int reg;
6148
6149 if (skip_past_char (&p, '[') == FAIL)
6150 {
6151 inst.error = _("'[' expected");
6152 return FAIL;
6153 }
6154
6155 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6156 {
6157 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6158 return FAIL;
6159 }
6160 inst.operands[0].reg = reg;
6161
6162 if (skip_past_comma (&p) == FAIL)
6163 {
6164 inst.error = _("',' expected");
6165 return FAIL;
6166 }
6167
6168 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6169 {
6170 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6171 return FAIL;
6172 }
6173 inst.operands[0].imm = reg;
6174
6175 if (skip_past_comma (&p) == SUCCESS)
6176 {
6177 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6178 return FAIL;
6179 if (inst.reloc.exp.X_add_number != 1)
6180 {
6181 inst.error = _("invalid shift");
6182 return FAIL;
6183 }
6184 inst.operands[0].shifted = 1;
6185 }
6186
6187 if (skip_past_char (&p, ']') == FAIL)
6188 {
6189 inst.error = _("']' expected");
6190 return FAIL;
6191 }
6192 *str = p;
6193 return SUCCESS;
6194 }
6195
/* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
   information on the types the operands can take and how they are encoded.
   Up to four operands may be read; this function handles setting the
   ".present" field for each read operand itself.
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   else returns FAIL.

   Operands are written into inst.operands[] starting at *WHICH_OPERAND;
   the type of the first operand narrows which VMOV shapes remain
   possible, hence the cascade of parse attempts below.  */

static int
parse_neon_mov (char **str, int *which_operand)
{
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      /* A quad register cannot transfer to/from core registers
		 in these forms.  */
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* D register: a second core register must follow.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15: two core registers follow.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
	       == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>
	     (parse_big_immediate fills in the operand itself).  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7: first operand is a core register.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14: a second single-precision register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands. Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

  /* Shared error exits.  */
 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
6418
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  The ARM matcher code occupies the
   low 16 bits and the Thumb code the high 16 bits; parse_operands
   selects the relevant half according to its THUMB argument.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))
6423
/* Matcher codes for parse_operands.  Codes with an "OP_o" prefix denote
   optional operands; they are deliberately grouped together at the end
   of the enum so that OP_FIRST_OPTIONAL (below) can mark where the
   optional range begins.  Values at or above 1<<16 are mixed ARM/Thumb
   codes built with MIX_ARM_THUMB_OPERANDS.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,       /* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,      /* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,      /* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,   /* VFP single or double-precision register list (& quad) */
  OP_NRDLST,    /* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,   /* Neon element/structure list */

  OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0, /* VFP S or D reg, or floating point immediate zero.  */
  OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
  OP_VMOV,      /* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,        /* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,      /*                 0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,   /* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,      /*                             1 .. 32 */
  OP_oI32z,      /*                             0 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,       /* Optional Neon double precision register */
  OP_oRNQ,       /* Optional Neon quad precision register */
  OP_oRNDQ,      /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  /* parse_operands allows backtracking from this point on: every code
     from OP_oI7b onwards must be optional.  */
  OP_FIRST_OPTIONAL = OP_oI7b
};
6554
6555 /* Generic instruction operand parser. This does no encoding and no
6556 semantic validation; it merely squirrels values away in the inst
6557 structure. Returns SUCCESS or FAIL depending on whether the
6558 specified grammar matched. */
6559 static int
6560 parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
6561 {
6562 unsigned const int *upat = pattern;
6563 char *backtrack_pos = 0;
6564 const char *backtrack_error = 0;
6565 int i, val = 0, backtrack_index = 0;
6566 enum arm_reg_type rtype;
6567 parse_operand_result result;
6568 unsigned int op_parse_code;
6569
6570 #define po_char_or_fail(chr) \
6571 do \
6572 { \
6573 if (skip_past_char (&str, chr) == FAIL) \
6574 goto bad_args; \
6575 } \
6576 while (0)
6577
6578 #define po_reg_or_fail(regtype) \
6579 do \
6580 { \
6581 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6582 & inst.operands[i].vectype); \
6583 if (val == FAIL) \
6584 { \
6585 first_error (_(reg_expected_msgs[regtype])); \
6586 goto failure; \
6587 } \
6588 inst.operands[i].reg = val; \
6589 inst.operands[i].isreg = 1; \
6590 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6591 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6592 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6593 || rtype == REG_TYPE_VFD \
6594 || rtype == REG_TYPE_NQ); \
6595 } \
6596 while (0)
6597
6598 #define po_reg_or_goto(regtype, label) \
6599 do \
6600 { \
6601 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6602 & inst.operands[i].vectype); \
6603 if (val == FAIL) \
6604 goto label; \
6605 \
6606 inst.operands[i].reg = val; \
6607 inst.operands[i].isreg = 1; \
6608 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6609 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6610 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6611 || rtype == REG_TYPE_VFD \
6612 || rtype == REG_TYPE_NQ); \
6613 } \
6614 while (0)
6615
6616 #define po_imm_or_fail(min, max, popt) \
6617 do \
6618 { \
6619 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6620 goto failure; \
6621 inst.operands[i].imm = val; \
6622 } \
6623 while (0)
6624
6625 #define po_scalar_or_goto(elsz, label) \
6626 do \
6627 { \
6628 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6629 if (val == FAIL) \
6630 goto label; \
6631 inst.operands[i].reg = val; \
6632 inst.operands[i].isscalar = 1; \
6633 } \
6634 while (0)
6635
6636 #define po_misc_or_fail(expr) \
6637 do \
6638 { \
6639 if (expr) \
6640 goto failure; \
6641 } \
6642 while (0)
6643
6644 #define po_misc_or_fail_no_backtrack(expr) \
6645 do \
6646 { \
6647 result = expr; \
6648 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6649 backtrack_pos = 0; \
6650 if (result != PARSE_OPERAND_SUCCESS) \
6651 goto failure; \
6652 } \
6653 while (0)
6654
6655 #define po_barrier_or_imm(str) \
6656 do \
6657 { \
6658 val = parse_barrier (&str); \
6659 if (val == FAIL && ! ISALPHA (*str)) \
6660 goto immediate; \
6661 if (val == FAIL \
6662 /* ISB can only take SY as an option. */ \
6663 || ((inst.instruction & 0xf0) == 0x60 \
6664 && val != 0xf)) \
6665 { \
6666 inst.error = _("invalid barrier type"); \
6667 backtrack_pos = 0; \
6668 goto failure; \
6669 } \
6670 } \
6671 while (0)
6672
6673 skip_whitespace (str);
6674
6675 for (i = 0; upat[i] != OP_stop; i++)
6676 {
6677 op_parse_code = upat[i];
6678 if (op_parse_code >= 1<<16)
6679 op_parse_code = thumb ? (op_parse_code >> 16)
6680 : (op_parse_code & ((1<<16)-1));
6681
6682 if (op_parse_code >= OP_FIRST_OPTIONAL)
6683 {
6684 /* Remember where we are in case we need to backtrack. */
6685 gas_assert (!backtrack_pos);
6686 backtrack_pos = str;
6687 backtrack_error = inst.error;
6688 backtrack_index = i;
6689 }
6690
6691 if (i > 0 && (i > 1 || inst.operands[0].present))
6692 po_char_or_fail (',');
6693
6694 switch (op_parse_code)
6695 {
6696 /* Registers */
6697 case OP_oRRnpc:
6698 case OP_oRRnpcsp:
6699 case OP_RRnpc:
6700 case OP_RRnpcsp:
6701 case OP_oRR:
6702 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
6703 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
6704 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
6705 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
6706 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
6707 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
6708 case OP_oRND:
6709 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
6710 case OP_RVC:
6711 po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
6712 break;
6713 /* Also accept generic coprocessor regs for unknown registers. */
6714 coproc_reg:
6715 po_reg_or_fail (REG_TYPE_CN);
6716 break;
6717 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
6718 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
6719 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
6720 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
6721 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
6722 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
6723 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
6724 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
6725 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
6726 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
6727 case OP_oRNQ:
6728 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
6729 case OP_oRNDQ:
6730 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
6731 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
6732 case OP_oRNSDQ:
6733 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
6734
6735 /* Neon scalar. Using an element size of 8 means that some invalid
6736 scalars are accepted here, so deal with those in later code. */
6737 case OP_RNSC: po_scalar_or_goto (8, failure); break;
6738
6739 case OP_RNDQ_I0:
6740 {
6741 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
6742 break;
6743 try_imm0:
6744 po_imm_or_fail (0, 0, TRUE);
6745 }
6746 break;
6747
6748 case OP_RVSD_I0:
6749 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
6750 break;
6751
6752 case OP_RSVD_FI0:
6753 {
6754 po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
6755 break;
6756 try_ifimm0:
6757 if (parse_ifimm_zero (&str))
6758 inst.operands[i].imm = 0;
6759 else
6760 {
6761 inst.error
6762 = _("only floating point zero is allowed as immediate value");
6763 goto failure;
6764 }
6765 }
6766 break;
6767
6768 case OP_RR_RNSC:
6769 {
6770 po_scalar_or_goto (8, try_rr);
6771 break;
6772 try_rr:
6773 po_reg_or_fail (REG_TYPE_RN);
6774 }
6775 break;
6776
6777 case OP_RNSDQ_RNSC:
6778 {
6779 po_scalar_or_goto (8, try_nsdq);
6780 break;
6781 try_nsdq:
6782 po_reg_or_fail (REG_TYPE_NSDQ);
6783 }
6784 break;
6785
6786 case OP_RNDQ_RNSC:
6787 {
6788 po_scalar_or_goto (8, try_ndq);
6789 break;
6790 try_ndq:
6791 po_reg_or_fail (REG_TYPE_NDQ);
6792 }
6793 break;
6794
6795 case OP_RND_RNSC:
6796 {
6797 po_scalar_or_goto (8, try_vfd);
6798 break;
6799 try_vfd:
6800 po_reg_or_fail (REG_TYPE_VFD);
6801 }
6802 break;
6803
6804 case OP_VMOV:
6805 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6806 not careful then bad things might happen. */
6807 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
6808 break;
6809
6810 case OP_RNDQ_Ibig:
6811 {
6812 po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
6813 break;
6814 try_immbig:
6815 /* There's a possibility of getting a 64-bit immediate here, so
6816 we need special handling. */
6817 if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
6818 == FAIL)
6819 {
6820 inst.error = _("immediate value is out of range");
6821 goto failure;
6822 }
6823 }
6824 break;
6825
6826 case OP_RNDQ_I63b:
6827 {
6828 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
6829 break;
6830 try_shimm:
6831 po_imm_or_fail (0, 63, TRUE);
6832 }
6833 break;
6834
6835 case OP_RRnpcb:
6836 po_char_or_fail ('[');
6837 po_reg_or_fail (REG_TYPE_RN);
6838 po_char_or_fail (']');
6839 break;
6840
6841 case OP_RRnpctw:
6842 case OP_RRw:
6843 case OP_oRRw:
6844 po_reg_or_fail (REG_TYPE_RN);
6845 if (skip_past_char (&str, '!') == SUCCESS)
6846 inst.operands[i].writeback = 1;
6847 break;
6848
6849 /* Immediates */
6850 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
6851 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
6852 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
6853 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
6854 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
6855 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
6856 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
6857 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
6858 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
6859 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
6860 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
6861 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
6862
6863 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
6864 case OP_oI7b:
6865 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
6866 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
6867 case OP_oI31b:
6868 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
6869 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
6870 case OP_oI32z: po_imm_or_fail ( 0, 32, TRUE); break;
6871 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
6872
6873 /* Immediate variants */
6874 case OP_oI255c:
6875 po_char_or_fail ('{');
6876 po_imm_or_fail (0, 255, TRUE);
6877 po_char_or_fail ('}');
6878 break;
6879
6880 case OP_I31w:
6881 /* The expression parser chokes on a trailing !, so we have
6882 to find it first and zap it. */
6883 {
6884 char *s = str;
6885 while (*s && *s != ',')
6886 s++;
6887 if (s[-1] == '!')
6888 {
6889 s[-1] = '\0';
6890 inst.operands[i].writeback = 1;
6891 }
6892 po_imm_or_fail (0, 31, TRUE);
6893 if (str == s - 1)
6894 str = s;
6895 }
6896 break;
6897
6898 /* Expressions */
6899 case OP_EXPi: EXPi:
6900 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6901 GE_OPT_PREFIX));
6902 break;
6903
6904 case OP_EXP:
6905 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6906 GE_NO_PREFIX));
6907 break;
6908
6909 case OP_EXPr: EXPr:
6910 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6911 GE_NO_PREFIX));
6912 if (inst.reloc.exp.X_op == O_symbol)
6913 {
6914 val = parse_reloc (&str);
6915 if (val == -1)
6916 {
6917 inst.error = _("unrecognized relocation suffix");
6918 goto failure;
6919 }
6920 else if (val != BFD_RELOC_UNUSED)
6921 {
6922 inst.operands[i].imm = val;
6923 inst.operands[i].hasreloc = 1;
6924 }
6925 }
6926 break;
6927
6928 /* Operand for MOVW or MOVT. */
6929 case OP_HALF:
6930 po_misc_or_fail (parse_half (&str));
6931 break;
6932
6933 /* Register or expression. */
6934 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
6935 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
6936
6937 /* Register or immediate. */
6938 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
6939 I0: po_imm_or_fail (0, 0, FALSE); break;
6940
6941 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
6942 IF:
6943 if (!is_immediate_prefix (*str))
6944 goto bad_args;
6945 str++;
6946 val = parse_fpa_immediate (&str);
6947 if (val == FAIL)
6948 goto failure;
6949 /* FPA immediates are encoded as registers 8-15.
6950 parse_fpa_immediate has already applied the offset. */
6951 inst.operands[i].reg = val;
6952 inst.operands[i].isreg = 1;
6953 break;
6954
6955 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
6956 I32z: po_imm_or_fail (0, 32, FALSE); break;
6957
6958 /* Two kinds of register. */
6959 case OP_RIWR_RIWC:
6960 {
6961 struct reg_entry *rege = arm_reg_parse_multi (&str);
6962 if (!rege
6963 || (rege->type != REG_TYPE_MMXWR
6964 && rege->type != REG_TYPE_MMXWC
6965 && rege->type != REG_TYPE_MMXWCG))
6966 {
6967 inst.error = _("iWMMXt data or control register expected");
6968 goto failure;
6969 }
6970 inst.operands[i].reg = rege->number;
6971 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
6972 }
6973 break;
6974
6975 case OP_RIWC_RIWG:
6976 {
6977 struct reg_entry *rege = arm_reg_parse_multi (&str);
6978 if (!rege
6979 || (rege->type != REG_TYPE_MMXWC
6980 && rege->type != REG_TYPE_MMXWCG))
6981 {
6982 inst.error = _("iWMMXt control register expected");
6983 goto failure;
6984 }
6985 inst.operands[i].reg = rege->number;
6986 inst.operands[i].isreg = 1;
6987 }
6988 break;
6989
6990 /* Misc */
6991 case OP_CPSF: val = parse_cps_flags (&str); break;
6992 case OP_ENDI: val = parse_endian_specifier (&str); break;
6993 case OP_oROR: val = parse_ror (&str); break;
6994 case OP_COND: val = parse_cond (&str); break;
6995 case OP_oBARRIER_I15:
6996 po_barrier_or_imm (str); break;
6997 immediate:
6998 if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
6999 goto failure;
7000 break;
7001
7002 case OP_wPSR:
7003 case OP_rPSR:
7004 po_reg_or_goto (REG_TYPE_RNB, try_psr);
7005 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
7006 {
7007 inst.error = _("Banked registers are not available with this "
7008 "architecture.");
7009 goto failure;
7010 }
7011 break;
7012 try_psr:
7013 val = parse_psr (&str, op_parse_code == OP_wPSR);
7014 break;
7015
7016 case OP_APSR_RR:
7017 po_reg_or_goto (REG_TYPE_RN, try_apsr);
7018 break;
7019 try_apsr:
7020 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7021 instruction). */
7022 if (strncasecmp (str, "APSR_", 5) == 0)
7023 {
7024 unsigned found = 0;
7025 str += 5;
7026 while (found < 15)
7027 switch (*str++)
7028 {
7029 case 'c': found = (found & 1) ? 16 : found | 1; break;
7030 case 'n': found = (found & 2) ? 16 : found | 2; break;
7031 case 'z': found = (found & 4) ? 16 : found | 4; break;
7032 case 'v': found = (found & 8) ? 16 : found | 8; break;
7033 default: found = 16;
7034 }
7035 if (found != 15)
7036 goto failure;
7037 inst.operands[i].isvec = 1;
7038 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7039 inst.operands[i].reg = REG_PC;
7040 }
7041 else
7042 goto failure;
7043 break;
7044
7045 case OP_TB:
7046 po_misc_or_fail (parse_tb (&str));
7047 break;
7048
7049 /* Register lists. */
7050 case OP_REGLST:
7051 val = parse_reg_list (&str);
7052 if (*str == '^')
7053 {
7054 inst.operands[i].writeback = 1;
7055 str++;
7056 }
7057 break;
7058
7059 case OP_VRSLST:
7060 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
7061 break;
7062
7063 case OP_VRDLST:
7064 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
7065 break;
7066
7067 case OP_VRSDLST:
7068 /* Allow Q registers too. */
7069 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7070 REGLIST_NEON_D);
7071 if (val == FAIL)
7072 {
7073 inst.error = NULL;
7074 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7075 REGLIST_VFP_S);
7076 inst.operands[i].issingle = 1;
7077 }
7078 break;
7079
7080 case OP_NRDLST:
7081 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7082 REGLIST_NEON_D);
7083 break;
7084
7085 case OP_NSTRLST:
7086 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7087 &inst.operands[i].vectype);
7088 break;
7089
7090 /* Addressing modes */
7091 case OP_ADDR:
7092 po_misc_or_fail (parse_address (&str, i));
7093 break;
7094
7095 case OP_ADDRGLDR:
7096 po_misc_or_fail_no_backtrack (
7097 parse_address_group_reloc (&str, i, GROUP_LDR));
7098 break;
7099
7100 case OP_ADDRGLDRS:
7101 po_misc_or_fail_no_backtrack (
7102 parse_address_group_reloc (&str, i, GROUP_LDRS));
7103 break;
7104
7105 case OP_ADDRGLDC:
7106 po_misc_or_fail_no_backtrack (
7107 parse_address_group_reloc (&str, i, GROUP_LDC));
7108 break;
7109
7110 case OP_SH:
7111 po_misc_or_fail (parse_shifter_operand (&str, i));
7112 break;
7113
7114 case OP_SHG:
7115 po_misc_or_fail_no_backtrack (
7116 parse_shifter_operand_group_reloc (&str, i));
7117 break;
7118
7119 case OP_oSHll:
7120 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
7121 break;
7122
7123 case OP_oSHar:
7124 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
7125 break;
7126
7127 case OP_oSHllar:
7128 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
7129 break;
7130
7131 default:
7132 as_fatal (_("unhandled operand code %d"), op_parse_code);
7133 }
7134
7135 /* Various value-based sanity checks and shared operations. We
7136 do not signal immediate failures for the register constraints;
7137 this allows a syntax error to take precedence. */
7138 switch (op_parse_code)
7139 {
7140 case OP_oRRnpc:
7141 case OP_RRnpc:
7142 case OP_RRnpcb:
7143 case OP_RRw:
7144 case OP_oRRw:
7145 case OP_RRnpc_I0:
7146 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
7147 inst.error = BAD_PC;
7148 break;
7149
7150 case OP_oRRnpcsp:
7151 case OP_RRnpcsp:
7152 if (inst.operands[i].isreg)
7153 {
7154 if (inst.operands[i].reg == REG_PC)
7155 inst.error = BAD_PC;
7156 else if (inst.operands[i].reg == REG_SP)
7157 inst.error = BAD_SP;
7158 }
7159 break;
7160
7161 case OP_RRnpctw:
7162 if (inst.operands[i].isreg
7163 && inst.operands[i].reg == REG_PC
7164 && (inst.operands[i].writeback || thumb))
7165 inst.error = BAD_PC;
7166 break;
7167
7168 case OP_CPSF:
7169 case OP_ENDI:
7170 case OP_oROR:
7171 case OP_wPSR:
7172 case OP_rPSR:
7173 case OP_COND:
7174 case OP_oBARRIER_I15:
7175 case OP_REGLST:
7176 case OP_VRSLST:
7177 case OP_VRDLST:
7178 case OP_VRSDLST:
7179 case OP_NRDLST:
7180 case OP_NSTRLST:
7181 if (val == FAIL)
7182 goto failure;
7183 inst.operands[i].imm = val;
7184 break;
7185
7186 default:
7187 break;
7188 }
7189
7190 /* If we get here, this operand was successfully parsed. */
7191 inst.operands[i].present = 1;
7192 continue;
7193
7194 bad_args:
7195 inst.error = BAD_ARGS;
7196
7197 failure:
7198 if (!backtrack_pos)
7199 {
7200 /* The parse routine should already have set inst.error, but set a
7201 default here just in case. */
7202 if (!inst.error)
7203 inst.error = _("syntax error");
7204 return FAIL;
7205 }
7206
7207 /* Do not backtrack over a trailing optional argument that
7208 absorbed some text. We will only fail again, with the
7209 'garbage following instruction' error message, which is
7210 probably less helpful than the current one. */
7211 if (backtrack_index == i && backtrack_pos != str
7212 && upat[i+1] == OP_stop)
7213 {
7214 if (!inst.error)
7215 inst.error = _("syntax error");
7216 return FAIL;
7217 }
7218
7219 /* Try again, skipping the optional argument at backtrack_pos. */
7220 str = backtrack_pos;
7221 inst.error = backtrack_error;
7222 inst.operands[backtrack_index].present = 0;
7223 i = backtrack_index;
7224 backtrack_pos = 0;
7225 }
7226
7227 /* Check that we have parsed all the arguments. */
7228 if (*str != '\0' && !inst.error)
7229 inst.error = _("garbage following instruction");
7230
7231 return inst.error ? FAIL : SUCCESS;
7232 }
7233
/* The operand-parsing helper macros are only meaningful inside the
   parser above; remove them so they cannot leak into the encoder
   functions that follow.  */
#undef po_char_or_fail
#undef po_reg_or_fail
#undef po_reg_or_goto
#undef po_imm_or_fail
#undef po_scalar_or_fail
#undef po_barrier_or_imm

/* Shorthand macro for instruction encoding functions issuing errors.
   Sets inst.error and returns from the *enclosing* (void) function
   when EXPR is true.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)

/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.
   Like constraint(), this returns from the enclosing function.  */
#define reject_bad_reg(reg)				\
  do							\
   if (reg == REG_SP || reg == REG_PC)			\
     {							\
       inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC;	\
       return;						\
     }							\
  while (0)

/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  Diagnostic only; does not return.  */
#define warn_deprecated_sp(reg)			\
  do						\
    if (warn_on_deprecated && reg == REG_SP)	\
       as_tsktsk (_("use of r13 is deprecated"));	\
  while (0)
7272
/* Functions for operand encoding.  ARM, then Thumb.  */

/* Rotate the 32-bit value V left by N bits (N taken modulo 32).
   Arguments are fully parenthesized so the macro is safe with
   compound expressions; note that V is still evaluated twice.  */
#define rotate_left(v, n) (((v) << ((n) & 31)) | ((v) >> ((32 - (n)) & 31)))
7276
7277 /* If VAL can be encoded in the immediate field of an ARM instruction,
7278 return the encoded form. Otherwise, return FAIL. */
7279
7280 static unsigned int
7281 encode_arm_immediate (unsigned int val)
7282 {
7283 unsigned int a, i;
7284
7285 for (i = 0; i < 32; i += 2)
7286 if ((a = rotate_left (val, i)) <= 0xff)
7287 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
7288
7289 return FAIL;
7290 }
7291
7292 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7293 return the encoded form. Otherwise, return FAIL. */
7294 static unsigned int
7295 encode_thumb32_immediate (unsigned int val)
7296 {
7297 unsigned int a, i;
7298
7299 if (val <= 0xff)
7300 return val;
7301
7302 for (i = 1; i <= 24; i++)
7303 {
7304 a = val >> i;
7305 if ((val & ~(0xff << i)) == 0)
7306 return ((val >> i) & 0x7f) | ((32 - i) << 7);
7307 }
7308
7309 a = val & 0xff;
7310 if (val == ((a << 16) | a))
7311 return 0x100 | a;
7312 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
7313 return 0x300 | a;
7314
7315 a = val & 0xff00;
7316 if (val == ((a << 16) | a))
7317 return 0x200 | (a >> 8);
7318
7319 return FAIL;
7320 }
/* Encode a VFP SP or DP register number REG into inst.instruction at
   position POS.  Records use of (or diagnoses the absence of) the
   D16-D31 register-bank extension.  */

static void
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
{
  /* D16-D31 require the VFP d32 extension.  */
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      && reg > 15)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  /* Note the extension as used, for the build attributes.  */
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	{
	  first_error (_("D register out of range for selected VFP version"));
	  return;
	}
    }

  /* Single-precision numbers put their low bit in a separate field
     (D/N/M); double-precision numbers split out the high bit.  */
  switch (pos)
    {
    case VFP_REG_Sd:
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      break;

    case VFP_REG_Sn:
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      break;

    case VFP_REG_Sm:
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      break;

    case VFP_REG_Dd:
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      break;

    case VFP_REG_Dn:
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      break;

    case VFP_REG_Dm:
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
      break;

    default:
      abort ();
    }
}
7375
7376 /* Encode a <shift> in an ARM-format instruction. The immediate,
7377 if any, is handled by md_apply_fix. */
7378 static void
7379 encode_arm_shift (int i)
7380 {
7381 if (inst.operands[i].shift_kind == SHIFT_RRX)
7382 inst.instruction |= SHIFT_ROR << 5;
7383 else
7384 {
7385 inst.instruction |= inst.operands[i].shift_kind << 5;
7386 if (inst.operands[i].immisreg)
7387 {
7388 inst.instruction |= SHIFT_BY_REG;
7389 inst.instruction |= inst.operands[i].imm << 8;
7390 }
7391 else
7392 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7393 }
7394 }
7395
7396 static void
7397 encode_arm_shifter_operand (int i)
7398 {
7399 if (inst.operands[i].isreg)
7400 {
7401 inst.instruction |= inst.operands[i].reg;
7402 encode_arm_shift (i);
7403 }
7404 else
7405 {
7406 inst.instruction |= INST_IMMEDIATE;
7407 if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
7408 inst.instruction |= inst.operands[i].imm;
7409 }
7410 }
7411
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encode the base register and indexing mode of operand I into
   inst.instruction, diagnosing combinations the instruction cannot
   express.  IS_T is true for T-variant (ldrt/strt style) loads and
   stores, which only accept post-indexed addressing.  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* PR 14260:
     Generate an error if the operand is not a register.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  /* Base register Rn lives in bits 19:16.  */
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      /* parse_address only produces postind together with writeback.  */
      gas_assert (inst.operands[i].writeback);
      /* For T instructions the W bit selects the user-mode variant.  */
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Warn when the transfer register (bits 15:12) is the same as a
     base register that will be written back (bits 19:16).  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
7454
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset form: [Rn, +/-Rm {, shift}].  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      inst.instruction |= INST_IMMEDIATE;	/* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    /* RRX is ROR with an implicit zero amount.  */
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.reloc */
    {
      if (is_pc && !inst.reloc.pc_rel)
	{
	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_tsktsk (_("use of PC in this instruction is deprecated"));
	}

      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;
	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
	}
    }
}
7514
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no shift field, so scaled register offsets are illegal.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset form: [Rn, +/-Rm].  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_t && inst.operands[i].reg == REG_PC)),
		  BAD_PC_ADDRESSING);
      constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
		  BAD_PC_WRITEBACK);
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.reloc */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      /* Select the split 8-bit immediate form.  */
      inst.instruction |= HWOFFSET_IMM;
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;

	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
	}
    }
}
7558
7559 /* Write immediate bits [7:0] to the following locations:
7560
7561 |28/24|23 19|18 16|15 4|3 0|
7562 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7563
7564 This function is used by VMOV/VMVN/VORR/VBIC. */
7565
7566 static void
7567 neon_write_immbits (unsigned immbits)
7568 {
7569 inst.instruction |= immbits & 0xf;
7570 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
7571 inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
7572 }
7573
/* Invert low-order SIZE bits of XHI:XLO.  Either pointer may be NULL,
   in which case that half is treated as zero and not written back.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned lo = xlo ? *xlo : 0;
  unsigned hi = xhi ? *xhi : 0;

  switch (size)
    {
    case 8:
      lo = ~lo & 0xff;
      break;

    case 16:
      lo = ~lo & 0xffff;
      break;

    case 64:
      hi = ~hi & 0xffffffff;
      /* fall through.  */

    case 32:
      lo = ~lo & 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = lo;
  if (xhi)
    *xhi = hi;
}
7610
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D — i.e. each byte is either all-zeros or all-ones.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  int byte;

  for (byte = 0; byte < 4; byte++)
    {
      unsigned field = (imm >> (byte * 8)) & 0xff;

      if (field != 0 && field != 0xff)
	return 0;
    }

  return 1;
}
7622
/* For immediate of above form, return 0bABCD: the low bit of each
   byte of IMM, packed into the low four result bits.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned result = 0;
  int byte;

  for (byte = 0; byte < 4; byte++)
    result |= ((imm >> (byte * 8)) & 1) << byte;

  return result;
}
7631
/* Compress quarter-float representation to 0b...000 abcdefgh: bit 31
   of IMM becomes result bit 7, bits 25:19 become result bits 6:0.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned low7 = (imm >> 19) & 0x7f;
  unsigned sign = (imm >> 24) & 0x80;

  return sign | low7;
}
7639
/* Returns CMODE.  IMMBITS [7:0] is set to bits suitable for inserting into
   the instruction.  *OP is passed as the initial value of the op field, and
   may be set to a different value depending on the constant (i.e.
   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
   MVN).  If the immediate looks like a repeated pattern then also
   try smaller element sizes.  */

static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* CMODE 0xf: quarter-precision float immediate (32-bit elements,
     never for the MVN form).  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* CMODE 0xe with OP = 1: 64-bit value where every byte is
	 all-zeros or all-ones, squashed to one bit per byte.  */
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* Otherwise a 64-bit constant is only representable when both
	 halves match; fall through and treat it as 32-bit.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* CMODEs 0x0/0x2/0x4/0x6: one byte at any byte position, zeros
	 elsewhere.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      /* CMODEs 0xc/0xd: one byte shifted up, with ones filled in below.  */
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* Retry as a 16-bit pattern if both halfwords match.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* CMODEs 0x8/0xa: one byte in either half of a halfword.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* Retry as an 8-bit pattern if both bytes match.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
7749
/* Kind of "=expr" literal requested; selects which move-instruction
   forms move_or_literal_pool may synthesize.  */
enum lit_type
{
  CONST_THUMB,	/* Thumb load destination.  */
  CONST_ARM,	/* ARM load destination.  */
  CONST_VEC	/* VFP/Neon load destination.  */
};
7756
/* inst.reloc.exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return TRUE; if it can't, convert inst.instruction to a literal-pool
   load and return FALSE.  If this is not a valid thing to do in the
   current context, set inst.error and return TRUE.

   inst.operands[i] describes the destination register.  */

static bfd_boolean
move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
{
  unsigned long tbit;
  bfd_boolean thumb_p = (t == CONST_THUMB);
  bfd_boolean arm_p = (t == CONST_ARM);
  bfd_boolean vec64_p = (t == CONST_VEC) && !inst.operands[i].issingle;

  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  /* The "=expr" pseudo is only valid on load instructions.  */
  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }
  if (inst.reloc.exp.X_op != O_constant
      && inst.reloc.exp.X_op != O_symbol
      && inst.reloc.exp.X_op != O_big)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }

  /* Known-constant values may be synthesizable as a single move.  */
  if ((inst.reloc.exp.X_op == O_constant
       || inst.reloc.exp.X_op == O_big)
      && !inst.operands[i].issingle)
    {
      if (thumb_p && inst.reloc.exp.X_op == O_constant)
	{
	  if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
	    {
	      /* This can be done with a mov(1) instruction.  */
	      inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
	      inst.instruction |= inst.reloc.exp.X_add_number;
	      return TRUE;
	    }
	}
      else if (arm_p && inst.reloc.exp.X_op == O_constant)
	{
	  int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mov instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return TRUE;
	    }

	  /* Failing that, try the bitwise complement with MVN.  */
	  value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mvn instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return TRUE;
	    }
	}
      else if (vec64_p)
	{
	  /* Try a 64-bit VMOV/VMVN immediate.  The high word comes
	     from the second operand when present, else it is zero or
	     the sign-extension of the low word.  */
	  int op = 0;
	  unsigned immbits = 0;
	  unsigned immlo = inst.operands[1].imm;
	  unsigned immhi = inst.operands[1].regisimm
			   ? inst.operands[1].reg
			   : inst.reloc.exp.X_unsigned
			     ? 0
			     : ((bfd_int64_t)((int) immlo)) >> 32;
	  int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
					       &op, 64, NT_invtype);

	  if (cmode == FAIL)
	    {
	      /* Retry with the inverted value and the opposite op
		 (VMOV <-> VMVN).  */
	      neon_invert_size (&immlo, &immhi, 64);
	      op = !op;
	      cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
					       &op, 64, NT_invtype);
	    }
	  if (cmode != FAIL)
	    {
	      inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
				 | (1 << 23)
				 | (cmode << 8)
				 | (op << 5)
				 | (1 << 4);
	      /* Fill other bits in vmov encoding for both thumb and arm.  */
	      if (thumb_mode)
		inst.instruction |= (0x7 << 29) | (0xF << 24);
	      else
		inst.instruction |= (0xF << 28) | (0x1 << 25);
	      neon_write_immbits (immbits);
	      return TRUE;
	    }
	}
    }

  /* No move form applies: place the value in the literal pool and
     rewrite the operand as a PC-relative load from it.  */
  if (add_to_lit_pool ((!inst.operands[i].isvec
			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
    return TRUE;

  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
7880
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format instruction.  Reject all forms which cannot be encoded
   into a coprocessor load/store instruction.  If wb_ok is false,
   reject use of writeback; if unind_ok is false, reject use of
   unindexed addressing.  If reloc_override is not 0, use it instead
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   (in which case it is preserved).  */

static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  if (!inst.operands[i].isreg)
    {
      /* "=N" pseudo-address: try to synthesize a vector move; failing
	 that, move_or_literal_pool rewrites the operand as a
	 PC-relative literal load and we continue below.  */
      gas_assert (inst.operands[0].isvec);
      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
	return SUCCESS;
    }

  /* Base register Rn in bits 19:16.  */
  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* The option field occupies the offset bits; U must be set.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  if (reloc_override)
    inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	    && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      /* Group relocations are preserved; anything else becomes the
	 default coprocessor offset relocation.  */
      if (thumb_mode)
	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
7952
/* Functions for instruction encoding, sorted by sub-architecture.
   First some generics; their names are taken from the conventional
   bit positions for register arguments in ARM format instructions.  */

/* Encoder for instructions with no operands: the opcode template in
   inst.instruction is already complete.  */
static void
do_noargs (void)
{
}
7961
/* Encode Rd in bits 15:12.  */
static void
do_rd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
}
7967
/* Encode Rd in bits 15:12 and Rm in bits 3:0.  */
static void
do_rd_rm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
}
7974
/* Encode the first operand in bits 3:0 (Rm) and the second in
   bits 19:16 (Rn).  */
static void
do_rm_rn (void)
{
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 16;
}
7981
/* Encode Rd in bits 15:12 and Rn in bits 19:16.  */
static void
do_rd_rn (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
}
7988
/* Encode the first operand in bits 19:16 (Rn) and the second in
   bits 15:12 (Rd).  */
static void
do_rn_rd (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
}
7995
7996 static bfd_boolean
7997 check_obsolete (const arm_feature_set *feature, const char *msg)
7998 {
7999 if (ARM_CPU_IS_ANY (cpu_variant))
8000 {
8001 as_tsktsk ("%s", msg);
8002 return TRUE;
8003 }
8004 else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
8005 {
8006 as_bad ("%s", msg);
8007 return TRUE;
8008 }
8009
8010 return FALSE;
8011 }
8012
/* Encode Rd (15:12), Rm (3:0) and Rn (19:16), enforcing the special
   operand restrictions and deprecation rules for SWP{B}.  */
static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    {
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		  _("Rn must not overlap other operands"));

      /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
       */
      if (!check_obsolete (&arm_ext_v8,
			   _("swp{b} use is obsoleted for ARMv8 and later"))
	  && warn_on_deprecated
	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
	as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
    }

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;
}
8036
/* Encode Rd (15:12), Rn (19:16) and Rm (3:0).  */
static void
do_rd_rn_rm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
}
8044
/* Encode Rm (3:0), Rd (15:12) and Rn (19:16), for instructions whose
   base register may not be the PC and which accept only a bare [Rn]
   address (no offset expression).  */
static void
do_rm_rd_rn (void)
{
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
  /* Reject any non-zero or symbolic offset expression.  */
  constraint (((inst.reloc.exp.X_op != O_constant
		&& inst.reloc.exp.X_op != O_illegal)
	       || inst.reloc.exp.X_add_number != 0),
	      BAD_ADDR_MODE);
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
8057
/* OR a bare immediate operand directly into the opcode.  */
static void
do_imm0 (void)
{
  inst.instruction |= inst.operands[0].imm;
}
8063
/* Encode Rd (15:12) plus a coprocessor-style address operand
   (writeback and unindexed forms both permitted).  */
static void
do_rd_cpaddr (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
8070
/* ARM instructions, in alphabetical order by function name (except
   that wrapper functions appear immediately after the function they
   wrap).  */

/* This is a pseudo-op of the form "adr rd, label" to be converted
   into a relative address of the form "add rd, pc, #label-.-8".  */

static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  /* Per the "#label-.-8" form above, compensate for the PC offset.  */
  inst.reloc.exp.X_add_number -= 8;
}
8089
8090 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8091 into a relative address of the form:
8092 add rd, pc, #low(label-.-8)"
8093 add rd, rd, #high(label-.-8)" */
8094
static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  /* ADRL always expands to a two-instruction sequence.  */
  inst.size = INSN_SIZE * 2;
  /* Bias by -8: in ARM state the PC reads two instructions ahead.  */
  inst.reloc.exp.X_add_number -= 8;
}
8107
/* Data-processing arithmetic: "op Rd, {Rn,} <shifter_operand>".
   An omitted Rn defaults to Rd.  */
static void
do_arit (void)
{
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_shifter_operand (2);
}
8117
/* Barrier instructions (DMB/DSB/ISB): the optional option operand goes
   in bits 0-3; when omitted, 0xf (full-system) is used.  */
static void
do_barrier (void)
{
  if (inst.operands[0].present)
    inst.instruction |= inst.operands[0].imm;
  else
    inst.instruction |= 0xf;
}
8126
/* BFC Rd, #lsb, #width (bit-field clear).  */
static void
do_bfc (void)
{
  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}
8138
/* BFI Rd, Rm, #lsb, #width (bit-field insert).  */
static void
do_bfi (void)
{
  unsigned int msb;

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    inst.operands[1].reg = REG_PC;

  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}
8158
/* SBFX/UBFX Rd, Rn, #lsb, #width (bit-field extract).  */
static void
do_bfx (void)
{
  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
	      _("bit-field extends past end of register"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  /* The width field is encoded biased by one.  */
  inst.instruction |= (inst.operands[3].imm - 1) << 16;
}
8169
8170 /* ARM V5 breakpoint instruction (argument parse)
8171 BKPT <16 bit unsigned immediate>
8172 Instruction is not conditional.
8173 The bit pattern given in insns[] has the COND_ALWAYS condition,
8174 and it is an error if the caller tried to override that. */
8175
8176 static void
8177 do_bkpt (void)
8178 {
8179 /* Top 12 of 16 bits to bits 19:8. */
8180 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
8181
8182 /* Bottom 4 of 16 bits to bits 3:0. */
8183 inst.instruction |= inst.operands[0].imm & 0xf;
8184 }
8185
/* Shared helper for branch instructions: select and install the
   PC-relative relocation.  DEFAULT_RELOC is used unless the operand
   carried an explicit (plt) or (tlscall) suffix.  */
static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
      /* (tlscall) needs the Thumb flavour of the reloc in Thumb mode.  */
      inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
	? BFD_RELOC_ARM_PLT32
	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
    }
  else
    inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
  inst.reloc.pc_rel = 1;
}
8202
/* B{cond} <target>.  EABI v4+ objects use the JUMP reloc; older
   objects get the plain branch reloc.  */
static void
do_branch (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8213
/* BL{cond} <target>.  For EABI v4+, an unconditional BL uses the CALL
   reloc and a conditional one the JUMP reloc; older objects get the
   plain branch reloc.  */
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8229
8230 /* ARM V5 branch-link-exchange instruction (argument parse)
8231 BLX <target_addr> ie BLX(1)
8232 BLX{<condition>} <Rm> ie BLX(2)
8233 Unfortunately, there are two different opcodes for this mnemonic.
8234 So, the insns[].value is not used, and the code here zaps values
8235 into inst.instruction.
8236 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8237
/* See the block comment above: register form BLX(2) keeps the insns[]
   opcode; immediate form BLX(1) is unconditional and gets its opcode
   zapped here.  */
static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction = 0xfa000000;
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
8261
/* BX Rm.  May additionally emit an R_ARM_V4BX marker reloc for EABI
   objects targeting ARMv4t or earlier.  */
static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if this is an EABI object that looks
     like it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
    want_reloc = TRUE;

#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    want_reloc = FALSE;

  if (want_reloc)
    inst.reloc.type = BFD_RELOC_ARM_V4BX;
}
8285
8286
8287 /* ARM v5TEJ. Jump to Jazelle code. */
8288
static void
do_bxj (void)
{
  /* BXJ Rm: Rm in bits 0-3.  */
  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bxj is not really useful"));

  inst.instruction |= inst.operands[0].reg;
}
8297
8298 /* Co-processor data operation:
8299 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8300 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8301 static void
8302 do_cdp (void)
8303 {
8304 inst.instruction |= inst.operands[0].reg << 8;
8305 inst.instruction |= inst.operands[1].imm << 20;
8306 inst.instruction |= inst.operands[2].reg << 12;
8307 inst.instruction |= inst.operands[3].reg << 16;
8308 inst.instruction |= inst.operands[4].reg;
8309 inst.instruction |= inst.operands[5].imm << 5;
8310 }
8311
/* Comparison instructions (CMP/CMN/TST/TEQ): Rn in bits 16-19 plus a
   shifter operand; there is no destination register.  */
static void
do_cmp (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_shifter_operand (1);
}
8318
8319 /* Transfer between coprocessor and ARM registers.
8320 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8321 MRC2
8322 MCR{cond}
8323 MCR2
8324
8325 No special properties. */
8326
/* One deprecated/obsoleted coprocessor register access, matched
   against the operands of MRC/MCR instructions in do_co_reg.  */
struct deprecated_coproc_regs_s
{
  unsigned cp;			/* Coprocessor number.  */
  int opc1;			/* opcode_1 field.  */
  unsigned crn;			/* CRn field.  */
  unsigned crm;			/* CRm field.  */
  int opc2;			/* opcode_2 field.  */
  arm_feature_set deprecated;	/* Architectures where deprecated.  */
  arm_feature_set obsoleted;	/* Architectures where obsoleted.  */
  const char *dep_msg;		/* Diagnostic for the deprecated case.  */
  const char *obs_msg;		/* Diagnostic for the obsoleted case.  */
};
8339
8340 #define DEPR_ACCESS_V8 \
8341 N_("This coprocessor register access is deprecated in ARMv8")
8342
/* Table of all deprecated coprocessor registers.  All entries here are
   deprecated from ARMv8 onwards; none is marked obsoleted.  */
static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
{
    {15, 0, 7, 10, 5,					/* CP15DMB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 10, 4,					/* CP15DSB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7,  5, 4,					/* CP15ISB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 1,  0, 0,					/* TEEHBR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 0,  0, 0,					/* TEECR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
};
8362
8363 #undef DEPR_ACCESS_V8
8364
/* Number of entries in deprecated_coproc_regs above.  */
static const size_t deprecated_coproc_reg_count =
  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
8367
/* Encode MRC/MCR (and the 2 variants): coproc in bits 8-11, opcode_1
   in bits 21-23, Rd in bits 12-15, CRn in bits 16-19, CRm in bits 0-3,
   opcode_2 in bits 5-7.  Also warns about deprecated coprocessor
   register accesses from the table above.  */
static void
do_co_reg (void)
{
  unsigned Rd;
  size_t i;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else
	/* MRC, MRC2  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

  /* Warn if the access matches a deprecated coprocessor register.  */
  for (i = 0; i < deprecated_coproc_reg_count; ++i)
    {
      const struct deprecated_coproc_regs_s *r =
	deprecated_coproc_regs + i;

      if (inst.operands[0].reg == r->cp
	  && inst.operands[1].imm == r->opc1
	  && inst.operands[3].reg == r->crn
	  && inst.operands[4].reg == r->crm
	  && inst.operands[5].imm == r->opc2)
	{
	  if (! ARM_CPU_IS_ANY (cpu_variant)
	      && warn_on_deprecated
	      && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	    as_tsktsk ("%s", r->dep_msg);
	}
    }

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
8417
8418 /* Transfer between coprocessor register and pair of ARM registers.
8419 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8420 MCRR2
8421 MRRC{cond}
8422 MRRC2
8423
8424 Two XScale instructions are special cases of these:
8425
8426 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8427 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8428
8429 Result unpredictable if Rd or Rn is R15. */
8430
/* Encode MCRR/MRRC (and the 2 variants): coproc in bits 8-11, opcode
   in bits 4-7, Rd in bits 12-15, Rn in bits 16-19, CRm in bits 0-3.
   See the block comment above for the XScale MAR/MRA aliases.  */
static void
do_co_reg2c (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;

  if (thumb_mode)
    {
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
    }
  else
    {
      constraint (Rd == REG_PC, BAD_PC);
      constraint (Rn == REG_PC, BAD_PC);
    }

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 4;
  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;
  inst.instruction |= inst.operands[4].reg;
}
8456
/* CPS<effect> <iflags>{, #<mode>}: iflags in bits 6-8; when a mode is
   given, the M-mod bit is set and the mode number placed in bits 0-4.  */
static void
do_cpsi (void)
{
  inst.instruction |= inst.operands[0].imm << 6;
  if (inst.operands[1].present)
    {
      inst.instruction |= CPSI_MMOD;
      inst.instruction |= inst.operands[1].imm;
    }
}
8467
/* DBG #option: hint immediate in bits 0-3.  */
static void
do_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
8473
/* SDIV/UDIV Rd, {Rn,} Rm: Rd in bits 16-19, Rn in bits 0-3, Rm in
   bits 8-11.  An omitted Rn defaults to Rd.  No register may be PC.  */
static void
do_div (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rn = (inst.operands[1].present
	? inst.operands[1].reg : Rd);
  Rm = inst.operands[2].reg;

  constraint ((Rd == REG_PC), BAD_PC);
  constraint ((Rn == REG_PC), BAD_PC);
  constraint ((Rm == REG_PC), BAD_PC);

  inst.instruction |= Rd << 16;
  inst.instruction |= Rn << 0;
  inst.instruction |= Rm << 8;
}
8492
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  /* Emits nothing (size 0); only updates the IT-block tracking state.  */
  inst.size = 0;
  if (unified_syntax)
    {
      set_it_insn_type (IT_INSN);
      /* Low nibble of the opcode is the IT mask; bit 4 marks a fresh
	 block.  */
      now_it.mask = (inst.instruction & 0xf) | 0x10;
      now_it.cc = inst.operands[0].imm;
    }
}
8509
/* If there is only one register in the register list,
   then return its register number.  Otherwise return -1.  */
static int
only_one_reg_in_list (int range)
{
  int i;

  /* Reject empty (or sign-bit-corrupted) lists up front.  The previous
     ffs-based code computed i == -1 for an empty list and then
     evaluated (1 << -1), which is undefined behavior.  */
  if (range <= 0)
    return -1;

  /* Exactly one register means RANGE is a power of two.  */
  if ((range & (range - 1)) != 0)
    return -1;

  /* Find the position of the single set bit.  */
  for (i = 0; (range & (1 << i)) == 0; i++)
    ;

  /* Only r0-r15 are valid list members.  */
  return i > 15 ? -1 : i;
}
8518
/* Shared encoder for LDM/STM and PUSH/POP: base register in bits
   16-19, register list in bits 0-15.  Diagnoses UNPREDICTABLE
   writeback combinations, and switches single-register PUSH/POP to
   the A2 (single-transfer) encoding.  */
static void
encode_ldmstm(int from_push_pop_mnem)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* A '^' suffix on the register list selects the type 2/3 form.  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback. */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2. */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types. */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM. */
	{
	  /* Not allowed for type 2. */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list. */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding. */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      /* Keep only the condition field; rebuild the rest of the opcode.  */
      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
8569
/* LDM/STM proper (as opposed to PUSH/POP) always keep the
   multi-register encoding.  */
static void
do_ldmstm (void)
{
  encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
}
8575
8576 /* ARMv5TE load-consecutive (argument parse)
8577 Mode is like LDRH.
8578
8579 LDRccD R, mode
8580 STRccD R, mode. */
8581
/* LDRD/STRD Rt, {Rt2,} <addr>: Rt must be even, Rt2 (defaulted if
   omitted) must be Rt+1, and Rt must not be r14 (so Rt2 is not PC).  */
static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	  && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	     || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
8617
/* LDREX Rt, [Rn]: Rt in bits 12-15, Rn in bits 16-19.  Only the
   plain [Rn] addressing form (zero offset) is accepted.  */
static void
do_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  /* NOTE(review): this looks unreachable — the REG_PC case was already
     rejected above with BAD_ADDR_MODE.  Kept for safety; verify.  */
  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.reloc.type = BFD_RELOC_UNUSED;
}
8649
/* LDREXD Rt, {Rt2,} [Rn]: Rt must be even and not r14; Rt2 (defaulted
   elsewhere if omitted) must be Rt+1.  Rt in bits 12-15, the base
   register (operand 2) in bits 16-19.  */
static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
8665
8666 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
8667 which is not a multiple of four is UNPREDICTABLE. */
8668 static void
8669 check_ldr_r15_aligned (void)
8670 {
8671 constraint (!(inst.operands[1].immisreg)
8672 && (inst.operands[0].reg == REG_PC
8673 && inst.operands[1].reg == REG_PC
8674 && (inst.reloc.exp.X_add_number & 0x3)),
8675 _("ldr to register 15 must be 4-byte alligned"));
8676 }
8677
/* LDR/STR (word/byte): Rt in bits 12-15.  A non-register second
   operand (an "=expr" pseudo-load) is handed to move_or_literal_pool;
   if that consumed the instruction we are done.  */
static void
do_ldst (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
      return;
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
  check_ldr_r15_aligned ();
}
8688
/* LDRT/STRT (user-mode, translated access) word/byte forms.  */
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
8707
8708 /* Halfword and signed-byte load/store operations. */
8709
/* LDRH/STRH/LDRSB/LDRSH (addressing mode 3): Rt in bits 12-15 and
   must not be PC.  An "=expr" operand goes via move_or_literal_pool.  */
static void
do_ldstv4 (void)
{
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}
8720
/* LDRHT/STRHT/LDRSBT/LDRSHT (user-mode, translated access) forms.  */
static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
8739
8740 /* Co-processor register load/store.
8741 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
static void
do_lstc (void)
{
  /* CP# in bits 8-11, CRd in bits 12-15, then a coprocessor address.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
8749
/* MLA/MLS Rd, Rm, Rs, Rn: Rd in bits 16-19, Rm in bits 0-3, Rs in
   bits 8-11, Rn in bits 12-15.  */
static void
do_mlas (void)
{
  /* This restriction does not apply to mls (nor to mla in v6 or later).  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
      && !(inst.instruction & 0x00400000))
    as_tsktsk (_("Rd and Rm should be different in mla"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}
8764
/* MOV/MVN Rd, <shifter_operand>: Rd in bits 12-15.  */
static void
do_mov (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_shifter_operand (1);
}
8771
8772 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
8773 static void
8774 do_mov16 (void)
8775 {
8776 bfd_vma imm;
8777 bfd_boolean top;
8778
8779 top = (inst.instruction & 0x00400000) != 0;
8780 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
8781 _(":lower16: not allowed this instruction"));
8782 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
8783 _(":upper16: not allowed instruction"));
8784 inst.instruction |= inst.operands[0].reg << 12;
8785 if (inst.reloc.type == BFD_RELOC_UNUSED)
8786 {
8787 imm = inst.reloc.exp.X_add_number;
8788 /* The value is in two pieces: 0:11, 16:19. */
8789 inst.instruction |= (imm & 0x00000fff);
8790 inst.instruction |= (imm & 0x0000f000) << 4;
8791 }
8792 }
8793
8794 static void do_vfp_nsyn_opcode (const char *);
8795
/* Handle the VFP-register forms of MRS written in Neon-style syntax:
   a vector destination becomes FMSTAT, a vector source becomes FMRX.
   Returns SUCCESS if the instruction was taken over, FAIL otherwise.  */
static int
do_vfp_nsyn_mrs (void)
{
  if (inst.operands[0].isvec)
    {
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      /* FMSTAT takes no operands; clear them before re-dispatching.  */
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
8814
/* Handle the VFP-register form of MSR: a vector destination becomes
   FMXR.  Returns SUCCESS if taken over, FAIL otherwise.  */
static int
do_vfp_nsyn_msr (void)
{
  if (inst.operands[0].isvec)
    do_vfp_nsyn_opcode ("fmxr");
  else
    return FAIL;

  return SUCCESS;
}
8825
/* VMRS Rt, <spec_reg>: spec reg in bits 16-19, Rt in bits 12-15.  */
static void
do_vmrs (void)
{
  unsigned Rt = inst.operands[0].reg;

  if (thumb_mode && Rt == REG_SP)
    {
      inst.error = BAD_SP;
      return;
    }

  /* APSR_ sets isvec. All other refs to PC are illegal.  */
  if (!inst.operands[0].isvec && Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[1].reg << 16);
  inst.instruction |= (Rt << 12);
}
8849
/* VMSR <spec_reg>, Rt: spec reg in bits 16-19, Rt in bits 12-15.  */
static void
do_vmsr (void)
{
  unsigned Rt = inst.operands[1].reg;

  if (thumb_mode)
    reject_bad_reg (Rt);
  else if (Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[0].reg << 16);
  inst.instruction |= (Rt << 12);
}
8868
/* MRS Rd, <psr>: Rd in bits 12-15 and must not be PC.  The VFP
   new-syntax forms are dispatched first.  */
static void
do_mrs (void)
{
  unsigned br;

  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;

  if (inst.operands[1].isreg)
    {
      br = inst.operands[1].reg;
      /* NOTE(review): (br & 0xf0000) can never equal 0xf000, so the
	 second test is always true; this looks like it was meant to be
	 (br & 0xf000) != 0xf000 — verify against the banked-register
	 encoding before changing.  */
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf000))
	as_bad (_("bad register for mrs"));
    }
  else
    {
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
		  != (PSR_c|PSR_f),
		  _("'APSR', 'CPSR' or 'SPSR' expected"));
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
    }

  inst.instruction |= br;
}
8897
8898 /* Two possible forms:
8899 "{C|S}PSR_<field>, Rm",
8900 "{C|S}PSR_f, #expression". */
8901
/* MSR: PSR field mask from operand 0; operand 1 is either Rm (register
   form) or an immediate, which sets the I bit and leaves the value to
   an ARM-immediate fixup.  */
static void
do_msr (void)
{
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      inst.instruction |= INST_IMMEDIATE;
      inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
      inst.reloc.pc_rel = 0;
    }
}
8918
/* MUL Rd, Rm, {Rs}: Rd in bits 16-19, Rm in bits 0-3, Rs in bits
   8-11.  An omitted Rs defaults to Rd.  */
static void
do_mul (void)
{
  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;

  /* Rd == Rm is only a problem before ARMv6.  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("Rd and Rm should be different in mul"));
}
8934
8935 /* Long Multiply Parser
8936 UMULL RdLo, RdHi, Rm, Rs
8937 SMULL RdLo, RdHi, Rm, Rs
8938 UMLAL RdLo, RdHi, Rm, Rs
8939 SMLAL RdLo, RdHi, Rm, Rs. */
8940
/* See the block comment above: RdLo in bits 12-15, RdHi in bits
   16-19, Rm in bits 0-3, Rs in bits 8-11.  */
static void
do_mull (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  /* rdhi and rdlo must be different.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));

  /* rdhi, rdlo and rm must all be different before armv6.  */
  if ((inst.operands[0].reg == inst.operands[2].reg
       || inst.operands[1].reg == inst.operands[2].reg)
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
}
8959
/* NOP {#hint}: on v6k+ (or when a hint operand is given) emit the
   architectural hint encoding; otherwise leave the legacy opcode
   from insns[] untouched.  */
static void
do_nop (void)
{
  if (inst.operands[0].present
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
      inst.instruction &= 0xf0000000;	/* Keep only the condition.  */
      inst.instruction |= 0x0320f000;
      if (inst.operands[0].present)
	inst.instruction |= inst.operands[0].imm;
    }
}
8973
8974 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
8975 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
8976 Condition defaults to COND_ALWAYS.
8977 Error if Rd, Rn or Rm are R15. */
8978
8979 static void
8980 do_pkhbt (void)
8981 {
8982 inst.instruction |= inst.operands[0].reg << 12;
8983 inst.instruction |= inst.operands[1].reg << 16;
8984 inst.instruction |= inst.operands[2].reg;
8985 if (inst.operands[3].present)
8986 encode_arm_shift (3);
8987 }
8988
8989 /* ARM V6 PKHTB (Argument Parse). */
8990
static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn. */
      inst.instruction &= 0xfff00010;	/* Clear register fields and shift.  */
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}
9011
9012 /* ARMv5TE: Preload-Cache
9013 MP Extensions: Preload for write
9014
9015 PLD(W) <addr_mode>
9016
9017 Syntactically, like LDR with B=1, W=0, L=1. */
9018
/* PLD/PLDW: only pre-indexed register addressing without writeback
   is architecturally meaningful; reject everything else.  */
static void
do_pld (void)
{
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
9032
9033 /* ARMv7: PLI <addr_mode> */
static void
do_pli (void)
{
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  /* PLI uses the P==0 encoding, so strip the pre-index bit set above.  */
  inst.instruction &= ~PRE_INDEX;
}
9048
/* PUSH/POP {reglist}: rewrite the operands into an LDM/STM with SP as
   a writeback base, then defer to the shared LDM/STM encoder.  */
static void
do_push_pop (void)
{
  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
}
9061
9062 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9063 word at the specified address and the following word
9064 respectively.
9065 Unconditionally executed.
9066 Error if Rn is R15. */
9067
static void
do_rfe (void)
{
  /* RFE{<amode>} Rn{!}: Rn in bits 16-19, optional writeback bit.  */
  inst.instruction |= inst.operands[0].reg << 16;
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
}
9075
9076 /* ARM V6 ssat (argument parse). */
9077
static void
do_ssat (void)
{
  /* SSAT Rd, #sat, Rn{, shift}: the saturate position is encoded
     biased by one.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= (inst.operands[1].imm - 1) << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}
9088
9089 /* ARM V6 usat (argument parse). */
9090
static void
do_usat (void)
{
  /* USAT Rd, #sat, Rn{, shift}: unlike SSAT, the saturate position
     is encoded unbiased.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}
9101
9102 /* ARM V6 ssat16 (argument parse). */
9103
9104 static void
9105 do_ssat16 (void)
9106 {
9107 inst.instruction |= inst.operands[0].reg << 12;
9108 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
9109 inst.instruction |= inst.operands[2].reg;
9110 }
9111
9112 static void
9113 do_usat16 (void)
9114 {
9115 inst.instruction |= inst.operands[0].reg << 12;
9116 inst.instruction |= inst.operands[1].imm << 16;
9117 inst.instruction |= inst.operands[2].reg;
9118 }
9119
9120 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9121 preserving the other bits.
9122
9123 setend <endian_specifier>, where <endian_specifier> is either
9124 BE or LE. */
9125
static void
do_setend (void)
{
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
      as_tsktsk (_("setend use is deprecated for ARMv8"));

  /* A non-zero operand selects BE; bit 9 is the E-bit selector.  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}
9136
/* Shift pseudo-ops (LSL/LSR/ASR/ROR) "Rd, {Rm,} Rs|#imm".  An omitted
   Rm defaults to Rd.  Register shifts set SHIFT_BY_REG; immediate
   shifts defer the amount to a shift-immediate fixup.  */
static void
do_shift (void)
{
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
      /* PR 12854: Error on extraneous shifts.  */
      constraint (inst.operands[2].shifted,
		  _("extraneous shift as part of operand to shift insn"));
    }
  else
    inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
}
9157
static void
do_smc (void)
{
  /* SMC #imm (Secure Monitor Call): the immediate is not encoded
     here but fixed up later through a dedicated relocation.  */
  inst.reloc.type = BFD_RELOC_ARM_SMC;
  inst.reloc.pc_rel = 0;
}
9164
static void
do_hvc (void)
{
  /* HVC #imm (Hypervisor Call): immediate fixed up via its own reloc.  */
  inst.reloc.type = BFD_RELOC_ARM_HVC;
  inst.reloc.pc_rel = 0;
}
9171
static void
do_swi (void)
{
  /* SWI/SVC #imm: the 24-bit comment field is fixed up via a reloc.  */
  inst.reloc.type = BFD_RELOC_ARM_SWI;
  inst.reloc.pc_rel = 0;
}
9178
9179 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9180 SMLAxy{cond} Rd,Rm,Rs,Rn
9181 SMLAWy{cond} Rd,Rm,Rs,Rn
9182 Error if any register is R15. */
9183
9184 static void
9185 do_smla (void)
9186 {
9187 inst.instruction |= inst.operands[0].reg << 16;
9188 inst.instruction |= inst.operands[1].reg;
9189 inst.instruction |= inst.operands[2].reg << 8;
9190 inst.instruction |= inst.operands[3].reg << 12;
9191 }
9192
9193 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9194 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9195 Error if any register is R15.
9196 Warning if Rdlo == Rdhi. */
9197
9198 static void
9199 do_smlal (void)
9200 {
9201 inst.instruction |= inst.operands[0].reg << 12;
9202 inst.instruction |= inst.operands[1].reg << 16;
9203 inst.instruction |= inst.operands[2].reg;
9204 inst.instruction |= inst.operands[3].reg << 8;
9205
9206 if (inst.operands[0].reg == inst.operands[1].reg)
9207 as_tsktsk (_("rdhi and rdlo must be different"));
9208 }
9209
9210 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9211 SMULxy{cond} Rd,Rm,Rs
9212 Error if any register is R15. */
9213
9214 static void
9215 do_smul (void)
9216 {
9217 inst.instruction |= inst.operands[0].reg << 16;
9218 inst.instruction |= inst.operands[1].reg;
9219 inst.instruction |= inst.operands[2].reg << 8;
9220 }
9221
9222 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9223 the same for both ARM and Thumb-2. */
9224
9225 static void
9226 do_srs (void)
9227 {
9228 int reg;
9229
9230 if (inst.operands[0].present)
9231 {
9232 reg = inst.operands[0].reg;
9233 constraint (reg != REG_SP, _("SRS base register must be r13"));
9234 }
9235 else
9236 reg = REG_SP;
9237
9238 inst.instruction |= reg << 16;
9239 inst.instruction |= inst.operands[1].imm;
9240 if (inst.operands[0].writeback || inst.operands[1].writeback)
9241 inst.instruction |= WRITE_BACK;
9242 }
9243
9244 /* ARM V6 strex (argument parse). */
9245
static void
do_strex (void)
{
  /* STREX Rd, Rt, [Rn]: only a bare register base is accepted — no
     writeback, indexing, register offset, shift or negative offset.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  /* The status register Rd must not overlap Rt or the base Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  /* Any parsed immediate offset must be zero in the ARM encoding.  */
  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  /* Rd in bits 12-15, Rt in bits 0-3, Rn in bits 16-19.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.reloc.type = BFD_RELOC_UNUSED;
}
9269
static void
do_t_strexbh (void)
{
  /* Thumb STREXB/STREXH: reject every addressing form except a
     bare [Rn].  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  /* The status register must be distinct from both Rt and Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9284
static void
do_strexd (void)
{
  /* STREXD Rd, Rt, Rt2, [Rn]: Rt must be even and, when spelled out,
     Rt2 must be Rt + 1.  */
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  /* The status register must overlap neither the pair nor the base.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  /* Rd in bits 12-15, Rt in bits 0-3 (Rt2 implicit), Rn in 16-19.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}
9306
9307 /* ARM V8 STRL. */
static void
do_stlex (void)
{
  /* ARM STLEX: the status register must not overlap the stored value
     or the base register; encoding is shared with do_rd_rm_rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rd_rm_rn ();
}
9316
static void
do_t_stlex (void)
{
  /* Thumb STLEX: same overlap rule as the ARM form but with the
     Thumb operand field layout (do_rm_rd_rn).  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9325
9326 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9327 extends it to 32-bits, and adds the result to a value in another
9328 register. You can specify a rotation by 0, 8, 16, or 24 bits
9329 before extracting the 16-bit value.
9330 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9331 Condition defaults to COND_ALWAYS.
9332 Error if any register uses R15. */
9333
9334 static void
9335 do_sxtah (void)
9336 {
9337 inst.instruction |= inst.operands[0].reg << 12;
9338 inst.instruction |= inst.operands[1].reg << 16;
9339 inst.instruction |= inst.operands[2].reg;
9340 inst.instruction |= inst.operands[3].imm << 10;
9341 }
9342
9343 /* ARM V6 SXTH.
9344
9345 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9346 Condition defaults to COND_ALWAYS.
9347 Error if any register uses R15. */
9348
9349 static void
9350 do_sxth (void)
9351 {
9352 inst.instruction |= inst.operands[0].reg << 12;
9353 inst.instruction |= inst.operands[1].reg;
9354 inst.instruction |= inst.operands[2].imm << 10;
9355 }
9356 \f
9357 /* VFP instructions. In a logical order: SP variant first, monad
9358 before dyad, arithmetic then move then load/store. */
9359
static void
do_vfp_sp_monadic (void)
{
  /* Single-precision one-operand insn: Sd <- op Sm.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
9366
static void
do_vfp_sp_dyadic (void)
{
  /* Single-precision two-operand insn: Sd <- Sn op Sm.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
9374
static void
do_vfp_sp_compare_z (void)
{
  /* Single-precision compare against zero: only Sd is encoded.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}
9380
static void
do_vfp_dp_sp_cvt (void)
{
  /* Convert single to double: Dd <- convert (Sm).  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
9387
static void
do_vfp_sp_dp_cvt (void)
{
  /* Convert double to single: Sd <- convert (Dm).  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
9394
static void
do_vfp_reg_from_sp (void)
{
  /* Move SP register to core register: Rd (bits 12-15) <- Sn.  */
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}
9401
9402 static void
9403 do_vfp_reg2_from_sp2 (void)
9404 {
9405 constraint (inst.operands[2].imm != 2,
9406 _("only two consecutive VFP SP registers allowed here"));
9407 inst.instruction |= inst.operands[0].reg << 12;
9408 inst.instruction |= inst.operands[1].reg << 16;
9409 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
9410 }
9411
static void
do_vfp_sp_from_reg (void)
{
  /* Move core register to SP register: Sn <- Rd (bits 12-15).  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}
9418
9419 static void
9420 do_vfp_sp2_from_reg2 (void)
9421 {
9422 constraint (inst.operands[0].imm != 2,
9423 _("only two consecutive VFP SP registers allowed here"));
9424 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
9425 inst.instruction |= inst.operands[1].reg << 12;
9426 inst.instruction |= inst.operands[2].reg << 16;
9427 }
9428
static void
do_vfp_sp_ldst (void)
{
  /* Single-precision load/store: Sd plus a coprocessor address.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
9435
static void
do_vfp_dp_ldst (void)
{
  /* Double-precision load/store: Dd plus a coprocessor address.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
9442
9443
9444 static void
9445 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
9446 {
9447 if (inst.operands[0].writeback)
9448 inst.instruction |= WRITE_BACK;
9449 else
9450 constraint (ldstm_type != VFP_LDSTMIA,
9451 _("this addressing mode requires base-register writeback"));
9452 inst.instruction |= inst.operands[0].reg << 16;
9453 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
9454 inst.instruction |= inst.operands[1].imm;
9455 }
9456
9457 static void
9458 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
9459 {
9460 int count;
9461
9462 if (inst.operands[0].writeback)
9463 inst.instruction |= WRITE_BACK;
9464 else
9465 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
9466 _("this addressing mode requires base-register writeback"));
9467
9468 inst.instruction |= inst.operands[0].reg << 16;
9469 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
9470
9471 count = inst.operands[1].imm << 1;
9472 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
9473 count += 1;
9474
9475 inst.instruction |= count;
9476 }
9477
static void
do_vfp_sp_ldstmia (void)
{
  /* FLDMIAS/FSTMIAS wrapper.  */
  vfp_sp_ldstm (VFP_LDSTMIA);
}
9483
static void
do_vfp_sp_ldstmdb (void)
{
  /* FLDMDBS/FSTMDBS wrapper.  */
  vfp_sp_ldstm (VFP_LDSTMDB);
}
9489
static void
do_vfp_dp_ldstmia (void)
{
  /* FLDMIAD/FSTMIAD wrapper.  */
  vfp_dp_ldstm (VFP_LDSTMIA);
}
9495
static void
do_vfp_dp_ldstmdb (void)
{
  /* FLDMDBD/FSTMDBD wrapper.  */
  vfp_dp_ldstm (VFP_LDSTMDB);
}
9501
static void
do_vfp_xp_ldstmia (void)
{
  /* FLDMIAX/FSTMIAX (extended, one extra word) wrapper.  */
  vfp_dp_ldstm (VFP_LDSTMIAX);
}
9507
static void
do_vfp_xp_ldstmdb (void)
{
  /* FLDMDBX/FSTMDBX (extended, one extra word) wrapper.  */
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
9513
static void
do_vfp_dp_rd_rm (void)
{
  /* Double-precision one-operand insn: Dd <- op Dm.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
9520
static void
do_vfp_dp_rn_rd (void)
{
  /* First operand into the Dn field, second into Dd.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}
9527
static void
do_vfp_dp_rd_rn (void)
{
  /* First operand into the Dd field, second into Dn.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}
9534
static void
do_vfp_dp_rd_rn_rm (void)
{
  /* Double-precision two-operand insn: Dd <- Dn op Dm.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}
9542
static void
do_vfp_dp_rd (void)
{
  /* Only the Dd field is encoded (e.g. compare against zero).  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}
9548
static void
do_vfp_dp_rm_rd_rn (void)
{
  /* Operands mapped Dm, Dd, Dn in order.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
9556
9557 /* VFPv3 instructions. */
static void
do_vfp_sp_const (void)
{
  /* VMOV.F32 Sd, #imm: the 8-bit encoded constant is split, with the
     high nibble at bits 16-19 and the low nibble at bits 0-3.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
9565
static void
do_vfp_dp_const (void)
{
  /* VMOV.F64 Dd, #imm: same split-immediate layout as the SP form.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
9573
9574 static void
9575 vfp_conv (int srcsize)
9576 {
9577 int immbits = srcsize - inst.operands[1].imm;
9578
9579 if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
9580 {
9581 /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
9582 i.e. immbits must be in range 0 - 16. */
9583 inst.error = _("immediate value out of range, expected range [0, 16]");
9584 return;
9585 }
9586 else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
9587 {
9588 /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
9589 i.e. immbits must be in range 0 - 31. */
9590 inst.error = _("immediate value out of range, expected range [1, 32]");
9591 return;
9592 }
9593
9594 inst.instruction |= (immbits & 1) << 5;
9595 inst.instruction |= (immbits >> 1);
9596 }
9597
static void
do_vfp_sp_conv_16 (void)
{
  /* SP fixed-point conversion with a 16-bit integer size.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}
9604
static void
do_vfp_dp_conv_16 (void)
{
  /* DP fixed-point conversion with a 16-bit integer size.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}
9611
static void
do_vfp_sp_conv_32 (void)
{
  /* SP fixed-point conversion with a 32-bit integer size.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}
9618
static void
do_vfp_dp_conv_32 (void)
{
  /* DP fixed-point conversion with a 32-bit integer size.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
9625 \f
9626 /* FPA instructions. Also in a logical order. */
9627
static void
do_fpa_cmp (void)
{
  /* FPA compare: first operand in bits 16-19, second in bits 0-3.  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
9634
static void
do_fpa_ldmstm (void)
{
  /* FPA LFM/SFM: encode the first register and the 1-4 register count
     (packed into the CP_T_X/CP_T_Y bits, with 4 encoded as 0/0).  */
  inst.instruction |= inst.operands[0].reg << 12;
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X; break;
    case 2: inst.instruction |= CP_T_Y; break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Each FPA register occupies 12 bytes, hence the offset of
	 12 * count for the stacking emulation.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;

      /* Descending stacks use a negative offset.  */
      if (!(inst.instruction & INDEX_UP))
	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;

      /* "fd"/"ea" with writeback but without pre-indexing is emulated
	 as a post-indexed access.  */
      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
9673 \f
9674 /* iWMMXt instructions: strictly in alphabetical order. */
9675
static void
do_iwmmxt_tandorc (void)
{
  /* TANDC/TORC/TEXTRC: the destination is architecturally fixed
     at r15.  */
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}
9681
static void
do_iwmmxt_textrc (void)
{
  /* TEXTRC: Rd in bits 12-15, field-select immediate in the low bits.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm;
}
9688
static void
do_iwmmxt_textrm (void)
{
  /* TEXTRM Rd, wRn, #imm: Rd in bits 12-15, wRn in bits 16-19,
     element selector in the low bits.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].imm;
}
9696
static void
do_iwmmxt_tinsr (void)
{
  /* TINSR wRd, Rn, #imm: wRd in bits 16-19, Rn in bits 12-15,
     element selector in the low bits.  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].imm;
}
9704
static void
do_iwmmxt_tmia (void)
{
  /* TMIA wRd, Rm, Rs: accumulator number in bits 5-8, Rm in
     bits 0-3, Rs in bits 12-15.  */
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}
9712
9713 static void
9714 do_iwmmxt_waligni (void)
9715 {
9716 inst.instruction |= inst.operands[0].reg << 12;
9717 inst.instruction |= inst.operands[1].reg << 16;
9718 inst.instruction |= inst.operands[2].reg;
9719 inst.instruction |= inst.operands[3].imm << 20;
9720 }
9721
9722 static void
9723 do_iwmmxt_wmerge (void)
9724 {
9725 inst.instruction |= inst.operands[0].reg << 12;
9726 inst.instruction |= inst.operands[1].reg << 16;
9727 inst.instruction |= inst.operands[2].reg;
9728 inst.instruction |= inst.operands[3].imm << 21;
9729 }
9730
9731 static void
9732 do_iwmmxt_wmov (void)
9733 {
9734 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
9735 inst.instruction |= inst.operands[0].reg << 12;
9736 inst.instruction |= inst.operands[1].reg << 16;
9737 inst.instruction |= inst.operands[1].reg;
9738 }
9739
9740 static void
9741 do_iwmmxt_wldstbh (void)
9742 {
9743 int reloc;
9744 inst.instruction |= inst.operands[0].reg << 12;
9745 if (thumb_mode)
9746 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
9747 else
9748 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
9749 encode_arm_cp_address (1, TRUE, FALSE, reloc);
9750 }
9751
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      /* Control-register transfers are encoded unconditionally
	 (0xf condition field) and so reject a condition suffix.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
9765
static void
do_iwmmxt_wldstd (void)
{
  /* WLDRD/WSTRD.  iWMMXt2 adds a register-offset addressing form,
     re-encoded here by clearing the immediate-form bits and forcing
     the unconditional (0xf) prefix; otherwise the generic coprocessor
     addressing path is used.  */
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xf << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      /* Scale factor at bits 4-7, offset register in the low nibble.  */
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
9788
9789 static void
9790 do_iwmmxt_wshufh (void)
9791 {
9792 inst.instruction |= inst.operands[0].reg << 12;
9793 inst.instruction |= inst.operands[1].reg << 16;
9794 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
9795 inst.instruction |= (inst.operands[2].imm & 0x0f);
9796 }
9797
9798 static void
9799 do_iwmmxt_wzero (void)
9800 {
9801 /* WZERO reg is an alias for WANDN reg, reg, reg. */
9802 inst.instruction |= inst.operands[0].reg;
9803 inst.instruction |= inst.operands[0].reg << 12;
9804 inst.instruction |= inst.operands[0].reg << 16;
9805 }
9806
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  /* Shift/rotate insns taking either a register or (iWMMXt2 only) a
     5-bit immediate third operand.  */
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* A zero shift count is not directly encodable; rewrite the
	   opcode (bits 20-23 select the operation/size) into an
	   equivalent full-width rotate or a plain move.  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  */
    inst.operands[2].imm &= 0x1f;
    /* Immediate form is unconditional (0xf prefix); the 5-bit count is
       split with its top bit at bit 8 and the rest in the low nibble.  */
    inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
9856 \f
9857 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
9858 operations first, then control, shift, and load/store. */
9859
9860 /* Insns like "foo X,Y,Z". */
9861
static void
do_mav_triple (void)
{
  /* Maverick three-register insn: operands at bits 16-19, 0-3
     and 12-15 respectively.  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}
9869
9870 /* Insns like "foo W,X,Y,Z".
9871 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
9872
static void
do_mav_quad (void)
{
  /* Maverick four-register insn: accumulator number at bits 5-6,
     then registers at bits 12-15, 16-19 and 0-3.  */
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.instruction |= inst.operands[3].reg;
}
9881
9882 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
static void
do_mav_dspsc (void)
{
  /* cfmvsc32: only the source MVDX register is encoded; the DSPSC
     destination is implicit.  */
  inst.instruction |= inst.operands[1].reg << 12;
}
9888
9889 /* Maverick shift immediate instructions.
9890 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
9891 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
9892
9893 static void
9894 do_mav_shift (void)
9895 {
9896 int imm = inst.operands[2].imm;
9897
9898 inst.instruction |= inst.operands[0].reg << 12;
9899 inst.instruction |= inst.operands[1].reg << 16;
9900
9901 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
9902 Bits 5-7 of the insn should have bits 4-6 of the immediate.
9903 Bit 4 should be 0. */
9904 imm = (imm & 0xf) | ((imm & 0x70) << 1);
9905
9906 inst.instruction |= imm;
9907 }
9908 \f
9909 /* XScale instructions. Also sorted arithmetic before move. */
9910
9911 /* Xscale multiply-accumulate (argument parse)
9912 MIAcc acc0,Rm,Rs
9913 MIAPHcc acc0,Rm,Rs
9914 MIAxycc acc0,Rm,Rs. */
9915
static void
do_xsc_mia (void)
{
  /* MIA/MIAPH/MIAxy acc0, Rm, Rs: acc0 is implicit; Rm in bits 0-3,
     Rs in bits 12-15.  */
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}
9922
9923 /* Xscale move-accumulator-register (argument parse)
9924
9925 MARcc acc0,RdLo,RdHi. */
9926
static void
do_xsc_mar (void)
{
  /* MAR acc0, RdLo, RdHi: RdLo in bits 12-15, RdHi in bits 16-19.  */
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
9933
9934 /* Xscale move-register-accumulator (argument parse)
9935
9936 MRAcc RdLo,RdHi,acc0. */
9937
9938 static void
9939 do_xsc_mra (void)
9940 {
9941 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
9942 inst.instruction |= inst.operands[0].reg << 12;
9943 inst.instruction |= inst.operands[1].reg << 16;
9944 }
9945 \f
9946 /* Encoding functions relevant only to Thumb. */
9947
9948 /* inst.operands[i] is a shifted-register operand; encode
9949 it into inst.instruction in the format used by Thumb32. */
9950
static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  /* Thumb32 data-processing insns only take immediate shift counts.  */
  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR with a zero shift amount.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      /* Amount 32 is only representable for LSR and ASR.  */
      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	/* LSR/ASR #32 are encoded with an amount field of zero.  */
	value = 0;

      /* The 5-bit amount is split: bits 2-4 at insn bits 12-14,
	 bits 0-1 at insn bits 6-7.  */
      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
9982
9983
9984 /* inst.operands[i] was set up by parse_address. Encode it into a
9985 Thumb32 format load or store instruction. Reject forms that cannot
9986 be used with such instructions. If is_t is true, reject forms that
9987 cannot be used with a T instruction; if is_d is true, reject forms
9988 that cannot be used with a D instruction. If it is a store insn,
9989 reject PC in Rn. */
9990
static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* Register-offset form [Rn, Rm {, LSL #shift}].  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  constraint (inst.reloc.exp.X_op != O_constant,
		      _("expression too complex"));
	  /* Only LSL #0..#3 can be encoded (bits 4-5).  */
	  constraint (inst.reloc.exp.X_add_number < 0
		      || inst.reloc.exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
	}
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* Immediate-offset / pre-indexed form [Rn, #off]{!}.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      /* PC-relative addressing is only valid for loads.  */
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      if (is_d)
	{
	  /* Doubleword insns use a different P/W bit layout.  */
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* Post-indexed form [Rn], #off — writeback is implied.  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
10062
10063 /* Table of Thumb instructions which exist in both 16- and 32-bit
10064 encodings (the latter only in post-V6T2 cores). The index is the
10065 value used in the insns table below. When there is more than one
10066 possible 16-bit encoding for the instruction, this table always
10067 holds variant (1).
10068 Also contains several pseudo-instructions used during relaxation. */
/* Each X() entry is (mnemonic suffix, 16-bit opcode, 32-bit opcode).  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),                    \
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */ \
  X(_push,  b400, e92d0000), /* stmdb sp!,... */ \
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* 16-bit opcodes, indexed by (code - T16_32_OFFSET - 1).  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* 32-bit opcodes, same indexing; bit 20 marks flag-setting forms.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef  T16_32_TAB
10168
10169 /* Thumb instruction encoders, in alphabetical order. */
10170
10171 /* ADDW or SUBW. */
10172
10173 static void
10174 do_t_add_sub_w (void)
10175 {
10176 int Rd, Rn;
10177
10178 Rd = inst.operands[0].reg;
10179 Rn = inst.operands[1].reg;
10180
10181 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10182 is the SP-{plus,minus}-immediate form of the instruction. */
10183 if (Rn == REG_SP)
10184 constraint (Rd == REG_PC, BAD_PC);
10185 else
10186 reject_bad_reg (Rd);
10187
10188 inst.instruction |= (Rn << 16) | (Rd << 8);
10189 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
10190 }
10191
10192 /* Parse an add or subtract instruction. We get here with inst.instruction
10193 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
10194
static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */

  /* A PC-writing ADD/SUB must be the last insn of an IT block.  */
  if (Rd == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      /* The narrow (16-bit) encodings set flags outside an IT block
	 and don't inside one, so a narrow form is only possible when
	 the requested S bit agrees with the current IT state.  */
      if (flags)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (!inst.operands[2].isreg)
	{
	  int add;

	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
		  /* No explicit ".n" qualifier: record the mnemonic so
		     the relaxation machinery may widen this later.  */
		  if (inst.size_req != 2)
		    inst.relax = opcode;
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      if (Rd == REG_PC)
		{
		  /* Only the exception-return form SUBS PC, LR, #imm8
		     may write the PC here.  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.reloc.exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.reloc.exp.X_add_number < 0
			      || inst.reloc.exp.X_add_number > 0xff,
			     _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.reloc.exp.X_add_number;
		  inst.reloc.type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  unsigned int value = inst.reloc.exp.X_add_number;
	  unsigned int shift = inst.operands[2].shift_kind;

	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      /* Canonicalise so that Rn is the operand that
			 differs from Rd before encoding ADD (high regs).  */
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
		      _("shift value over 3 not allowed in thumb mode"));
	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
		      _("only LSL shift allowed in thumb mode"));
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Pre-unified syntax: only 16-bit encodings are available and
	 the flag-setting spellings are rejected.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
10402
10403 static void
10404 do_t_adr (void)
10405 {
10406 unsigned Rd;
10407
10408 Rd = inst.operands[0].reg;
10409 reject_bad_reg (Rd);
10410
10411 if (unified_syntax && inst.size_req == 0 && Rd <= 7)
10412 {
10413 /* Defer to section relaxation. */
10414 inst.relax = inst.instruction;
10415 inst.instruction = THUMB_OP16 (inst.instruction);
10416 inst.instruction |= Rd << 4;
10417 }
10418 else if (unified_syntax && inst.size_req != 2)
10419 {
10420 /* Generate a 32-bit opcode. */
10421 inst.instruction = THUMB_OP32 (inst.instruction);
10422 inst.instruction |= Rd << 8;
10423 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
10424 inst.reloc.pc_rel = 1;
10425 }
10426 else
10427 {
10428 /* Generate a 16-bit opcode. */
10429 inst.instruction = THUMB_OP16 (inst.instruction);
10430 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
10431 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
10432 inst.reloc.pc_rel = 1;
10433
10434 inst.instruction |= Rd << 4;
10435 }
10436 }
10437
10438 /* Arithmetic instructions for which there is just one 16-bit
10439 instruction encoding, and it allows only two low registers.
10440 For maximal compatibility with ARM syntax, we allow three register
10441 operands even when Thumb-32 instructions are not available, as long
10442 as the first two are identical. For instance, both "sbc r0,r1" and
10443 "sbc r0,r0,r1" are allowed. */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  The
	     narrow form sets flags iff we're outside an IT block, so
	     it is only usable when that matches the requested S bit.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  /* The 16-bit encoding is two-operand, so the destination
	     must also be the first source.  */
	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
10526
10527 /* Similarly, but for instructions where the arithmetic operation is
10528 commutative, so we can allow either of them to be different from
10529 the destination operand in a 16-bit instruction. For instance, all
10530 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
10531 accepted. */
10532 static void
10533 do_t_arit3c (void)
10534 {
10535 int Rd, Rs, Rn;
10536
10537 Rd = inst.operands[0].reg;
10538 Rs = (inst.operands[1].present
10539 ? inst.operands[1].reg /* Rd, Rs, foo */
10540 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
10541 Rn = inst.operands[2].reg;
10542
10543 reject_bad_reg (Rd);
10544 reject_bad_reg (Rs);
10545 if (inst.operands[2].isreg)
10546 reject_bad_reg (Rn);
10547
10548 if (unified_syntax)
10549 {
10550 if (!inst.operands[2].isreg)
10551 {
10552 /* For an immediate, we always generate a 32-bit opcode;
10553 section relaxation will shrink it later if possible. */
10554 inst.instruction = THUMB_OP32 (inst.instruction);
10555 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10556 inst.instruction |= Rd << 8;
10557 inst.instruction |= Rs << 16;
10558 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10559 }
10560 else
10561 {
10562 bfd_boolean narrow;
10563
10564 /* See if we can do this with a 16-bit instruction. */
10565 if (THUMB_SETS_FLAGS (inst.instruction))
10566 narrow = !in_it_block ();
10567 else
10568 narrow = in_it_block ();
10569
10570 if (Rd > 7 || Rn > 7 || Rs > 7)
10571 narrow = FALSE;
10572 if (inst.operands[2].shifted)
10573 narrow = FALSE;
10574 if (inst.size_req == 4)
10575 narrow = FALSE;
10576
10577 if (narrow)
10578 {
10579 if (Rd == Rs)
10580 {
10581 inst.instruction = THUMB_OP16 (inst.instruction);
10582 inst.instruction |= Rd;
10583 inst.instruction |= Rn << 3;
10584 return;
10585 }
10586 if (Rd == Rn)
10587 {
10588 inst.instruction = THUMB_OP16 (inst.instruction);
10589 inst.instruction |= Rd;
10590 inst.instruction |= Rs << 3;
10591 return;
10592 }
10593 }
10594
10595 /* If we get here, it can't be done in 16 bits. */
10596 constraint (inst.operands[2].shifted
10597 && inst.operands[2].immisreg,
10598 _("shift must be constant"));
10599 inst.instruction = THUMB_OP32 (inst.instruction);
10600 inst.instruction |= Rd << 8;
10601 inst.instruction |= Rs << 16;
10602 encode_thumb32_shifted_operand (2);
10603 }
10604 }
10605 else
10606 {
10607 /* On its face this is a lie - the instruction does set the
10608 flags. However, the only supported mnemonic in this mode
10609 says it doesn't. */
10610 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
10611
10612 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
10613 _("unshifted register required"));
10614 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
10615
10616 inst.instruction = THUMB_OP16 (inst.instruction);
10617 inst.instruction |= Rd;
10618
10619 if (Rd == Rs)
10620 inst.instruction |= Rn << 3;
10621 else if (Rd == Rn)
10622 inst.instruction |= Rs << 3;
10623 else
10624 constraint (1, _("dest must overlap one source register"));
10625 }
10626 }
10627
10628 static void
10629 do_t_bfc (void)
10630 {
10631 unsigned Rd;
10632 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
10633 constraint (msb > 32, _("bit-field extends past end of register"));
10634 /* The instruction encoding stores the LSB and MSB,
10635 not the LSB and width. */
10636 Rd = inst.operands[0].reg;
10637 reject_bad_reg (Rd);
10638 inst.instruction |= Rd << 8;
10639 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
10640 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
10641 inst.instruction |= msb - 1;
10642 }
10643
10644 static void
10645 do_t_bfi (void)
10646 {
10647 int Rd, Rn;
10648 unsigned int msb;
10649
10650 Rd = inst.operands[0].reg;
10651 reject_bad_reg (Rd);
10652
10653 /* #0 in second position is alternative syntax for bfc, which is
10654 the same instruction but with REG_PC in the Rm field. */
10655 if (!inst.operands[1].isreg)
10656 Rn = REG_PC;
10657 else
10658 {
10659 Rn = inst.operands[1].reg;
10660 reject_bad_reg (Rn);
10661 }
10662
10663 msb = inst.operands[2].imm + inst.operands[3].imm;
10664 constraint (msb > 32, _("bit-field extends past end of register"));
10665 /* The instruction encoding stores the LSB and MSB,
10666 not the LSB and width. */
10667 inst.instruction |= Rd << 8;
10668 inst.instruction |= Rn << 16;
10669 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10670 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10671 inst.instruction |= msb - 1;
10672 }
10673
10674 static void
10675 do_t_bfx (void)
10676 {
10677 unsigned Rd, Rn;
10678
10679 Rd = inst.operands[0].reg;
10680 Rn = inst.operands[1].reg;
10681
10682 reject_bad_reg (Rd);
10683 reject_bad_reg (Rn);
10684
10685 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
10686 _("bit-field extends past end of register"));
10687 inst.instruction |= Rd << 8;
10688 inst.instruction |= Rn << 16;
10689 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10690 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10691 inst.instruction |= inst.operands[3].imm - 1;
10692 }
10693
10694 /* ARM V5 Thumb BLX (argument parse)
10695 BLX <target_addr> which is BLX(1)
10696 BLX <Rm> which is BLX(2)
10697 Unfortunately, there are two different opcodes for this mnemonic.
10698 So, the insns[].value is not used, and the code here zaps values
10699 into inst.instruction.
10700
10701 ??? How to take advantage of the additional two bits of displacement
10702 available in Thumb32 mode? Need new relocation? */
10703
10704 static void
10705 do_t_blx (void)
10706 {
10707 set_it_insn_type_last ();
10708
10709 if (inst.operands[0].isreg)
10710 {
10711 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
10712 /* We have a register, so this is BLX(2). */
10713 inst.instruction |= inst.operands[0].reg << 3;
10714 }
10715 else
10716 {
10717 /* No register. This must be BLX(1). */
10718 inst.instruction = 0xf000e800;
10719 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
10720 }
10721 }
10722
10723 static void
10724 do_t_branch (void)
10725 {
10726 int opcode;
10727 int cond;
10728 int reloc;
10729
10730 cond = inst.cond;
10731 set_it_insn_type (IF_INSIDE_IT_LAST_INSN);
10732
10733 if (in_it_block ())
10734 {
10735 /* Conditional branches inside IT blocks are encoded as unconditional
10736 branches. */
10737 cond = COND_ALWAYS;
10738 }
10739 else
10740 cond = inst.cond;
10741
10742 if (cond != COND_ALWAYS)
10743 opcode = T_MNEM_bcond;
10744 else
10745 opcode = inst.instruction;
10746
10747 if (unified_syntax
10748 && (inst.size_req == 4
10749 || (inst.size_req != 2
10750 && (inst.operands[0].hasreloc
10751 || inst.reloc.exp.X_op == O_constant))))
10752 {
10753 inst.instruction = THUMB_OP32(opcode);
10754 if (cond == COND_ALWAYS)
10755 reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
10756 else
10757 {
10758 gas_assert (cond != 0xF);
10759 inst.instruction |= cond << 22;
10760 reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
10761 }
10762 }
10763 else
10764 {
10765 inst.instruction = THUMB_OP16(opcode);
10766 if (cond == COND_ALWAYS)
10767 reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
10768 else
10769 {
10770 inst.instruction |= cond << 8;
10771 reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
10772 }
10773 /* Allow section relaxation. */
10774 if (unified_syntax && inst.size_req != 2)
10775 inst.relax = opcode;
10776 }
10777 inst.reloc.type = reloc;
10778 inst.reloc.pc_rel = 1;
10779 }
10780
10781 /* Actually do the work for Thumb state bkpt and hlt. The only difference
10782 between the two is the maximum immediate allowed - which is passed in
10783 RANGE. */
10784 static void
10785 do_t_bkpt_hlt1 (int range)
10786 {
10787 constraint (inst.cond != COND_ALWAYS,
10788 _("instruction is always unconditional"));
10789 if (inst.operands[0].present)
10790 {
10791 constraint (inst.operands[0].imm > range,
10792 _("immediate value out of range"));
10793 inst.instruction |= inst.operands[0].imm;
10794 }
10795
10796 set_it_insn_type (NEUTRAL_IT_INSN);
10797 }
10798
/* Thumb HLT: immediate limited to 0-63.  */
static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (63);
}
10804
/* Thumb BKPT: immediate limited to 0-255.  */
static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (255);
}
10810
/* Thumb BL-style branch: encoded with the 23-bit PC-relative reloc.  */
static void
do_t_branch23 (void)
{
  set_it_insn_type_last ();
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);

  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
     this file.  We used to simply ignore the PLT reloc type here --
     the branch encoding is now needed to deal with TLSCALL relocs.
     So if we see a PLT reloc now, put it back to how it used to be to
     keep the preexisting behaviour.  */
  if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
    inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	 inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);
#endif
}
10838
/* BX <Rm>: the register goes in bits 3-6 of the 16-bit encoding.  */
static void
do_t_bx (void)
{
  set_it_insn_type_last ();
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
     should cause the alignment to be checked once it is known.  This is
     because BX PC only works if the instruction is word aligned.  */
}
10848
10849 static void
10850 do_t_bxj (void)
10851 {
10852 int Rm;
10853
10854 set_it_insn_type_last ();
10855 Rm = inst.operands[0].reg;
10856 reject_bad_reg (Rm);
10857 inst.instruction |= Rm << 16;
10858 }
10859
10860 static void
10861 do_t_clz (void)
10862 {
10863 unsigned Rd;
10864 unsigned Rm;
10865
10866 Rd = inst.operands[0].reg;
10867 Rm = inst.operands[1].reg;
10868
10869 reject_bad_reg (Rd);
10870 reject_bad_reg (Rm);
10871
10872 inst.instruction |= Rd << 8;
10873 inst.instruction |= Rm << 16;
10874 inst.instruction |= Rm;
10875 }
10876
/* Thumb CPS: not permitted inside an IT block.  */
static void
do_t_cps (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction |= inst.operands[0].imm;
}
10883
/* CPSIE/CPSID: choose between the 16-bit form and the 32-bit form
   (which additionally allows a mode-change operand).  */
static void
do_t_cpsi (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* Carry the enable/disable (imod) bits over from the 16-bit
	 template into the 32-bit encoding, then add the interrupt
	 flags and the optional mode.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
10911
10912 /* THUMB CPY instruction (argument parse). */
10913
10914 static void
10915 do_t_cpy (void)
10916 {
10917 if (inst.size_req == 4)
10918 {
10919 inst.instruction = THUMB_OP32 (T_MNEM_mov);
10920 inst.instruction |= inst.operands[0].reg << 8;
10921 inst.instruction |= inst.operands[1].reg;
10922 }
10923 else
10924 {
10925 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
10926 inst.instruction |= (inst.operands[0].reg & 0x7);
10927 inst.instruction |= inst.operands[1].reg << 3;
10928 }
10929 }
10930
10931 static void
10932 do_t_cbz (void)
10933 {
10934 set_it_insn_type (OUTSIDE_IT_INSN);
10935 constraint (inst.operands[0].reg > 7, BAD_HIREG);
10936 inst.instruction |= inst.operands[0].reg;
10937 inst.reloc.pc_rel = 1;
10938 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
10939 }
10940
/* DBG hint: the option value is the sole operand.  */
static void
do_t_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
10946
10947 static void
10948 do_t_div (void)
10949 {
10950 unsigned Rd, Rn, Rm;
10951
10952 Rd = inst.operands[0].reg;
10953 Rn = (inst.operands[1].present
10954 ? inst.operands[1].reg : Rd);
10955 Rm = inst.operands[2].reg;
10956
10957 reject_bad_reg (Rd);
10958 reject_bad_reg (Rn);
10959 reject_bad_reg (Rm);
10960
10961 inst.instruction |= Rd << 8;
10962 inst.instruction |= Rn << 16;
10963 inst.instruction |= Rm;
10964 }
10965
10966 static void
10967 do_t_hint (void)
10968 {
10969 if (unified_syntax && inst.size_req == 4)
10970 inst.instruction = THUMB_OP32 (inst.instruction);
10971 else
10972 inst.instruction = THUMB_OP16 (inst.instruction);
10973 }
10974
/* IT (If-Then): record the new IT state and fix up the mask.  The
   parser builds the then/else mask relative to the base condition;
   for an even (negated) condition every remaining then/else bit must
   be flipped.  The lowest set bit terminates the mask, so the tests
   below also recover the block length.  */
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_it_insn_type (IT_INSN);
  now_it.mask = (inst.instruction & 0xf) | 0x10;
  now_it.cc = cond;
  now_it.warn_deprecated = FALSE;

  /* If the condition is a negative condition, invert the mask.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      if ((mask & 0x7) == 0)
	{
	  /* No conversion needed.  */
	  now_it.block_length = 1;
	}
      else if ((mask & 0x3) == 0)
	{
	  mask ^= 0x8;
	  now_it.block_length = 2;
	}
      else if ((mask & 0x1) == 0)
	{
	  mask ^= 0xC;
	  now_it.block_length = 3;
	}
      else
	{
	  mask ^= 0xE;
	  now_it.block_length = 4;
	}

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
11017
/* Helper function used for both push/pop and ldm/stm.  Encodes a
   Thumb-2 load/store multiple with base register BASE, register-list
   bitmap MASK and the given WRITEBACK setting, diagnosing the
   UNPREDICTABLE register-list combinations.  */
static void
encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
{
  bfd_boolean load;

  /* Bit 20 of the pre-set opcode distinguishes load from store.  */
  load = (inst.instruction & (1 << 20)) != 0;

  if (mask & (1 << 13))
    inst.error = _("SP not allowed in register list");

  if ((mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
		   "using write back is UNPREDICTABLE");

  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    /* Loading the PC is a branch: must end any IT block.  */
	    set_it_insn_type_last ();
	}
    }
  else
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");
    }

  /* A list with at most one register set.  */
  if ((mask & (mask - 1)) == 0)
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* Convert the bitmap to a register number and move it into the
	 Rt field (bits 12-15) of the str/ldr encoding.  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  inst.instruction |= base << 16;
}
11081
/* Thumb LDM/STM (and ldmdb/stmdb): pick a 16-bit encoding where one
   exists, otherwise fall through to the Thumb-2 helper.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction: requires ldmia/stmia,
	 no explicit ".w", and only low registers in the list.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  /* MASK is the bit for the base register.  */
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7)
	    {
	      /* stmia needs writeback; ldmia needs writeback exactly
		 when the base is not in the register list.  */
	      if (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask)))
		{
		  if (inst.instruction == T_MNEM_stmia
		      && (inst.operands[1].imm & mask)
		      && (inst.operands[1].imm & (mask - 1)))
		    as_warn (_("value stored for r%d is UNKNOWN"),
			     inst.operands[0].reg);

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= inst.operands[0].reg << 8;
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* This means 1 register in reg list one of 3 situations:
		     1. Instruction is stmia, but without writeback.
		     2. lmdia without writeback, but with Rn not in
			reglist.
		     3. ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     case 1 and 2 which can be converted into a 16-bit
		     str or ldr.  The SP cases are handled below.  */
		  unsigned long opcode;
		  /* First, record an error for Case 3.  */
		  if (inst.operands[1].imm & mask
		      && inst.operands[0].writeback)
		    inst.error =
			_("having the base register in the register list when "
			  "using write back is UNPREDICTABLE");

		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
			    : T_MNEM_ldr);
		  inst.instruction = THUMB_OP16 (opcode);
		  inst.instruction |= inst.operands[0].reg << 3;
		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
		  narrow = TRUE;
		}
	    }
	  else if (inst.operands[0].reg == REG_SP)
	    {
	      /* SP base: these are really push/pop, or SP-relative
		 str/ldr for a single register.  */
	      if (inst.operands[0].writeback)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_push : T_MNEM_pop);
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
		  narrow = TRUE;
		}
	    }
	}

      if (!narrow)
	{
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
				inst.operands[0].writeback);
	}
    }
  else
    {
      /* Pre-unified syntax: only narrow ldmia/stmia exist.  */
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNKNOWN"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
11209
11210 static void
11211 do_t_ldrex (void)
11212 {
11213 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
11214 || inst.operands[1].postind || inst.operands[1].writeback
11215 || inst.operands[1].immisreg || inst.operands[1].shifted
11216 || inst.operands[1].negative,
11217 BAD_ADDR_MODE);
11218
11219 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
11220
11221 inst.instruction |= inst.operands[0].reg << 12;
11222 inst.instruction |= inst.operands[1].reg << 16;
11223 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
11224 }
11225
11226 static void
11227 do_t_ldrexd (void)
11228 {
11229 if (!inst.operands[1].present)
11230 {
11231 constraint (inst.operands[0].reg == REG_LR,
11232 _("r14 not allowed as first register "
11233 "when second register is omitted"));
11234 inst.operands[1].reg = inst.operands[0].reg + 1;
11235 }
11236 constraint (inst.operands[0].reg == inst.operands[1].reg,
11237 BAD_OVERLAP);
11238
11239 inst.instruction |= inst.operands[0].reg << 12;
11240 inst.instruction |= inst.operands[1].reg << 8;
11241 inst.instruction |= inst.operands[2].reg << 16;
11242 }
11243
/* Encode a Thumb load/store (LDR/STR and byte/halfword/signed
   variants), selecting between 16-bit and 32-bit encodings based on
   the registers, addressing mode, and any explicit .n/.w size
   request.  */
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  /* A load that writes the PC must be the last instruction in an IT
     block.  */
  if (inst.operands[0].isreg
      && !inst.operands[0].preind
      && inst.operands[0].reg == REG_PC)
    set_it_insn_type_last ();

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  /* Literal operand: try to satisfy it with a literal-pool
	     load (or equivalent MOV) first.  */
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Rik] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
		reject_bad_reg (inst.operands[1].imm);
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* PC- and SP-relative forms have dedicated 16-bit
		     opcodes.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.reloc.pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		/* No explicit size: let relaxation choose 16 vs 32
		   bits.  */
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */

      /* Warning for Erratum 752419.  */
      if (opcode == T_MNEM_ldr
	  && inst.operands[0].reg == REG_SP
	  && inst.operands[1].writeback == 1
	  && !inst.operands[1].immisreg)
	{
	  if (no_cpu_selected ()
	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
	    as_warn (_("This instruction may be unpredictable "
		       "if executed on M-profile cores "
		       "with interrupts enabled."));
	}

      /* Do some validations regarding addressing modes.  */
      if (inst.operands[1].immisreg)
	reject_bad_reg (inst.operands[1].imm);

      constraint (inst.operands[1].writeback == 1
		  && inst.operands[0].reg == inst.operands[1].reg,
		  BAD_OVERLAP);

      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      check_ldr_r15_aligned ();
      return;
    }

  /* From here on: pre-unified (divided) syntax, 16-bit only.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

 op16:
  /* Map each immediate-offset opcode to its register-offset twin.  */
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
11430
11431 static void
11432 do_t_ldstd (void)
11433 {
11434 if (!inst.operands[1].present)
11435 {
11436 inst.operands[1].reg = inst.operands[0].reg + 1;
11437 constraint (inst.operands[0].reg == REG_LR,
11438 _("r14 not allowed here"));
11439 constraint (inst.operands[0].reg == REG_R12,
11440 _("r12 not allowed here"));
11441 }
11442
11443 if (inst.operands[2].writeback
11444 && (inst.operands[0].reg == inst.operands[2].reg
11445 || inst.operands[1].reg == inst.operands[2].reg))
11446 as_warn (_("base register written back, and overlaps "
11447 "one of transfer registers"));
11448
11449 inst.instruction |= inst.operands[0].reg << 12;
11450 inst.instruction |= inst.operands[1].reg << 8;
11451 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
11452 }
11453
/* Encode a Thumb-2 unprivileged load/store (LDRT/STRT and friends):
   Rt in bits <15:12>, then the "T" addressing form.  */
static void
do_t_ldstt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
}
11460
11461 static void
11462 do_t_mla (void)
11463 {
11464 unsigned Rd, Rn, Rm, Ra;
11465
11466 Rd = inst.operands[0].reg;
11467 Rn = inst.operands[1].reg;
11468 Rm = inst.operands[2].reg;
11469 Ra = inst.operands[3].reg;
11470
11471 reject_bad_reg (Rd);
11472 reject_bad_reg (Rn);
11473 reject_bad_reg (Rm);
11474 reject_bad_reg (Ra);
11475
11476 inst.instruction |= Rd << 8;
11477 inst.instruction |= Rn << 16;
11478 inst.instruction |= Rm;
11479 inst.instruction |= Ra << 12;
11480 }
11481
11482 static void
11483 do_t_mlal (void)
11484 {
11485 unsigned RdLo, RdHi, Rn, Rm;
11486
11487 RdLo = inst.operands[0].reg;
11488 RdHi = inst.operands[1].reg;
11489 Rn = inst.operands[2].reg;
11490 Rm = inst.operands[3].reg;
11491
11492 reject_bad_reg (RdLo);
11493 reject_bad_reg (RdHi);
11494 reject_bad_reg (Rn);
11495 reject_bad_reg (Rm);
11496
11497 inst.instruction |= RdLo << 12;
11498 inst.instruction |= RdHi << 8;
11499 inst.instruction |= Rn << 16;
11500 inst.instruction |= Rm;
11501 }
11502
/* Encode Thumb MOV/MOVS/CMP with a register or immediate second
   operand, choosing between 16-bit and 32-bit encodings, and handling
   the special forms (MOVS PC,LR; register-shifted moves; hi-reg
   moves).  */
static void
do_t_mov_cmp (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* Writing the PC ends any IT block.  */
  if (Rn == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      /* Destination field position: bits <11:8> for MOV forms,
	 <19:16> otherwise.  */
      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean narrow;
      bfd_boolean low_regs;

      low_regs = (Rn <= 7 && Rm <= 7);
      opcode = inst.instruction;
      /* The 16-bit MOVS sets flags: usable outside an IT block, while
	 plain MOV/CMP narrow forms are usable inside.  */
      if (in_it_block ())
	narrow = opcode != T_MNEM_movs;
      else
	narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
	  || inst.operands[1].shifted)
	narrow = FALSE;

      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
	  && !inst.operands[1].shifted
	  && Rn == REG_PC
	  && Rm == REG_LR)
	{
	  inst.instruction = T2_SUBS_PC_LR;
	  return;
	}

      /* Per-opcode register legality checks.  */
      if (opcode == T_MNEM_cmp)
	{
	  constraint (Rn == REG_PC, BAD_PC);
	  if (narrow)
	    {
	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
		 but valid.  */
	      warn_deprecated_sp (Rm);
	      /* R15 was documented as a valid choice for Rm in ARMv6,
		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
		 tools reject R15, so we do too.  */
	      constraint (Rm == REG_PC, BAD_PC);
	    }
	  else
	    reject_bad_reg (Rm);
	}
      else if (opcode == T_MNEM_mov
	       || opcode == T_MNEM_movs)
	{
	  if (inst.operands[1].isreg)
	    {
	      if (opcode == T_MNEM_movs)
		{
		  reject_bad_reg (Rn);
		  reject_bad_reg (Rm);
		}
	      else if (narrow)
		{
		  /* This is mov.n.  */
		  if ((Rn == REG_SP || Rn == REG_PC)
		      && (Rm == REG_SP || Rm == REG_PC))
		    {
		      as_tsktsk (_("Use of r%u as a source register is "
				   "deprecated when r%u is the destination "
				   "register."), Rm, Rn);
		    }
		}
	      else
		{
		  /* This is mov.w.  */
		  constraint (Rn == REG_PC, BAD_PC);
		  constraint (Rm == REG_PC, BAD_PC);
		  constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
		}
	    }
	  else
	    reject_bad_reg (Rn);
	}

      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand.  */
	  if (!in_it_block () && opcode == T_MNEM_mov)
	    narrow = 0;
	  if (low_regs && narrow)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= Rn << 8;
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
	      else
		/* Unsized: defer the 16/32-bit choice to relaxation.  */
		inst.relax = opcode;
	    }
	  else
	    {
	      /* 32-bit modified-immediate form.  */
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= Rn << r0off;
	      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
	       && (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs))
	{
	  /* Register shifts are encoded as separate shift instructions.  */
	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);

	  if (in_it_block ())
	    narrow = !flags;
	  else
	    narrow = flags;

	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (!low_regs || inst.operands[1].imm > 7)
	    narrow = FALSE;

	  /* The 16-bit shift-by-register form requires Rd == Rn.  */
	  if (Rn != Rm)
	    narrow = FALSE;

	  switch (inst.operands[1].shift_kind)
	    {
	    case SHIFT_LSL:
	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
	      break;
	    case SHIFT_ASR:
	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
	      break;
	    case SHIFT_LSR:
	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
	      break;
	    case SHIFT_ROR:
	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
	      break;
	    default:
	      abort ();
	    }

	  inst.instruction = opcode;
	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= inst.operands[1].imm << 3;
	    }
	  else
	    {
	      if (flags)
		inst.instruction |= CONDS_BIT;

	      inst.instruction |= Rn << 8;
	      inst.instruction |= Rm << 16;
	      inst.instruction |= inst.operands[1].imm;
	    }
	}
      else if (!narrow)
	{
	  /* Some mov with immediate shift have narrow variants.
	     Register shifts are handled above.  */
	  if (low_regs && inst.operands[1].shifted
	      && (inst.instruction == T_MNEM_mov
		  || inst.instruction == T_MNEM_movs))
	    {
	      if (in_it_block ())
		narrow = (inst.instruction == T_MNEM_mov);
	      else
		narrow = (inst.instruction == T_MNEM_movs);
	    }

	  if (narrow)
	    {
	      switch (inst.operands[1].shift_kind)
		{
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		default: narrow = FALSE; break;
		}
	    }

	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
      else
	/* Narrow, unshifted register operand.  */
	switch (inst.instruction)
	  {
	  case T_MNEM_mov:
	    /* In v4t or v5t a move of two lowregs produces unpredictable
	       results.  Don't allow this.  */
	    if (low_regs)
	      {
		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
			    "MOV Rd, Rs with two low registers is not "
			    "permitted on this architecture");
		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
					arm_ext_v6);
	      }

	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (Rn & 0x8) << 4;
	    inst.instruction |= (Rn & 0x7);
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_movs:
	    /* We know we have low registers at this point.
	       Generate LSLS Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_LSL_I;
	    inst.instruction |= Rn;
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_cmp:
	    if (low_regs)
	      {
		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= Rn;
		inst.instruction |= Rm << 3;
	      }
	    else
	      {
		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (Rn & 0x8) << 4;
		inst.instruction |= (Rn & 0x7);
		inst.instruction |= Rm << 3;
	      }
	    break;
	  }
      return;
    }

  /* Pre-unified (divided) syntax: 16-bit only.  */
  inst.instruction = THUMB_OP16 (inst.instruction);

  /* PR 10443: Do not silently ignore shifted operands.  */
  constraint (inst.operands[1].shifted,
	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));

  if (inst.operands[1].isreg)
    {
      if (Rn < 8 && Rm < 8)
	{
	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
	     since a MOV instruction produces unpredictable results.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_ADD_I3;
	  else
	    inst.instruction = T_OPCODE_CMP_LR;

	  inst.instruction |= Rn;
	  inst.instruction |= Rm << 3;
	}
      else
	{
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_MOV_HR;
	  else
	    inst.instruction = T_OPCODE_CMP_HR;
	  do_t_cpy ();
	}
    }
  else
    {
      constraint (Rn > 7,
		  _("only lo regs allowed with immediate"));
      inst.instruction |= Rn << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
11791
/* Encode Thumb-2 MOVW/MOVT.  A known constant is packed directly into
   the split imm4:i:imm3:imm8 fields; otherwise a MOVW/MOVT relocation
   is converted to its Thumb flavour.  */
static void
do_t_mov16 (void)
{
  unsigned Rd;
  bfd_vma imm;
  bfd_boolean top;

  /* Bit 23 distinguishes MOVT (top half) from MOVW.  */
  top = (inst.instruction & 0x00800000) != 0;
  if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
    {
      /* :lower16: only pairs with MOVW.  */
      constraint (top, _(":lower16: not allowed this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
    }
  else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
    {
      /* :upper16: only pairs with MOVT.  */
      constraint (!top, _(":upper16: not allowed this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
    }

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  inst.instruction |= Rd << 8;
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      /* Constant is known now: encode it in place instead of emitting
	 a relocation.  */
      imm = inst.reloc.exp.X_add_number;
      inst.instruction |= (imm & 0xf000) << 4;
      inst.instruction |= (imm & 0x0800) << 15;
      inst.instruction |= (imm & 0x0700) << 4;
      inst.instruction |= (imm & 0x00ff);
    }
}
11824
/* Encode Thumb MVN/MVNS/TST/CMN/CMP-style two-operand instructions,
   selecting a 16-bit encoding where registers, shift state, and IT
   context allow.  */
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* CMP/CMN tolerate SP as Rn; the other opcodes reject SP and PC.  */
  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      /* Destination field: bits <11:8> for MVN forms, <19:16>
	 otherwise.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn
	       || inst.instruction == T_MNEM_tst)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Divided syntax: only plain low-register 16-bit forms exist.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
11904
/* Encode Thumb MRS (read a status/special register into Rd).  */
static void
do_t_mrs (void)
{
  unsigned Rd;

  /* Some VFP system-register reads can be spelled as MRS; try that
     first.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      unsigned br = inst.operands[1].reg;
      /* NOTE(review): `br` appears to be a pre-packed special/banked
	 register descriptor from operand parsing (fields at bits 16+,
	 0x300, and SPSR_BIT) rather than a plain register number —
	 verify against the operand parser.  */
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698:  The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bfd_boolean m_profile =
	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
	  constraint ((flags != 0) && m_profile, _("selected processor does "
		      "not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      inst.instruction |= 0xf0000;
    }
}
11952
/* Encode Thumb MSR (write Rn to a status/special register).  */
static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  /* Some VFP system-register writes can be spelled as MSR; try that
     first.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698:  The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bfd_boolean m_profile =
	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		    && (bits & ~(PSR_s | PSR_f)) != 0)
		   || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		       && bits != PSR_f)) && m_profile,
		  _("selected processor does not support requested special "
		    "purpose register"));
    }
  else
    constraint ((flags & 0xff) != 0, _("selected processor does not support "
				       "requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  /* Scatter the flag bits into the R bit, mask, and extension
     fields.  */
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
11999
/* Encode Thumb MUL/MULS.  The 16-bit form requires low registers and
   the destination to overlap one source operand.  */
static void
do_t_mul (void)
{
  bfd_boolean narrow;
  unsigned Rd, Rn, Rm;

  /* Two-operand form: MUL Rd, Rm means MUL Rd, Rd, Rm.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  if (unified_syntax)
    {
      if (inst.size_req == 4
	  || (Rd != Rn
	      && Rd != Rm)
	  || Rn > 7
	  || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_muls)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
    }
  else
    {
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);
      narrow = TRUE;
    }

  if (narrow)
    {
      /* 16-bit MULS/Conditional MUL.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rn)
	inst.instruction |= Rm << 3;
      else if (Rd == Rm)
	inst.instruction |= Rn << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
  else
    {
      constraint (inst.instruction != T_MNEM_mul,
		  _("Thumb-2 MUL must not set flags"));
      /* 32-bit MUL.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rn << 16;
      inst.instruction |= Rm << 0;

      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
      reject_bad_reg (Rm);
    }
}
12062
12063 static void
12064 do_t_mull (void)
12065 {
12066 unsigned RdLo, RdHi, Rn, Rm;
12067
12068 RdLo = inst.operands[0].reg;
12069 RdHi = inst.operands[1].reg;
12070 Rn = inst.operands[2].reg;
12071 Rm = inst.operands[3].reg;
12072
12073 reject_bad_reg (RdLo);
12074 reject_bad_reg (RdHi);
12075 reject_bad_reg (Rn);
12076 reject_bad_reg (Rm);
12077
12078 inst.instruction |= RdLo << 12;
12079 inst.instruction |= RdHi << 8;
12080 inst.instruction |= Rn << 16;
12081 inst.instruction |= Rm;
12082
12083 if (RdLo == RdHi)
12084 as_tsktsk (_("rdhi and rdlo must be different"));
12085 }
12086
/* Encode Thumb NOP and NOP-compatible hint instructions.  */
static void
do_t_nop (void)
{
  /* Hints are allowed anywhere within an IT block.  */
  set_it_insn_type (NEUTRAL_IT_INSN);

  if (unified_syntax)
    {
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
	{
	  /* Wide hints (or explicit .w) use the 32-bit encoding.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].imm;
	}
      else
	{
	  /* PR9722: Check for Thumb2 availability before
	     generating a thumb2 nop instruction.  */
	  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= inst.operands[0].imm << 4;
	    }
	  else
	    /* Fall back to MOV r8, r8, the traditional Thumb-1 NOP.  */
	    inst.instruction = 0x46c0;
	}
    }
  else
    {
      constraint (inst.operands[0].present,
		  _("Thumb does not support NOP with hints"));
      inst.instruction = 0x46c0;
    }
}
12119
12120 static void
12121 do_t_neg (void)
12122 {
12123 if (unified_syntax)
12124 {
12125 bfd_boolean narrow;
12126
12127 if (THUMB_SETS_FLAGS (inst.instruction))
12128 narrow = !in_it_block ();
12129 else
12130 narrow = in_it_block ();
12131 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
12132 narrow = FALSE;
12133 if (inst.size_req == 4)
12134 narrow = FALSE;
12135
12136 if (!narrow)
12137 {
12138 inst.instruction = THUMB_OP32 (inst.instruction);
12139 inst.instruction |= inst.operands[0].reg << 8;
12140 inst.instruction |= inst.operands[1].reg << 16;
12141 }
12142 else
12143 {
12144 inst.instruction = THUMB_OP16 (inst.instruction);
12145 inst.instruction |= inst.operands[0].reg;
12146 inst.instruction |= inst.operands[1].reg << 3;
12147 }
12148 }
12149 else
12150 {
12151 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
12152 BAD_HIREG);
12153 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
12154
12155 inst.instruction = THUMB_OP16 (inst.instruction);
12156 inst.instruction |= inst.operands[0].reg;
12157 inst.instruction |= inst.operands[1].reg << 3;
12158 }
12159 }
12160
12161 static void
12162 do_t_orn (void)
12163 {
12164 unsigned Rd, Rn;
12165
12166 Rd = inst.operands[0].reg;
12167 Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
12168
12169 reject_bad_reg (Rd);
12170 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
12171 reject_bad_reg (Rn);
12172
12173 inst.instruction |= Rd << 8;
12174 inst.instruction |= Rn << 16;
12175
12176 if (!inst.operands[2].isreg)
12177 {
12178 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
12179 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
12180 }
12181 else
12182 {
12183 unsigned Rm;
12184
12185 Rm = inst.operands[2].reg;
12186 reject_bad_reg (Rm);
12187
12188 constraint (inst.operands[2].shifted
12189 && inst.operands[2].immisreg,
12190 _("shift must be constant"));
12191 encode_thumb32_shifted_operand (2);
12192 }
12193 }
12194
12195 static void
12196 do_t_pkhbt (void)
12197 {
12198 unsigned Rd, Rn, Rm;
12199
12200 Rd = inst.operands[0].reg;
12201 Rn = inst.operands[1].reg;
12202 Rm = inst.operands[2].reg;
12203
12204 reject_bad_reg (Rd);
12205 reject_bad_reg (Rn);
12206 reject_bad_reg (Rm);
12207
12208 inst.instruction |= Rd << 8;
12209 inst.instruction |= Rn << 16;
12210 inst.instruction |= Rm;
12211 if (inst.operands[3].present)
12212 {
12213 unsigned int val = inst.reloc.exp.X_add_number;
12214 constraint (inst.reloc.exp.X_op != O_constant,
12215 _("expression too complex"));
12216 inst.instruction |= (val & 0x1c) << 10;
12217 inst.instruction |= (val & 0x03) << 6;
12218 }
12219 }
12220
12221 static void
12222 do_t_pkhtb (void)
12223 {
12224 if (!inst.operands[3].present)
12225 {
12226 unsigned Rtmp;
12227
12228 inst.instruction &= ~0x00000020;
12229
12230 /* PR 10168. Swap the Rm and Rn registers. */
12231 Rtmp = inst.operands[1].reg;
12232 inst.operands[1].reg = inst.operands[2].reg;
12233 inst.operands[2].reg = Rtmp;
12234 }
12235 do_t_pkhbt ();
12236 }
12237
/* Encode the address operand of a Thumb-2 preload hint (PLD/PLI
   family); a register offset must not be SP or PC.  */
static void
do_t_pld (void)
{
  if (inst.operands[0].immisreg)
    reject_bad_reg (inst.operands[0].imm);

  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
12246
/* Encode Thumb PUSH/POP, picking the narrowest encoding that can
   represent the register list.  */
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  if (inst.size_req != 4 && (mask & ~0xff) == 0)
    /* Low registers only: plain 16-bit encoding.  */
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if (inst.size_req != 4
	   && (mask & ~0xff) == (1 << (inst.instruction == T_MNEM_push
				       ? REG_LR : REG_PC)))
    {
      /* Low registers plus LR (push) or PC (pop): 16-bit form with
	 the extra-register bit.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      /* Anything else needs the 32-bit SP-based LDM/STM form.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_ldmstm (13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
12279
12280 static void
12281 do_t_rbit (void)
12282 {
12283 unsigned Rd, Rm;
12284
12285 Rd = inst.operands[0].reg;
12286 Rm = inst.operands[1].reg;
12287
12288 reject_bad_reg (Rd);
12289 reject_bad_reg (Rm);
12290
12291 inst.instruction |= Rd << 8;
12292 inst.instruction |= Rm << 16;
12293 inst.instruction |= Rm;
12294 }
12295
12296 static void
12297 do_t_rev (void)
12298 {
12299 unsigned Rd, Rm;
12300
12301 Rd = inst.operands[0].reg;
12302 Rm = inst.operands[1].reg;
12303
12304 reject_bad_reg (Rd);
12305 reject_bad_reg (Rm);
12306
12307 if (Rd <= 7 && Rm <= 7
12308 && inst.size_req != 4)
12309 {
12310 inst.instruction = THUMB_OP16 (inst.instruction);
12311 inst.instruction |= Rd;
12312 inst.instruction |= Rm << 3;
12313 }
12314 else if (unified_syntax)
12315 {
12316 inst.instruction = THUMB_OP32 (inst.instruction);
12317 inst.instruction |= Rd << 8;
12318 inst.instruction |= Rm << 16;
12319 inst.instruction |= Rm;
12320 }
12321 else
12322 inst.error = BAD_HIREG;
12323 }
12324
12325 static void
12326 do_t_rrx (void)
12327 {
12328 unsigned Rd, Rm;
12329
12330 Rd = inst.operands[0].reg;
12331 Rm = inst.operands[1].reg;
12332
12333 reject_bad_reg (Rd);
12334 reject_bad_reg (Rm);
12335
12336 inst.instruction |= Rd << 8;
12337 inst.instruction |= Rm;
12338 }
12339
/* Encode Thumb RSB/RSBS (reverse subtract).  RSBS Rd, Rs, #0 is
   turned into the 16-bit NEGS when possible.  */
static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  /* Two-operand form: Rd, foo means Rd, Rd, foo.  */
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Bit 20 is the S (flag-setting) bit.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      if (inst.reloc.exp.X_op != O_constant
	  || inst.reloc.exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.reloc.type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  /* 32-bit modified-immediate form.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
12394
/* Encode Thumb SETEND.  */
static void
do_t_setend (void)
{
  /* SETEND is deprecated from ARMv8 onwards.  */
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    as_tsktsk (_("setend use is deprecated for ARMv8"));

  /* Not permitted inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);
  /* Bit 3 selects big-endian.  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x8;
}
12406
/* Encode the Thumb shift instructions ASR, LSL, LSR and ROR (with or
   without the flag-setting 's' suffix).  A missing source operand
   defaults to the destination register.  Under unified syntax we
   choose between the 16-bit and 32-bit encodings; otherwise only the
   original 16-bit, flag-setting, low-register forms are accepted.  */
static void
do_t_shift (void)
{
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* The 16-bit encodings set the flags outside an IT block and
	 leave them alone inside one, so they are only usable when
	 that matches the mnemonic's flag-setting behaviour.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      /* Narrow encodings only reach the low registers...  */
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      /* ...have no immediate-ROR form...  */
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      /* ...and the register-shift form is destructive (Rd == Rn),
	 with a low shift register.  */
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      /* An explicit .w suffix forces the wide encoding.  */
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* Immediate shifts are encoded as MOV{S} Rd, Rn,
		 <shift> #imm in the 32-bit instruction set.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.reloc.type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Pre-UAL syntax: flag-setting mnemonics are rejected and only
	 low registers are reachable.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;

	  /* PR 12854: Error on extraneous shifts.  */
	  constraint (inst.operands[2].shifted,
		      _("extraneous shift as part of operand to shift insn"));
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
12554
12555 static void
12556 do_t_simd (void)
12557 {
12558 unsigned Rd, Rn, Rm;
12559
12560 Rd = inst.operands[0].reg;
12561 Rn = inst.operands[1].reg;
12562 Rm = inst.operands[2].reg;
12563
12564 reject_bad_reg (Rd);
12565 reject_bad_reg (Rn);
12566 reject_bad_reg (Rm);
12567
12568 inst.instruction |= Rd << 8;
12569 inst.instruction |= Rn << 16;
12570 inst.instruction |= Rm;
12571 }
12572
12573 static void
12574 do_t_simd2 (void)
12575 {
12576 unsigned Rd, Rn, Rm;
12577
12578 Rd = inst.operands[0].reg;
12579 Rm = inst.operands[1].reg;
12580 Rn = inst.operands[2].reg;
12581
12582 reject_bad_reg (Rd);
12583 reject_bad_reg (Rn);
12584 reject_bad_reg (Rm);
12585
12586 inst.instruction |= Rd << 8;
12587 inst.instruction |= Rn << 16;
12588 inst.instruction |= Rm;
12589 }
12590
12591 static void
12592 do_t_smc (void)
12593 {
12594 unsigned int value = inst.reloc.exp.X_add_number;
12595 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
12596 _("SMC is not permitted on this architecture"));
12597 constraint (inst.reloc.exp.X_op != O_constant,
12598 _("expression too complex"));
12599 inst.reloc.type = BFD_RELOC_UNUSED;
12600 inst.instruction |= (value & 0xf000) >> 12;
12601 inst.instruction |= (value & 0x0ff0);
12602 inst.instruction |= (value & 0x000f) << 16;
12603 /* PR gas/15623: SMC instructions must be last in an IT block. */
12604 set_it_insn_type_last ();
12605 }
12606
12607 static void
12608 do_t_hvc (void)
12609 {
12610 unsigned int value = inst.reloc.exp.X_add_number;
12611
12612 inst.reloc.type = BFD_RELOC_UNUSED;
12613 inst.instruction |= (value & 0x0fff);
12614 inst.instruction |= (value & 0xf000) << 4;
12615 }
12616
12617 static void
12618 do_t_ssat_usat (int bias)
12619 {
12620 unsigned Rd, Rn;
12621
12622 Rd = inst.operands[0].reg;
12623 Rn = inst.operands[2].reg;
12624
12625 reject_bad_reg (Rd);
12626 reject_bad_reg (Rn);
12627
12628 inst.instruction |= Rd << 8;
12629 inst.instruction |= inst.operands[1].imm - bias;
12630 inst.instruction |= Rn << 16;
12631
12632 if (inst.operands[3].present)
12633 {
12634 offsetT shift_amount = inst.reloc.exp.X_add_number;
12635
12636 inst.reloc.type = BFD_RELOC_UNUSED;
12637
12638 constraint (inst.reloc.exp.X_op != O_constant,
12639 _("expression too complex"));
12640
12641 if (shift_amount != 0)
12642 {
12643 constraint (shift_amount > 31,
12644 _("shift expression is too large"));
12645
12646 if (inst.operands[3].shift_kind == SHIFT_ASR)
12647 inst.instruction |= 0x00200000; /* sh bit. */
12648
12649 inst.instruction |= (shift_amount & 0x1c) << 10;
12650 inst.instruction |= (shift_amount & 0x03) << 6;
12651 }
12652 }
12653 }
12654
/* Encode Thumb-2 SSAT.  The signed saturation width is 1-based, so
   the shared encoder is called with a bias of 1.  */
static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
12660
12661 static void
12662 do_t_ssat16 (void)
12663 {
12664 unsigned Rd, Rn;
12665
12666 Rd = inst.operands[0].reg;
12667 Rn = inst.operands[2].reg;
12668
12669 reject_bad_reg (Rd);
12670 reject_bad_reg (Rn);
12671
12672 inst.instruction |= Rd << 8;
12673 inst.instruction |= inst.operands[1].imm - 1;
12674 inst.instruction |= Rn << 16;
12675 }
12676
12677 static void
12678 do_t_strex (void)
12679 {
12680 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
12681 || inst.operands[2].postind || inst.operands[2].writeback
12682 || inst.operands[2].immisreg || inst.operands[2].shifted
12683 || inst.operands[2].negative,
12684 BAD_ADDR_MODE);
12685
12686 constraint (inst.operands[2].reg == REG_PC, BAD_PC);
12687
12688 inst.instruction |= inst.operands[0].reg << 8;
12689 inst.instruction |= inst.operands[1].reg << 12;
12690 inst.instruction |= inst.operands[2].reg << 16;
12691 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
12692 }
12693
12694 static void
12695 do_t_strexd (void)
12696 {
12697 if (!inst.operands[2].present)
12698 inst.operands[2].reg = inst.operands[1].reg + 1;
12699
12700 constraint (inst.operands[0].reg == inst.operands[1].reg
12701 || inst.operands[0].reg == inst.operands[2].reg
12702 || inst.operands[0].reg == inst.operands[3].reg,
12703 BAD_OVERLAP);
12704
12705 inst.instruction |= inst.operands[0].reg;
12706 inst.instruction |= inst.operands[1].reg << 12;
12707 inst.instruction |= inst.operands[2].reg << 8;
12708 inst.instruction |= inst.operands[3].reg << 16;
12709 }
12710
12711 static void
12712 do_t_sxtah (void)
12713 {
12714 unsigned Rd, Rn, Rm;
12715
12716 Rd = inst.operands[0].reg;
12717 Rn = inst.operands[1].reg;
12718 Rm = inst.operands[2].reg;
12719
12720 reject_bad_reg (Rd);
12721 reject_bad_reg (Rn);
12722 reject_bad_reg (Rm);
12723
12724 inst.instruction |= Rd << 8;
12725 inst.instruction |= Rn << 16;
12726 inst.instruction |= Rm;
12727 inst.instruction |= inst.operands[3].imm << 4;
12728 }
12729
12730 static void
12731 do_t_sxth (void)
12732 {
12733 unsigned Rd, Rm;
12734
12735 Rd = inst.operands[0].reg;
12736 Rm = inst.operands[1].reg;
12737
12738 reject_bad_reg (Rd);
12739 reject_bad_reg (Rm);
12740
12741 if (inst.instruction <= 0xffff
12742 && inst.size_req != 4
12743 && Rd <= 7 && Rm <= 7
12744 && (!inst.operands[2].present || inst.operands[2].imm == 0))
12745 {
12746 inst.instruction = THUMB_OP16 (inst.instruction);
12747 inst.instruction |= Rd;
12748 inst.instruction |= Rm << 3;
12749 }
12750 else if (unified_syntax)
12751 {
12752 if (inst.instruction <= 0xffff)
12753 inst.instruction = THUMB_OP32 (inst.instruction);
12754 inst.instruction |= Rd << 8;
12755 inst.instruction |= Rm;
12756 inst.instruction |= inst.operands[2].imm << 4;
12757 }
12758 else
12759 {
12760 constraint (inst.operands[2].present && inst.operands[2].imm != 0,
12761 _("Thumb encoding does not support rotation"));
12762 constraint (1, BAD_HIREG);
12763 }
12764 }
12765
/* Encode SVC (formerly SWI).  The immediate is left to be filled in
   by a BFD_RELOC_ARM_SWI fixup.  */
static void
do_t_swi (void)
{
  /* We have to do the following check manually as ARM_EXT_OS only applies
     to ARM_EXT_V6M.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6m))
    {
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_os)
	  /* This only applies to the v6-M, however, not later architectures.  */
	  && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7))
	as_bad (_("SVC is not permitted on this architecture"));
      /* Record that the OS extension was needed by this input.  */
      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, arm_ext_os);
    }

  inst.reloc.type = BFD_RELOC_ARM_SWI;
}
12782
12783 static void
12784 do_t_tb (void)
12785 {
12786 unsigned Rn, Rm;
12787 int half;
12788
12789 half = (inst.instruction & 0x10) != 0;
12790 set_it_insn_type_last ();
12791 constraint (inst.operands[0].immisreg,
12792 _("instruction requires register index"));
12793
12794 Rn = inst.operands[0].reg;
12795 Rm = inst.operands[0].imm;
12796
12797 constraint (Rn == REG_SP, BAD_SP);
12798 reject_bad_reg (Rm);
12799
12800 constraint (!half && inst.operands[0].shifted,
12801 _("instruction does not allow shifted index"));
12802 inst.instruction |= (Rn << 16) | Rm;
12803 }
12804
12805 static void
12806 do_t_udf (void)
12807 {
12808 if (!inst.operands[0].present)
12809 inst.operands[0].imm = 0;
12810
12811 if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
12812 {
12813 constraint (inst.size_req == 2,
12814 _("immediate value out of range"));
12815 inst.instruction = THUMB_OP32 (inst.instruction);
12816 inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
12817 inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
12818 }
12819 else
12820 {
12821 inst.instruction = THUMB_OP16 (inst.instruction);
12822 inst.instruction |= inst.operands[0].imm;
12823 }
12824
12825 set_it_insn_type (NEUTRAL_IT_INSN);
12826 }
12827
12828
/* Encode Thumb-2 USAT.  The unsigned saturation width is 0-based, so
   the shared encoder is called with a bias of 0.  */
static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
12834
12835 static void
12836 do_t_usat16 (void)
12837 {
12838 unsigned Rd, Rn;
12839
12840 Rd = inst.operands[0].reg;
12841 Rn = inst.operands[2].reg;
12842
12843 reject_bad_reg (Rd);
12844 reject_bad_reg (Rn);
12845
12846 inst.instruction |= Rd << 8;
12847 inst.instruction |= inst.operands[1].imm;
12848 inst.instruction |= Rn << 16;
12849 }
12850
12851 /* Neon instruction encoder helpers. */
12852
12853 /* Encodings for the different types for various Neon opcodes. */
12854
/* An "invalid" code for the following tables.  */
#define N_INV -1u

/* One row of the Neon opcode-overload table: the three alternative
   base encodings an overloaded mnemonic can map to (see NEON_ENC_TAB
   and the NEON_ENC_* accessors below).  N_INV marks a form that does
   not exist for that mnemonic.  */
struct neon_tab_entry
{
  unsigned integer;
  unsigned float_or_poly;
  unsigned scalar_or_imm;
};
12864
/* Map overloaded Neon opcodes to their respective encodings.  Each
   X() row gives, for one mnemonic, three alternative base encodings;
   which column is used depends on the NEON_ENC_* accessor invoked
   (see below), so the columns are reinterpreted for some entries:
   interleave / lane / all-lanes for VLDn/VSTn, and single / double
   precision for the VFP entries.  */
#define NEON_ENC_TAB							\
  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */  	\
  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
  X(vfma,	N_INV, 0x0000c10, N_INV),		\
  X(vfms,	N_INV, 0x0200c10, N_INV),		\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
  X(vshl,	0x0000400, N_INV,     0x0800510),	\
  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
  X(vand,	0x0000110, N_INV,     0x0800030),	\
  X(vbic,	0x0100110, N_INV,     0x0800030),	\
  X(veor,	0x1000110, N_INV,     N_INV),		\
  X(vorn,	0x0300110, N_INV,     0x0800010),	\
  X(vorr,	0x0200110, N_INV,     0x0800010),	\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),		\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
  X(vst2,	0x0000100, 0x0800100, N_INV),		\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
  X(vst3,	0x0000200, 0x0800200, N_INV),		\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
  X(vst4,	0x0000300, 0x0800300, N_INV),		\
  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
  X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
  X(vnmla,      0xe100a40, 0xe100b40, N_INV),		\
  X(vnmls,      0xe100a00, 0xe100b00, N_INV),		\
  X(vfnma,      0xe900a40, 0xe900b40, N_INV),		\
  X(vfnms,      0xe900a00, 0xe900b00, N_INV),		\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
  X(vcmpez,	0xeb50ac0, 0xeb50bc0, N_INV),		\
  X(vseleq,	0xe000a00, N_INV,     N_INV),		\
  X(vselvs,	0xe100a00, N_INV,     N_INV),		\
  X(vselge,	0xe200a00, N_INV,     N_INV),		\
  X(vselgt,	0xe300a00, N_INV,     N_INV),		\
  X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),		\
  X(vminnm,	0xe800a40, 0x3200f10, N_INV),		\
  X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),		\
  X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),		\
  X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),		\
  X(aes,	0x3b00300, N_INV,     N_INV),		\
  X(sha3op,	0x2000c00, N_INV,     N_INV),		\
  X(sha1h,	0x3b902c0, N_INV,     N_INV),		\
  X(sha2op,     0x3ba0380, N_INV,     N_INV)

/* Mnemonic indices (N_MNEM_vabd, ...), one per table row.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};

/* The encoding table itself, indexed by enum neon_opc.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};
12954
/* Do not use these macros directly; instead, use NEON_ENCODE defined
   below, which also marks the instruction as a Neon one.  Each macro
   selects one column of neon_enc_tab for the current opcode.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
/* These three variants additionally carry over some top bits from the
   current opcode (note FPV8 masks only bits 27:24, not 31:28).  */
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
#define NEON_ENC_FPV8_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))

/* Replace inst.instruction with the table encoding selected by TYPE
   (INTEGER, FLOAT, SCALAR, ...) and flag the insn as Neon.  */
#define NEON_ENCODE(type, inst)					\
  do								\
    {								\
      inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
      inst.is_neon = 1;						\
    }								\
  while (0)
12979
/* Diagnose a Neon type suffix on an instruction that never went
   through NEON_ENCODE (i.e. is not a Neon instruction).  Expands to a
   statement that returns on error, so it may only be used directly
   inside a void encoding function.  */
#define check_neon_suffixes						\
  do								\
    {								\
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
	{							\
	  as_bad (_("invalid neon suffix for non neon instruction"));	\
	  return;						\
	}							\
    }								\
  while (0)
12990
/* Define shapes for instruction operands.  The following mnemonic
   characters are used in this table:

     F - VFP S<n> register
     D - Neon D<n> register
     Q - Neon Q<n> register
     I - Immediate
     S - Scalar
     R - ARM register
     L - D<n> register list

   This table is used to generate various data:
   - enumerations of the form NS_DDR to be used as arguments to
     neon_select_shape.
   - a table classifying shapes into single, double, quad, mixed.
   - a table used to drive neon_select_shape.  */

#define NEON_SHAPE_DEF			\
  X(3, (D, D, D), DOUBLE),		\
  X(3, (Q, Q, Q), QUAD),		\
  X(3, (D, D, I), DOUBLE),		\
  X(3, (Q, Q, I), QUAD),		\
  X(3, (D, D, S), DOUBLE),		\
  X(3, (Q, Q, S), QUAD),		\
  X(2, (D, D), DOUBLE),			\
  X(2, (Q, Q), QUAD),			\
  X(2, (D, S), DOUBLE),			\
  X(2, (Q, S), QUAD),			\
  X(2, (D, R), DOUBLE),			\
  X(2, (Q, R), QUAD),			\
  X(2, (D, I), DOUBLE),			\
  X(2, (Q, I), QUAD),			\
  X(3, (D, L, D), DOUBLE),		\
  X(2, (D, Q), MIXED),			\
  X(2, (Q, D), MIXED),			\
  X(3, (D, Q, I), MIXED),		\
  X(3, (Q, D, I), MIXED),		\
  X(3, (Q, D, D), MIXED),		\
  X(3, (D, Q, Q), MIXED),		\
  X(3, (Q, Q, D), MIXED),		\
  X(3, (Q, D, S), MIXED),		\
  X(3, (D, Q, S), MIXED),		\
  X(4, (D, D, D, I), DOUBLE),		\
  X(4, (Q, Q, Q, I), QUAD),		\
  X(2, (F, F), SINGLE),			\
  X(3, (F, F, F), SINGLE),		\
  X(2, (F, I), SINGLE),			\
  X(2, (F, D), MIXED),			\
  X(2, (D, F), MIXED),			\
  X(3, (F, F, I), MIXED),		\
  X(4, (R, R, F, F), SINGLE),		\
  X(4, (F, F, R, R), SINGLE),		\
  X(3, (D, R, R), DOUBLE),		\
  X(3, (R, R, D), DOUBLE),		\
  X(2, (S, R), SINGLE),			\
  X(2, (R, S), SINGLE),			\
  X(2, (F, R), SINGLE),			\
  X(2, (R, F), SINGLE)

/* Glue an operand list (A, B[, C[, D]]) into the enumerator name
   NS_AB[C[D]].  */
#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D

#define X(N, L, C) S##N L

/* One enumerator per NEON_SHAPE_DEF row (e.g. NS_DDD), plus NS_NULL,
   the list terminator / failure value for neon_select_shape.  */
enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4
13066
/* Width classification of each shape.  The table below is indexed by
   enum neon_shape (the NEON_SHAPE_DEF rows, in order).  */
enum neon_shape_class
{
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};

#define X(N, L, C) SC_##C

static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X
13083
/* The kinds of element a shape position can require; mirrors the
   mnemonic characters documented above NEON_SHAPE_DEF.  */
enum neon_shape_el
{
  SE_F,
  SE_D,
  SE_Q,
  SE_I,
  SE_S,
  SE_R,
  SE_L
};

/* Register widths of above, in bits (0 for the non-register kinds).
   Indexed by enum neon_shape_el.  */
static unsigned neon_shape_el_size[] =
{
  32,	/* SE_F */
  64,	/* SE_D */
  128,	/* SE_Q */
  0,	/* SE_I */
  32,	/* SE_S */
  32,	/* SE_R */
  0	/* SE_L */
};
13106
/* Per-shape operand count and element kinds; neon_shape_tab is
   indexed by enum neon_shape and drives the matching loop in
   neon_select_shape.  */
struct neon_shape_info
{
  unsigned els;
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
};

#define S2(A,B)		{ SE_##A, SE_##B }
#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
13128
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.  */

enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_P64	 = 0x0200000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
  /* NOTE: the modifier values below deliberately reuse the low type
     bits; they are only meaningful in combination with N_EQK.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_P64
};
13173
/* All of the N_EQK modifier bits.  */
#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

/* Commonly-used unions of the type bits above.  */
#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_SUF_32   (N_SU_32 | N_F32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F32)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
13186
/* Select a "shape" for the current instruction (describing register types or
   sizes) from a list of alternatives.  Return NS_NULL if the current
   instruction doesn't fit.  For non-polymorphic shapes, checking is usually
   done as a function of operand parsing, so this function doesn't need to be
   called.  Shapes should be listed in order of decreasing length.  */

static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  /* SHAPE plus the NS_NULL-terminated varargs form the candidate
     list; the first candidate whose element kinds all match the
     parsed operands wins.  */
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  switch (neon_shape_tab[shape].el[j])
	    {
	    /* VFP single-precision register (S<n>).  */
	    case SE_F:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad))
		matches = 0;
	      break;

	    /* Neon double-precision register (D<n>).  */
	    case SE_D:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    /* ARM core register.  */
	    case SE_R:
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    /* Neon quadword register (Q<n>).  */
	    case SE_Q:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    /* Immediate.  */
	    case SE_I:
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    /* Scalar (D<n>[x]).  */
	    case SE_S:
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    /* Register list: anything goes.  */
	    case SE_L:
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
	/* We've matched all the entries in the shape table, and we don't
	   have any left over operands which have not been matched.  */
	break;
    }

  va_end (ap);

  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
13284
13285 /* True if SHAPE is predominantly a quadword operation (most of the time, this
13286 means the Q bit should be set). */
13287
13288 static int
13289 neon_quad (enum neon_shape shape)
13290 {
13291 return neon_shape_class[shape] == SC_QUAD;
13292 }
13293
13294 static void
13295 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
13296 unsigned *g_size)
13297 {
13298 /* Allow modification to be made to types which are constrained to be
13299 based on the key element, based on bits set alongside N_EQK. */
13300 if ((typebits & N_EQK) != 0)
13301 {
13302 if ((typebits & N_HLF) != 0)
13303 *g_size /= 2;
13304 else if ((typebits & N_DBL) != 0)
13305 *g_size *= 2;
13306 if ((typebits & N_SGN) != 0)
13307 *g_type = NT_signed;
13308 else if ((typebits & N_UNS) != 0)
13309 *g_type = NT_unsigned;
13310 else if ((typebits & N_INT) != 0)
13311 *g_type = NT_integer;
13312 else if ((typebits & N_FLT) != 0)
13313 *g_type = NT_float;
13314 else if ((typebits & N_SIZ) != 0)
13315 *g_type = NT_untyped;
13316 }
13317 }
13318
13319 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
13320 operand type, i.e. the single type specified in a Neon instruction when it
13321 is the only one given. */
13322
13323 static struct neon_type_el
13324 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
13325 {
13326 struct neon_type_el dest = *key;
13327
13328 gas_assert ((thisarg & N_EQK) != 0);
13329
13330 neon_modify_type_size (thisarg, &dest.type, &dest.size);
13331
13332 return dest;
13333 }
13334
13335 /* Convert Neon type and size into compact bitmask representation. */
13336
13337 static enum neon_type_mask
13338 type_chk_of_el_type (enum neon_el_type type, unsigned size)
13339 {
13340 switch (type)
13341 {
13342 case NT_untyped:
13343 switch (size)
13344 {
13345 case 8: return N_8;
13346 case 16: return N_16;
13347 case 32: return N_32;
13348 case 64: return N_64;
13349 default: ;
13350 }
13351 break;
13352
13353 case NT_integer:
13354 switch (size)
13355 {
13356 case 8: return N_I8;
13357 case 16: return N_I16;
13358 case 32: return N_I32;
13359 case 64: return N_I64;
13360 default: ;
13361 }
13362 break;
13363
13364 case NT_float:
13365 switch (size)
13366 {
13367 case 16: return N_F16;
13368 case 32: return N_F32;
13369 case 64: return N_F64;
13370 default: ;
13371 }
13372 break;
13373
13374 case NT_poly:
13375 switch (size)
13376 {
13377 case 8: return N_P8;
13378 case 16: return N_P16;
13379 case 64: return N_P64;
13380 default: ;
13381 }
13382 break;
13383
13384 case NT_signed:
13385 switch (size)
13386 {
13387 case 8: return N_S8;
13388 case 16: return N_S16;
13389 case 32: return N_S32;
13390 case 64: return N_S64;
13391 default: ;
13392 }
13393 break;
13394
13395 case NT_unsigned:
13396 switch (size)
13397 {
13398 case 8: return N_U8;
13399 case 16: return N_U16;
13400 case 32: return N_U32;
13401 case 64: return N_U64;
13402 default: ;
13403 }
13404 break;
13405
13406 default: ;
13407 }
13408
13409 return N_UTYP;
13410 }
13411
13412 /* Convert compact Neon bitmask type representation to a type and size. Only
13413 handles the case where a single bit is set in the mask. */
13414
13415 static int
13416 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
13417 enum neon_type_mask mask)
13418 {
13419 if ((mask & N_EQK) != 0)
13420 return FAIL;
13421
13422 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
13423 *size = 8;
13424 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
13425 *size = 16;
13426 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
13427 *size = 32;
13428 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
13429 *size = 64;
13430 else
13431 return FAIL;
13432
13433 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
13434 *type = NT_signed;
13435 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
13436 *type = NT_unsigned;
13437 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
13438 *type = NT_integer;
13439 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
13440 *type = NT_untyped;
13441 else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
13442 *type = NT_poly;
13443 else if ((mask & (N_F16 | N_F32 | N_F64)) != 0)
13444 *type = NT_float;
13445 else
13446 return FAIL;
13447
13448 return SUCCESS;
13449 }
13450
13451 /* Modify a bitmask of allowed types. This is only needed for type
13452 relaxation. */
13453
13454 static unsigned
13455 modify_types_allowed (unsigned allowed, unsigned mods)
13456 {
13457 unsigned size;
13458 enum neon_el_type type;
13459 unsigned destmask;
13460 int i;
13461
13462 destmask = 0;
13463
13464 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
13465 {
13466 if (el_type_of_type_chk (&type, &size,
13467 (enum neon_type_mask) (allowed & i)) == SUCCESS)
13468 {
13469 neon_modify_type_size (mods, &type, &size);
13470 destmask |= type_chk_of_el_type (type, size);
13471 }
13472 }
13473
13474 return destmask;
13475 }
13476
/* Check type and return type classification.
   The manual states (paraphrase): If one datatype is given, it indicates the
   type given in:
    - the second operand, if there is one
    - the operand, if there is no second operand
    - the result, if there are no operands.
   This isn't quite good enough though, so we use a concept of a "key" datatype
   which is set on a per-instruction basis, which is the one which matters when
   only one data type is written.
   Note: this function has side-effects (e.g. filling in missing operands). All
   Neon instructions should call it before performing bit encoding.  */

static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  /* Returned on any failure; callers test for NT_invtype.  */
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  Each vararg is an N_* bitmask of types
     allowed for the corresponding operand; N_KEY marks the key operand.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  /* A type suffix on the mnemonic and per-operand types are mutually
     exclusive.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      /* The single given type belongs to the key operand; derive the
	 others from it.  */
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Two passes: pass 0 records the key operand's type/size/allowed mask;
     pass 1 validates every operand against its allowed mask (resolving
     N_EQK relative to the key recorded in pass 0).  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if (((types_allowed & N_UNT) == 0)
	      && ((g_size == 8 && (types_allowed & N_8) != 0)
		  || (g_size == 16 && (types_allowed & N_16) != 0)
		  || (g_size == 32 && (types_allowed & N_32) != 0)
		  || (g_size == 64 && (types_allowed & N_64) != 0)))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape;
		  unsigned regwidth, match;

		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
		  if (ns == NS_NULL)
		    {
		      first_error (_("invalid instruction shape"));
		      return badtype;
		    }
		  regshape = neon_shape_tab[ns].el[i];
		  regwidth = neon_shape_el_size[regshape];

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (_("bad type in Neon instruction"));
		      return badtype;
		    }
		}
	      else
		{
		  /* N_EQK operand: must equal the key type/size after the
		     requested modifications (halving, doubling, etc.).  */
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}
13665
13666 /* Neon-style VFP instruction forwarding. */
13667
13668 /* Thumb VFP instructions have 0xE in the condition field. */
13669
13670 static void
13671 do_vfp_cond_or_thumb (void)
13672 {
13673 inst.is_neon = 1;
13674
13675 if (thumb_mode)
13676 inst.instruction |= 0xe0000000;
13677 else
13678 inst.instruction |= inst.cond << 28;
13679 }
13680
13681 /* Look up and encode a simple mnemonic, for use as a helper function for the
13682 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
13683 etc. It is assumed that operand parsing has already been done, and that the
13684 operands are in the form expected by the given opcode (this isn't necessarily
13685 the same as the form in which they were parsed, hence some massaging must
13686 take place before this function is called).
13687 Checks current arch version against that in the looked-up opcode. */
13688
13689 static void
13690 do_vfp_nsyn_opcode (const char *opname)
13691 {
13692 const struct asm_opcode *opcode;
13693
13694 opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
13695
13696 if (!opcode)
13697 abort ();
13698
13699 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
13700 thumb_mode ? *opcode->tvariant : *opcode->avariant),
13701 _(BAD_FPU));
13702
13703 inst.is_neon = 1;
13704
13705 if (thumb_mode)
13706 {
13707 inst.instruction = opcode->tvalue;
13708 opcode->tencode ();
13709 }
13710 else
13711 {
13712 inst.instruction = (inst.cond << 28) | opcode->avalue;
13713 opcode->aencode ();
13714 }
13715 }
13716
13717 static void
13718 do_vfp_nsyn_add_sub (enum neon_shape rs)
13719 {
13720 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
13721
13722 if (rs == NS_FFF)
13723 {
13724 if (is_add)
13725 do_vfp_nsyn_opcode ("fadds");
13726 else
13727 do_vfp_nsyn_opcode ("fsubs");
13728 }
13729 else
13730 {
13731 if (is_add)
13732 do_vfp_nsyn_opcode ("faddd");
13733 else
13734 do_vfp_nsyn_opcode ("fsubd");
13735 }
13736 }
13737
13738 /* Check operand types to see if this is a VFP instruction, and if so call
13739 PFN (). */
13740
13741 static int
13742 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
13743 {
13744 enum neon_shape rs;
13745 struct neon_type_el et;
13746
13747 switch (args)
13748 {
13749 case 2:
13750 rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13751 et = neon_check_type (2, rs,
13752 N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13753 break;
13754
13755 case 3:
13756 rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13757 et = neon_check_type (3, rs,
13758 N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13759 break;
13760
13761 default:
13762 abort ();
13763 }
13764
13765 if (et.type != NT_invtype)
13766 {
13767 pfn (rs);
13768 return SUCCESS;
13769 }
13770
13771 inst.error = NULL;
13772 return FAIL;
13773 }
13774
13775 static void
13776 do_vfp_nsyn_mla_mls (enum neon_shape rs)
13777 {
13778 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
13779
13780 if (rs == NS_FFF)
13781 {
13782 if (is_mla)
13783 do_vfp_nsyn_opcode ("fmacs");
13784 else
13785 do_vfp_nsyn_opcode ("fnmacs");
13786 }
13787 else
13788 {
13789 if (is_mla)
13790 do_vfp_nsyn_opcode ("fmacd");
13791 else
13792 do_vfp_nsyn_opcode ("fnmacd");
13793 }
13794 }
13795
13796 static void
13797 do_vfp_nsyn_fma_fms (enum neon_shape rs)
13798 {
13799 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
13800
13801 if (rs == NS_FFF)
13802 {
13803 if (is_fma)
13804 do_vfp_nsyn_opcode ("ffmas");
13805 else
13806 do_vfp_nsyn_opcode ("ffnmas");
13807 }
13808 else
13809 {
13810 if (is_fma)
13811 do_vfp_nsyn_opcode ("ffmad");
13812 else
13813 do_vfp_nsyn_opcode ("ffnmad");
13814 }
13815 }
13816
13817 static void
13818 do_vfp_nsyn_mul (enum neon_shape rs)
13819 {
13820 if (rs == NS_FFF)
13821 do_vfp_nsyn_opcode ("fmuls");
13822 else
13823 do_vfp_nsyn_opcode ("fmuld");
13824 }
13825
13826 static void
13827 do_vfp_nsyn_abs_neg (enum neon_shape rs)
13828 {
13829 int is_neg = (inst.instruction & 0x80) != 0;
13830 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
13831
13832 if (rs == NS_FF)
13833 {
13834 if (is_neg)
13835 do_vfp_nsyn_opcode ("fnegs");
13836 else
13837 do_vfp_nsyn_opcode ("fabss");
13838 }
13839 else
13840 {
13841 if (is_neg)
13842 do_vfp_nsyn_opcode ("fnegd");
13843 else
13844 do_vfp_nsyn_opcode ("fabsd");
13845 }
13846 }
13847
13848 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
13849 insns belong to Neon, and are handled elsewhere. */
13850
13851 static void
13852 do_vfp_nsyn_ldm_stm (int is_dbmode)
13853 {
13854 int is_ldm = (inst.instruction & (1 << 20)) != 0;
13855 if (is_ldm)
13856 {
13857 if (is_dbmode)
13858 do_vfp_nsyn_opcode ("fldmdbs");
13859 else
13860 do_vfp_nsyn_opcode ("fldmias");
13861 }
13862 else
13863 {
13864 if (is_dbmode)
13865 do_vfp_nsyn_opcode ("fstmdbs");
13866 else
13867 do_vfp_nsyn_opcode ("fstmias");
13868 }
13869 }
13870
13871 static void
13872 do_vfp_nsyn_sqrt (void)
13873 {
13874 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13875 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13876
13877 if (rs == NS_FF)
13878 do_vfp_nsyn_opcode ("fsqrts");
13879 else
13880 do_vfp_nsyn_opcode ("fsqrtd");
13881 }
13882
13883 static void
13884 do_vfp_nsyn_div (void)
13885 {
13886 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13887 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
13888 N_F32 | N_F64 | N_KEY | N_VFP);
13889
13890 if (rs == NS_FFF)
13891 do_vfp_nsyn_opcode ("fdivs");
13892 else
13893 do_vfp_nsyn_opcode ("fdivd");
13894 }
13895
13896 static void
13897 do_vfp_nsyn_nmul (void)
13898 {
13899 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13900 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
13901 N_F32 | N_F64 | N_KEY | N_VFP);
13902
13903 if (rs == NS_FFF)
13904 {
13905 NEON_ENCODE (SINGLE, inst);
13906 do_vfp_sp_dyadic ();
13907 }
13908 else
13909 {
13910 NEON_ENCODE (DOUBLE, inst);
13911 do_vfp_dp_rd_rn_rm ();
13912 }
13913 do_vfp_cond_or_thumb ();
13914 }
13915
/* Encode a Neon-syntax VCMP/VCMPE in VFP form.  Two forms exist: a
   register-register compare, and a compare against an immediate (which
   must be zero), the latter rewritten into the vcmpz/vcmpez template.  */

static void
do_vfp_nsyn_cmp (void)
{
  if (inst.operands[1].isreg)
    {
      /* Register-register compare.  */
      enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);

      if (rs == NS_FF)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_monadic ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd_rm ();
	}
    }
  else
    {
      /* Compare with zero: switch the opcode template from vcmp/vcmpe
	 to the corresponding vcmpz/vcmpez form.  */
      enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);

      switch (inst.instruction & 0x0fffffff)
	{
	case N_MNEM_vcmp:
	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
	  break;
	case N_MNEM_vcmpe:
	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
	  break;
	default:
	  abort ();
	}

      if (rs == NS_FI)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_compare_z ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd ();
	}
    }
  do_vfp_cond_or_thumb ();
}
13965
/* Shift the parsed operand up to slot 1 and synthesize "SP!" as operand
   0, converting a push/pop-style instruction into ldm/stm form.  */

static void
nsyn_insert_sp (void)
{
  inst.operands[1] = inst.operands[0];
  /* Clear the slot before setting only the fields that matter.  */
  memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
  inst.operands[0].reg = REG_SP;
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;	/* SP! — base register updated.  */
  inst.operands[0].present = 1;
}
13976
13977 static void
13978 do_vfp_nsyn_push (void)
13979 {
13980 nsyn_insert_sp ();
13981 if (inst.operands[1].issingle)
13982 do_vfp_nsyn_opcode ("fstmdbs");
13983 else
13984 do_vfp_nsyn_opcode ("fstmdbd");
13985 }
13986
13987 static void
13988 do_vfp_nsyn_pop (void)
13989 {
13990 nsyn_insert_sp ();
13991 if (inst.operands[1].issingle)
13992 do_vfp_nsyn_opcode ("fldmias");
13993 else
13994 do_vfp_nsyn_opcode ("fldmiad");
13995 }
13996
13997 /* Fix up Neon data-processing instructions, ORing in the correct bits for
13998 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
13999
14000 static void
14001 neon_dp_fixup (struct arm_it* insn)
14002 {
14003 unsigned int i = insn->instruction;
14004 insn->is_neon = 1;
14005
14006 if (thumb_mode)
14007 {
14008 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
14009 if (i & (1 << 24))
14010 i |= 1 << 28;
14011
14012 i &= ~(1 << 24);
14013
14014 i |= 0xef000000;
14015 }
14016 else
14017 i |= 0xf2000000;
14018
14019 insn->instruction = i;
14020 }
14021
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  */

static unsigned
neon_logbits (unsigned x)
{
  /* ffs returns the 1-based index of the lowest set bit, so for a
     power-of-two size 1 << n it yields n + 1; subtracting 4 maps
     8/16/32/64 to 0/1/2/3.  X is expected to be one of those sizes.  */
  return ffs (x) - 4;
}
14030
14031 #define LOW4(R) ((R) & 0xf)
14032 #define HI1(R) (((R) >> 4) & 1)
14033
14034 /* Encode insns with bit pattern:
14035
14036 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
14037 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
14038
14039 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
14040 different meaning for some instruction. */
14041
14042 static void
14043 neon_three_same (int isquad, int ubit, int size)
14044 {
14045 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14046 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14047 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14048 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14049 inst.instruction |= LOW4 (inst.operands[2].reg);
14050 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
14051 inst.instruction |= (isquad != 0) << 6;
14052 inst.instruction |= (ubit != 0) << 24;
14053 if (size != -1)
14054 inst.instruction |= neon_logbits (size) << 20;
14055
14056 neon_dp_fixup (&inst);
14057 }
14058
14059 /* Encode instructions of the form:
14060
14061 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
14062 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
14063
14064 Don't write size if SIZE == -1. */
14065
14066 static void
14067 neon_two_same (int qbit, int ubit, int size)
14068 {
14069 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14070 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14071 inst.instruction |= LOW4 (inst.operands[1].reg);
14072 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14073 inst.instruction |= (qbit != 0) << 6;
14074 inst.instruction |= (ubit != 0) << 24;
14075
14076 if (size != -1)
14077 inst.instruction |= neon_logbits (size) << 18;
14078
14079 neon_dp_fixup (&inst);
14080 }
14081
14082 /* Neon instruction encoders, in approximate order of appearance. */
14083
14084 static void
14085 do_neon_dyadic_i_su (void)
14086 {
14087 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14088 struct neon_type_el et = neon_check_type (3, rs,
14089 N_EQK, N_EQK, N_SU_32 | N_KEY);
14090 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14091 }
14092
14093 static void
14094 do_neon_dyadic_i64_su (void)
14095 {
14096 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14097 struct neon_type_el et = neon_check_type (3, rs,
14098 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14099 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14100 }
14101
14102 static void
14103 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
14104 unsigned immbits)
14105 {
14106 unsigned size = et.size >> 3;
14107 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14108 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14109 inst.instruction |= LOW4 (inst.operands[1].reg);
14110 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14111 inst.instruction |= (isquad != 0) << 6;
14112 inst.instruction |= immbits << 16;
14113 inst.instruction |= (size >> 3) << 7;
14114 inst.instruction |= (size & 0x7) << 19;
14115 if (write_ubit)
14116 inst.instruction |= (uval != 0) << 24;
14117
14118 neon_dp_fixup (&inst);
14119 }
14120
/* Encode VSHL: immediate form (Dd, Dm, #imm) or the three-register
   form, which needs its last two operands swapped before encoding.  */

static void
do_neon_shl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      /* Immediate shift: shift amount must be 0 .. elsize-1.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
      int imm = inst.operands[2].imm;

      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* VSHL/VQSHL 3-register variants have syntax such as:
	   vshl.xx Dd, Dm, Dn
	 whereas other 3-register operations encoded by neon_three_same have
	 syntax like:
	   vadd.xx Dd, Dn, Dm
	 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
	 here.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
14156
14157 static void
14158 do_neon_qshl_imm (void)
14159 {
14160 if (!inst.operands[2].isreg)
14161 {
14162 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14163 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
14164 int imm = inst.operands[2].imm;
14165
14166 constraint (imm < 0 || (unsigned)imm >= et.size,
14167 _("immediate out of range for shift"));
14168 NEON_ENCODE (IMMED, inst);
14169 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
14170 }
14171 else
14172 {
14173 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14174 struct neon_type_el et = neon_check_type (3, rs,
14175 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
14176 unsigned int tmp;
14177
14178 /* See note in do_neon_shl_imm. */
14179 tmp = inst.operands[2].reg;
14180 inst.operands[2].reg = inst.operands[1].reg;
14181 inst.operands[1].reg = tmp;
14182 NEON_ENCODE (INTEGER, inst);
14183 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14184 }
14185 }
14186
14187 static void
14188 do_neon_rshl (void)
14189 {
14190 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14191 struct neon_type_el et = neon_check_type (3, rs,
14192 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14193 unsigned int tmp;
14194
14195 tmp = inst.operands[2].reg;
14196 inst.operands[2].reg = inst.operands[1].reg;
14197 inst.operands[1].reg = tmp;
14198 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14199 }
14200
/* Compute the cmode field for a VBIC/VORR-style logic immediate of the
   given element SIZE, writing the 8-bit payload through IMMBITS.
   Returns the cmode value, or FAIL (after reporting an error) if the
   immediate cannot be encoded.  */

static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Handle .I8 pseudo-instructions.  */
  if (size == 8)
    {
      /* Unfortunately, this will make everything apart from zero out-of-range.
	 FIXME is this the intended semantics? There doesn't seem much point in
	 accepting .I8 if so.  */
      immediate |= immediate << 8;
      size = 16;
    }

  if (size >= 32)
    {
      /* Try each byte position of a 32-bit immediate.  */
      if (immediate == (immediate & 0x000000ff))
	{
	  *immbits = immediate;
	  return 0x1;
	}
      else if (immediate == (immediate & 0x0000ff00))
	{
	  *immbits = immediate >> 8;
	  return 0x3;
	}
      else if (immediate == (immediate & 0x00ff0000))
	{
	  *immbits = immediate >> 16;
	  return 0x5;
	}
      else if (immediate == (immediate & 0xff000000))
	{
	  *immbits = immediate >> 24;
	  return 0x7;
	}
      /* No single-byte match; fold to 16 bits if the two halves repeat,
	 then fall through to the 16-bit encodings below.  */
      if ((immediate & 0xffff) != (immediate >> 16))
	goto bad_immediate;
      immediate &= 0xffff;
    }

  /* 16-bit encodings: byte in the low or high half.  */
  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return 0x9;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return 0xb;
    }

  bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}
14256
/* Encode Neon bitwise logic instructions (VAND/VBIC/VORR/VORN/VEOR).
   The register-register form ignores any datatype; the immediate form
   is encoded as VBIC/VORR (with VAND/VORN rewritten via immediate
   inversion).  */

static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      /* Three-register form; the datatype does not affect encoding.  */
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      /* Immediate form, either two-operand (Vd, #imm) or three-operand
	 (Vd, Vd, #imm) with matching destination and first source.  */
      const int three_ops_form = (inst.operands[2].present
				  && !inst.operands[2].isreg);
      const int immoperand = (three_ops_form ? 2 : 1);
      enum neon_shape rs = (three_ops_form
			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
      struct neon_type_el et = neon_check_type (2, rs,
	N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
	return;

      if (three_ops_form)
	constraint (inst.operands[0].reg != inst.operands[1].reg,
		    _("first and second operands shall be the same register"));

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[immoperand].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern.  */
	  if (immbits != (inst.operands[immoperand].regisimm ?
			  inst.operands[immoperand].reg : 0))
	    {
	      /* Set immbits to an invalid constant.  */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
14342
14343 static void
14344 do_neon_bitfield (void)
14345 {
14346 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14347 neon_check_type (3, rs, N_IGNORE_TYPE);
14348 neon_three_same (neon_quad (rs), 0, -1);
14349 }
14350
14351 static void
14352 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
14353 unsigned destbits)
14354 {
14355 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14356 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
14357 types | N_KEY);
14358 if (et.type == NT_float)
14359 {
14360 NEON_ENCODE (FLOAT, inst);
14361 neon_three_same (neon_quad (rs), 0, -1);
14362 }
14363 else
14364 {
14365 NEON_ENCODE (INTEGER, inst);
14366 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
14367 }
14368 }
14369
/* Three-register op over signed/unsigned/float 32-bit-or-narrower
   elements; U bit set for unsigned types.  */

static void
do_neon_dyadic_if_su (void)
{
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
14375
static void
do_neon_dyadic_if_su_d (void)
{
  /* This version only allow D registers, but that constraint is enforced during
     operand parsing so we don't need to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
14383
static void
do_neon_dyadic_if_i_d (void)
{
  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
14391
/* Flags for vfp_or_neon_is_neon, selecting which checks and fix-ups to
   perform.  */

enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,	 /* Reject conditional Neon insns (ARM mode).  */
  NEON_CHECK_ARCH = 2,	 /* Require the base Neon extension.  */
  NEON_CHECK_ARCH8 = 4	 /* Require the ARMv8 Neon extension.  */
};
14398
/* Call this function if an instruction which may have belonged to the VFP or
   Neon instruction sets, but turned out to be a Neon instruction (due to the
   operand types involved, etc.). We have to check and/or fix-up a couple of
   things:

     - Make sure the user hasn't attempted to make a Neon instruction
       conditional.
     - Alter the value in the condition code field if necessary.
     - Make sure that the arch supports Neon instructions.

   Which of these operations take place depends on bits from enum
   vfp_or_neon_is_neon_bits.

   WARNING: This function has side effects! If NEON_CHECK_CC is used and the
   current instruction's condition is COND_ALWAYS, the condition field is
   changed to inst.uncond_value. This is necessary because instructions shared
   between VFP and Neon may be conditional for the VFP variants only, and the
   unconditional Neon version must have, e.g., 0xF in the condition field.  */

static int
vfp_or_neon_is_neon (unsigned check)
{
  /* Conditions are always legal in Thumb mode (IT blocks).  */
  if (!thumb_mode && (check & NEON_CHECK_CC))
    {
      if (inst.cond != COND_ALWAYS)
	{
	  first_error (_(BAD_COND));
	  return FAIL;
	}
      /* Force the required unconditional encoding into bits 28-31.  */
      if (inst.uncond_value != -1)
	inst.instruction |= inst.uncond_value << 28;
    }

  /* NB: mark_feature_used also records the feature for object
     attributes, so these checks must not be short-circuited away.  */
  if ((check & NEON_CHECK_ARCH)
      && !mark_feature_used (&fpu_neon_ext_v1))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  if ((check & NEON_CHECK_ARCH8)
      && !mark_feature_used (&fpu_neon_ext_armv8))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  return SUCCESS;
}
14449
/* Encode VADD/VSUB: first try the VFP float form; otherwise fall back
   to the Neon integer/float form.  */

static void
do_neon_addsub_if_i (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
}
14463
14464 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
14465 result to be:
14466 V<op> A,B (A is operand 0, B is operand 2)
14467 to mean:
14468 V<op> A,B,A
14469 not:
14470 V<op> A,B,B
14471 so handle that case specially. */
14472
14473 static void
14474 neon_exchange_operands (void)
14475 {
14476 void *scratch = alloca (sizeof (inst.operands[0]));
14477 if (inst.operands[1].present)
14478 {
14479 /* Swap operands[1] and operands[2]. */
14480 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
14481 inst.operands[1] = inst.operands[2];
14482 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
14483 }
14484 else
14485 {
14486 inst.operands[1] = inst.operands[2];
14487 inst.operands[2] = inst.operands[0];
14488 }
14489 }
14490
/* Encode Neon compare instructions.  REGTYPES is the allowed key-type
   mask for the register-register form, IMMTYPES for the compare-with-
   zero immediate form.  INVERT swaps the source operands (used for
   pseudo-ops like VLT encoded as VGT with reversed operands).  */

static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
	neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      /* Compare against #0: two-register form with immediate shape.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK | N_SIZ, immtypes | N_KEY);

      NEON_ENCODE (IMMED, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Bit 10 selects the float variant; bits 18-19 the element size.  */
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
}
14518
/* Encode VCGE/VCGT-style compares (operand order as written).  */

static void
do_neon_cmp (void)
{
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
}
14524
/* Encode VCLE/VCLT-style compares (pseudo-ops realized by swapping the
   source operands of the opposite comparison).  */

static void
do_neon_cmp_inv (void)
{
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
}
14530
/* Encode VCEQ (equality is sign-insensitive, hence the I/F type mask).  */

static void
do_neon_ceq (void)
{
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
14536
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
   scalars, which are encoded in 5 bits, M : Rm.
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
   index in M.  */

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  if (elsize == 16 && regno <= 7 && elno <= 3)
    return regno | (elno << 3);

  if (elsize == 32 && regno <= 15 && elno <= 1)
    return regno | (elno << 4);

  /* Wrong element size, or register/index out of range for it: report
     the error and return a harmless encoding.  */
  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
14568
14569 /* Encode multiply / multiply-accumulate scalar instructions. */
14570
14571 static void
14572 neon_mul_mac (struct neon_type_el et, int ubit)
14573 {
14574 unsigned scalar;
14575
14576 /* Give a more helpful error message if we have an invalid type. */
14577 if (et.type == NT_invtype)
14578 return;
14579
14580 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
14581 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14582 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14583 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14584 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14585 inst.instruction |= LOW4 (scalar);
14586 inst.instruction |= HI1 (scalar) << 5;
14587 inst.instruction |= (et.type == NT_float) << 8;
14588 inst.instruction |= neon_logbits (et.size) << 20;
14589 inst.instruction |= (ubit != 0) << 24;
14590
14591 neon_dp_fixup (&inst);
14592 }
14593
/* Encode VMLA/VMLS: try the VFP form first, then the Neon scalar or
   three-register form depending on operand 2.  */

static void
do_neon_mac_maybe_scalar (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    {
      /* Multiply-accumulate by scalar (Dm[x]).  */
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      /* The "untyped" case can't happen.  Do this to stop the "U" bit being
	 affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}
14618
14619 static void
14620 do_neon_fmac (void)
14621 {
14622 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
14623 return;
14624
14625 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14626 return;
14627
14628 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
14629 }
14630
/* VTST: three-register same-length operation on 8/16/32-bit elements,
   D or Q form.  */
static void
do_neon_tst (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, et.size);
}

/* VMUL with 3 registers allows the P8 type. The scalar version supports the
   same types as the MAC equivalents. The polynomial type for this instruction
   is encoded the same as the integer type. */

static void
do_neon_mul (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* The scalar variant shares its encoding with the scalar MAC forms.  */
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar ();
  else
    neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
}
14658
/* Saturating doubling multiply (high half), signed 16/32-bit elements
   only, with either a scalar or a vector second multiplicand.  */
static void
do_neon_qdmulh (void)
{
  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
14680
/* Floating-point absolute compare, F32 only.  Passes size -1 because the
   size field is already part of the opcode bit mask.  */
static void
do_neon_fcmp_absolute (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
  /* Size field comes from bit mask.  */
  neon_three_same (neon_quad (rs), 1, -1);
}

/* Inverted absolute compare: same encoding with the source operands
   exchanged.  */
static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}

/* Reciprocal / reciprocal-sqrt "step" instructions, F32 only; size field
   comes from the opcode bit mask.  */
static void
do_neon_step (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, -1);
}
14704
/* VABS / VNEG: try the VFP encoding first; otherwise encode the Neon
   two-register form for signed 8/16/32-bit integer or F32 elements.  */
static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;  /* Vd.  */
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;   /* D.  */
  inst.instruction |= LOW4 (inst.operands[1].reg);        /* Vm.  */
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;    /* M.  */
  inst.instruction |= neon_quad (rs) << 6;                /* Q.  */
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  neon_dp_fixup (&inst);
}
14730
/* VSLI (shift left and insert): immediate must be in [0, size).  */
static void
do_neon_sli (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}

/* VSRI (shift right and insert): immediate must be in [1, size]; a right
   shift of N is encoded as SIZE - N.  */
static void
do_neon_sri (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
}

/* Saturating shift-left-unsigned by immediate; signed source types only.  */
static void
do_neon_qshlu_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
14771
/* VQMOVN: saturating narrowing move, Q source to D destination.  */
static void
do_neon_qmovn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  /* Saturating move where operands can be signed or unsigned, and the
     destination has the same signedness.  */
  NEON_ENCODE (INTEGER, inst);
  /* Signedness selects between two opcode variants.  */
  if (et.type == NT_unsigned)
    inst.instruction |= 0xc0;
  else
    inst.instruction |= 0x80;
  neon_two_same (0, 1, et.size / 2);
}

/* VQMOVUN: saturating narrowing move producing unsigned results from
   signed operands.  */
static void
do_neon_qmovun (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  /* Saturating move with unsigned results.  Operands must be signed.  */
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
14796
/* Saturating shift-right-and-narrow by immediate (VQ{R}SHRN family).
   A zero immediate is rewritten as the equivalent VQMOVN.  */
static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned. */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right. */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>. */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* Right shifts are encoded as SIZE - imm.  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}

/* As above, but for the unsigned-result-from-signed-operand variant
   (VQSHRUN family); a zero immediate becomes VQMOVUN.  */
static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned. */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right. */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>. */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1. */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
14853
/* VMOVN: non-saturating narrowing move, Q source to D destination.  */
static void
do_neon_movn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}

/* Shift-right-and-narrow by immediate; a zero immediate is rewritten as
   the equivalent VMOVN.  */
static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right. */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm> */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for narrowing operation"));
  /* Right shifts are encoded as SIZE - imm.  */
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
14887
/* VSHLL: shift-left-long by immediate.  A shift equal to the element
   size selects the special maximum-shift encoding; anything else is an
   ordinary immediate shift with a stricter type check.  */
static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening. */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant. */
      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* A more-specific type check for non-max versions. */
      et = neon_check_type (2, NS_QDI,
	N_EQK | N_DBL, N_SU_32 | N_KEY);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
14917
/* Check the various types for the VCVT instruction, and return which version
   the current instruction is.  */

/* X-macro table of all conversion flavours.  Each CVT_VAR row gives the
   enumerator suffix, destination type, source type, register-class flags,
   and the VFP mnemonics for the bitshift, plain and round-to-zero forms
   (NULL where no such form exists).  Instantiated several times below
   with different CVT_VAR definitions.  */
#define CVT_FLAVOUR_VAR \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
  /* Half-precision conversions.  */ \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
  /* VFP instructions.  */ \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
  /* VFP instructions with bitshift.  */ \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)

/* Expand each table row to an enumerator name.  */
#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR

/* Return the conversion flavour matching the current instruction's
   operand types under shape RS, or neon_cvt_flavour_invalid.  Tries each
   table row in order via neon_check_type, clearing inst.error on a hit.  */
static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
  if (et.type != NT_invtype)				\
    {							\
      inst.error = NULL;				\
      return (neon_cvt_flavour_##C);			\
    }

  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register. Thus the
     "source" and "destination" registers must have the same width. Hack that
     here by making the size equal to the key (wider, in this case) operand. */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}

/* Rounding modes / behaviours a VCVT variant can request.  */
enum neon_cvt_mode
{
  neon_cvt_mode_a,
  neon_cvt_mode_n,
  neon_cvt_mode_p,
  neon_cvt_mode_m,
  neon_cvt_mode_z,
  neon_cvt_mode_x,
  neon_cvt_mode_r
};
14995
14996 /* Neon-syntax VFP conversions. */
14997
/* Assemble a Neon-syntax conversion as the traditional VFP mnemonic
   selected by FLAVOUR.  Immediate shapes (NS_DDI/NS_QQI/NS_FFI) use the
   bitshift-mnemonic column of the flavour table; others use the plain
   column.  Flavours with a NULL entry emit nothing.  */
static void
do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
{
  const char *opname = 0;

  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
    {
      /* Conversions with immediate bitshift. */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	{
	  opname = enc[flavour];
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("operands 0 and 1 must be the same register"));
	  /* Drop the repeated register: shift the immediate down to
	     operand 1 for the two-operand VFP form.  */
	  inst.operands[1] = inst.operands[2];
	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
	}
    }
  else
    {
      /* Conversions without bitshift. */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	opname = enc[flavour];
    }

  if (opname)
    do_vfp_nsyn_opcode (opname);
}

/* Round-towards-zero conversion (the "Z" mnemonic column), NS_FF/NS_FD
   shapes only.  */
static void
do_vfp_nsyn_cvtz (void)
{
  enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
  const char *enc[] =
    {
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
      CVT_FLAVOUR_VAR
      NULL
#undef CVT_VAR
    };

  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
15058
/* Encode an FP v8 VCVT with an explicit rounding mode (A/N/P/M).
   FLAVOUR selects signedness (op bit) and source width (sz bit); only the
   four float-to-integer flavours are valid here.  */
static void
do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
		      enum neon_cvt_mode mode)
{
  int sz, op;
  int rm;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (flavour == neon_cvt_flavour_s32_f64
      || flavour == neon_cvt_flavour_u32_f64)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* FP v8 instructions are not conditional.  */
  set_it_insn_type (OUTSIDE_IT_INSN);

  switch (flavour)
    {
    case neon_cvt_flavour_s32_f64:
      sz = 1;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f32:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_u32_f64:
      sz = 1;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f32:
      sz = 0;
      op = 0;
      break;
    default:
      first_error (_("invalid instruction shape"));
      return;
    }

  /* Rounding mode field.  */
  switch (mode)
    {
    case neon_cvt_mode_a: rm = 0; break;
    case neon_cvt_mode_n: rm = 1; break;
    case neon_cvt_mode_p: rm = 2; break;
    case neon_cvt_mode_m: rm = 3; break;
    default: first_error (_("invalid rounding mode")); return;
    }

  NEON_ENCODE (FPV8, inst);
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= sz << 8;
  inst.instruction |= op << 7;
  inst.instruction |= rm << 16;
  /* Unconditional encoding.  */
  inst.instruction |= 0xf0000000;
  inst.is_neon = TRUE;
}
15116
/* Common worker for all VCVT variants.  MODE carries the rounding
   behaviour requested by the mnemonic.  Dispatches between the legacy
   VFP encodings, the FP v8 rounding-mode encodings, and the various Neon
   shapes (fixed-point immediate, integer, and half-precision).  */
static void
do_neon_cvt_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
    NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

  /* PR11109: Handle round-to-zero for VCVT conversions. */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
      && (flavour == neon_cvt_flavour_s32_f32
	  || flavour == neon_cvt_flavour_u32_f32
	  || flavour == neon_cvt_flavour_s32_f64
	  || flavour == neon_cvt_flavour_u32_f64)
      && (rs == NS_FD || rs == NS_FF))
    {
      do_vfp_nsyn_cvtz ();
      return;
    }

  /* VFP rather than Neon conversions. */
  if (flavour >= neon_cvt_flavour_first_fp)
    {
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);

      return;
    }

  switch (rs)
    {
    case NS_DDI:
    case NS_QQI:
      {
	/* Fixed-point conversion: flavour indexes the opcode fragment.  */
	unsigned immbits;
	unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };

	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion. */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	/* The immediate field holds 32 minus the fraction bits.  */
	immbits = 32 - inst.operands[2].imm;
	NEON_ENCODE (IMMED, inst);
	if (flavour != neon_cvt_flavour_invalid)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	inst.instruction |= immbits << 16;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DD:
    case NS_QQ:
      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
	{
	  /* Rounding-mode variant (ARMv8 Neon).  */
	  NEON_ENCODE (FLOAT, inst);
	  set_it_insn_type (OUTSIDE_IT_INSN);

	  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	    return;

	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg);
	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	  inst.instruction |= neon_quad (rs) << 6;
	  inst.instruction |= (flavour == neon_cvt_flavour_u32_f32) << 7;
	  inst.instruction |= mode << 8;
	  if (thumb_mode)
	    inst.instruction |= 0xfc000000;
	  else
	    inst.instruction |= 0xf0000000;
	}
      else
	{
	int_encode:
	  {
	    /* Integer conversion: flavour indexes the opcode fragment.  */
	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };

	    NEON_ENCODE (INTEGER, inst);

	    if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	      return;

	    if (flavour != neon_cvt_flavour_invalid)
	      inst.instruction |= enctab[flavour];

	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	    inst.instruction |= LOW4 (inst.operands[1].reg);
	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	    inst.instruction |= neon_quad (rs) << 6;
	    inst.instruction |= 2 << 18;

	    neon_dp_fixup (&inst);
	  }
	}
      break;

    /* Half-precision conversions for Advanced SIMD -- neon. */
    case NS_QD:
    case NS_DQ:

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	{
	  as_bad (_("operand size must match register width"));
	  break;
	}

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	{
	  as_bad (_("operand size must match register width"));
	  break;
	}

      /* Narrowing (DQ) vs widening (QD) half-precision opcode.  */
      if (rs == NS_DQ)
	inst.instruction = 0x3b60600;
      else
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
    }
}
15265
/* Per-mnemonic entry points: each just invokes the common VCVT worker
   with the appropriate rounding mode.  */

/* VCVTR -- round as per FPSCR (mode X).  */
static void
do_neon_cvtr (void)
{
  do_neon_cvt_1 (neon_cvt_mode_x);
}

/* VCVT -- round towards zero (mode Z).  */
static void
do_neon_cvt (void)
{
  do_neon_cvt_1 (neon_cvt_mode_z);
}

/* VCVTA.  */
static void
do_neon_cvta (void)
{
  do_neon_cvt_1 (neon_cvt_mode_a);
}

/* VCVTN.  */
static void
do_neon_cvtn (void)
{
  do_neon_cvt_1 (neon_cvt_mode_n);
}

/* VCVTP.  */
static void
do_neon_cvtp (void)
{
  do_neon_cvt_1 (neon_cvt_mode_p);
}

/* VCVTM.  */
static void
do_neon_cvtm (void)
{
  do_neon_cvt_1 (neon_cvt_mode_m);
}
15301
/* Encode a VCVTB/VCVTT half-precision conversion.  T selects the top
   (vs bottom) half; TO is true when converting TO half precision;
   IS_DOUBLE selects the double-precision variant (requires ARMv8 FP).  */
static void
do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
{
  if (is_double)
    mark_feature_used (&fpu_vfp_ext_armv8);

  /* The D register is the destination when converting from half precision
     to double, and the source when converting to half precision.  */
  encode_arm_vfp_reg (inst.operands[0].reg,
		      (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg,
		      (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= to ? 0x10000 : 0;
  inst.instruction |= t ? 0x80 : 0;
  inst.instruction |= is_double ? 0x100 : 0;
  do_vfp_cond_or_thumb ();
}

/* Determine direction and precision of a VCVTB/VCVTT from the operand
   types, then hand off to do_neon_cvttb_2.  */
static void
do_neon_cvttb_1 (bfd_boolean t)
{
  enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_DF, NS_NULL);

  if (rs == NS_NULL)
    return;
  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
    {
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
    {
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
    }
  else
    /* No matching type combination; neon_check_type has set inst.error.  */
    return;
}
15358
/* VCVTB: operate on the bottom half of the half-precision register.  */
static void
do_neon_cvtb (void)
{
  do_neon_cvttb_1 (FALSE);
}


/* VCVTT: operate on the top half of the half-precision register.  */
static void
do_neon_cvtt (void)
{
  do_neon_cvttb_1 (TRUE);
}
15371
/* Encode an immediate-form VMOV/VMVN.  Finds a cmode/op encoding for the
   (possibly 64-bit) immediate, flipping between VMOV and VMVN with an
   inverted immediate if the value is not directly representable.  */
static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
  op = (inst.instruction & (1 << 5)) != 0;

  /* A 64-bit immediate arrives as two 32-bit halves.  */
  immlo = inst.operands[1].imm;
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only. */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm. */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Rewrite the op bit with the possibly-flipped value.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
15423
/* VMVN: register form is a two-register encoding; immediate form goes
   through the shared move-immediate encoder.  */
static void
do_neon_mvn (void)
{
  if (inst.operands[1].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);

      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
    }
  else
    {
      NEON_ENCODE (IMMED, inst);
      neon_move_immediate ();
    }

  neon_dp_fixup (&inst);
}
15446
15447 /* Encode instructions of form:
15448
15449 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
15450 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
15451
/* Shared encoder for the long/wide/narrow three-register forms, laying
   out the fields per the diagram above.  SIZE goes in bits 21-20; the
   U bit (24) is set for unsigned element types.  */
static void
neon_mixed_length (struct neon_type_el et, unsigned size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;  /* Rd.  */
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;   /* D.  */
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;  /* Rn.  */
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;    /* N.  */
  inst.instruction |= LOW4 (inst.operands[2].reg);        /* Rm.  */
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;    /* M.  */
  inst.instruction |= (et.type == NT_unsigned) << 24;     /* U.  */
  inst.instruction |= neon_logbits (size) << 20;          /* Size.  */

  neon_dp_fixup (&inst);
}
15466
/* Long dyadic operation: Q destination, two D sources, result elements
   twice the source width.  */
static void
do_neon_dyadic_long (void)
{
  /* FIXME: Type checking for lengthening op. */
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}

/* Absolute-difference-and-accumulate long form (integer types only).  */
static void
do_neon_abal (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
15483
/* Shared worker for long multiply(-accumulate) instructions that accept
   either a scalar or a register third operand.
   NOTE(review): the first argument is used to validate the SCALAR form
   and the second the REGISTER form, so the parameter names look swapped
   relative to their use — confirm against the callers below before
   renaming anything.  */
static void
neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
{
  if (inst.operands[2].isscalar)
    {
      struct neon_type_el et = neon_check_type (3, NS_QDS,
	N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      /* For scalar forms the U position carries the signedness.  */
      neon_mul_mac (et, et.type == NT_unsigned);
    }
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      neon_mixed_length (et, et.size);
    }
}

/* Long multiply-accumulate, scalar form allowed (S/U 16/32 scalars).  */
static void
do_neon_mac_maybe_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}

/* Wide dyadic operation: Q destination and first source, D second
   source.  */
static void
do_neon_dyadic_wide (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QQD,
    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}

/* Narrowing dyadic operation: D destination, two Q sources.  */
static void
do_neon_dyadic_narrow (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
  /* Operand sign is unimportant, and the U bit is part of the opcode,
     so force the operand type to integer. */
  et.type = NT_integer;
  neon_mixed_length (et, et.size / 2);
}

/* Saturating doubling multiply long, signed 16/32 only, scalar form
   allowed.  */
static void
do_neon_mul_sat_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
15533
/* VMULL: scalar form shares the long-MAC encoder; the register form
   additionally allows polynomial types P8 and (ARMv8 crypto) P64.  */
static void
do_neon_vmull (void)
{
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);

      if (et.type == NT_poly)
	NEON_ENCODE (POLY, inst);
      else
	NEON_ENCODE (INTEGER, inst);

      /* For polynomial encoding the U bit must be zero, and the size must
	 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
	 obviously, as 0b10). */
      if (et.size == 64)
	{
	  /* Check we're on the correct architecture. */
	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
	    inst.error =
	      _("Instruction form not available on this architecture.");

	  /* Force the 0b10 size encoding for P64.  */
	  et.size = 32;
	}

      neon_mixed_length (et, et.size);
    }
}
15565
/* VEXT: extract a contiguous run of bytes from a register pair.  The
   operand immediate counts elements; the encoded immediate is in bytes
   and must fit the register width (8 bytes for D, 16 for Q).  */
static void
do_neon_ext (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  unsigned imm = (inst.operands[3].imm * et.size) / 8;

  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
	      _("shift out of range"));
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= imm << 8;

  neon_dp_fixup (&inst);
}
15587
/* VREV16/32/64: reverse elements within regions.  The region width is
   implied by the opcode's op field; the element size must be strictly
   smaller than the region or the encoding would be reserved.  */
static void
do_neon_rev (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask. We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction. */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
  gas_assert (elsize != 0);
  constraint (et.size >= elsize,
	      _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
15604
/* Encode VDUP.  Two forms: duplicate one lane of a Neon scalar (Dm[x])
   into every element of the destination, or duplicate an ARM core
   register into every element.  */

static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      /* VDUP.<size> <Dd/Qd>, <Dm[x]>: scalar-source form.  */
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      /* Lane index, pre-shifted so it sits just above the size bits in
	 the imm4 field.  */
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector.  */
      NEON_ENCODE (ARMREG, inst);
      /* Size-dependent opcode bits.  NOTE(review): presumably the b/e
	 size-select bits of VDUP (core register) -- confirm against the
	 ARM ARM.  */
      switch (et.size)
	{
	case 8: inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
15655
15656 /* VMOV has particularly many variations. It can be one of:
15657 0. VMOV<c><q> <Qd>, <Qm>
15658 1. VMOV<c><q> <Dd>, <Dm>
15659 (Register operations, which are VORR with Rm = Rn.)
15660 2. VMOV<c><q>.<dt> <Qd>, #<imm>
15661 3. VMOV<c><q>.<dt> <Dd>, #<imm>
15662 (Immediate loads.)
15663 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
15664 (ARM register to scalar.)
15665 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
15666 (Two ARM registers to vector.)
15667 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
15668 (Scalar to ARM register.)
15669 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
15670 (Vector to two ARM registers.)
15671 8. VMOV.F32 <Sd>, <Sm>
15672 9. VMOV.F64 <Dd>, <Dm>
15673 (VFP register moves.)
15674 10. VMOV.F32 <Sd>, #imm
15675 11. VMOV.F64 <Dd>, #imm
15676 (VFP float immediate load.)
15677 12. VMOV <Rd>, <Sm>
15678 (VFP single to ARM reg.)
15679 13. VMOV <Sd>, <Rm>
15680 (ARM reg to VFP single.)
15681 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
15682 (Two ARM regs to two VFP singles.)
15683 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
15684 (Two VFP singles to two ARM regs.)
15685
15686 These cases can be disambiguated using neon_select_shape, except cases 1/9
15687 and 3/11 which depend on the operand type too.
15688
15689 All the encoded bits are hardcoded by this function.
15690
15691 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
15692 Cases 5, 7 may be used with VFPv2 and above.
15693
15694 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
15695 can specify a type where it doesn't make sense to, and is ignored). */
15696
static void
do_neon_mov (void)
{
  /* Disambiguate the many VMOV variants purely by operand shape; see the
     numbered list in the comment above.  Cases 1/9 and 3/11 additionally
     need the element type to tell them apart.  */
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
    NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
    NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* Case 9: VMOV.F64 is the VFP fcpyd.  */
	  do_vfp_nsyn_opcode ("fcpyd");
	  break;
	}
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;
	/* The architecture manual I have doesn't explicitly state which
	   value the U bit should have for register->register moves, but
	   the equivalent VORR instruction has U = 0, so do that.  */
	inst.instruction = 0x0200110;
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	/* VORR with Rm = Rn: the source register is encoded twice.  */
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
	inst.instruction |= neon_quad (rs) << 6;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 11 (fconstd).  */
	  ldconst = "fconstd";
	  goto encode_fconstd;
	}
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      neon_dp_fixup (&inst);
      break;

    case NS_SR:  /* case 4.  */
      {
	unsigned bcdebits = 0;
	int logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

	/* .<size> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	/* Only 32-bit transfers are valid on plain VFP; 8/16-bit lanes
	   need Neon.  */
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Size selector; the lane index is merged in below.  */
	switch (et.size)
	  {
	  case 8:  bcdebits = 0x8; break;
	  case 16: bcdebits = 0x1; break;
	  case 32: bcdebits = 0x0; break;
	  default: ;
	  }

	bcdebits |= x << logsize;

	inst.instruction = 0xe000b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[1].reg << 12;
	/* Low two selector bits go to bits 6:5, the rest to bits 22:21.  */
	inst.instruction |= (bcdebits & 3) << 5;
	inst.instruction |= (bcdebits >> 2) << 21;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6.  */
      {
	unsigned logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
	unsigned abcdebits = 0;

	/* .<dt> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	/* Signedness matters for narrow lanes: the transfer to the core
	   register either sign- or zero-extends.  */
	et = neon_check_type (2, NS_NULL,
			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Size and signedness selector; lane index merged in below.  */
	switch (et.size)
	  {
	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
	  case 32: abcdebits = 0x00; break;
	  default: ;
	  }

	abcdebits |= x << logsize;
	inst.instruction = 0xe100b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[0].reg << 12;
	/* Low two selector bits go to bits 6:5, the rest to bits 22:21.  */
	inst.instruction |= (abcdebits & 3) << 5;
	inst.instruction |= (abcdebits >> 2) << 21;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
    encode_fconstd:
      /* Cases 10 and 11: only immediates representable in the 8-bit
	 "quarter float" encoding are accepted.  */
      if (is_quarter_float (inst.operands[1].imm))
	{
	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
	  do_vfp_nsyn_opcode (ldconst);
	}
      else
	first_error (_("immediate out of range"));
      break;

    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      break;

    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    case NS_NULL:
      /* neon_select_shape has determined that the instruction
	 shape is wrong and has already set the error message.  */
      break;

    default:
      abort ();
    }
}
15931
15932 static void
15933 do_neon_rshift_round_imm (void)
15934 {
15935 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15936 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
15937 int imm = inst.operands[2].imm;
15938
15939 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
15940 if (imm == 0)
15941 {
15942 inst.operands[2].present = 0;
15943 do_neon_mov ();
15944 return;
15945 }
15946
15947 constraint (imm < 1 || (unsigned)imm > et.size,
15948 _("immediate out of range for shift"));
15949 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
15950 et.size - imm);
15951 }
15952
15953 static void
15954 do_neon_movl (void)
15955 {
15956 struct neon_type_el et = neon_check_type (2, NS_QD,
15957 N_EQK | N_DBL, N_SU_32 | N_KEY);
15958 unsigned sizebits = et.size >> 3;
15959 inst.instruction |= sizebits << 19;
15960 neon_two_same (0, et.type == NT_unsigned, -1);
15961 }
15962
/* Encode VTRN (vector transpose).  Also reached from do_neon_zip_uzp for
   the VZIP/VUZP .32 D-register special case.  */

static void
do_neon_trn (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (neon_quad (rs), 1, et.size);
}
15972
/* Encode VZIP/VUZP.  */

static void
do_neon_zip_uzp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  if (rs == NS_DD && et.size == 32)
    {
      /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
      inst.instruction = N_MNEM_vtrn;
      do_neon_trn ();
      return;
    }
  neon_two_same (neon_quad (rs), 1, et.size);
}
15988
/* Encode VQABS/VQNEG: signed element types only.  */

static void
do_neon_sat_abs_neg (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
15997
/* Encode VPADDL/VPADAL (pairwise add long).  */

static void
do_neon_pair_long (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
  /* Unsigned is encoded in OP field (bit 7) for these instructions.  */
  inst.instruction |= (et.type == NT_unsigned) << 7;
  neon_two_same (neon_quad (rs), 1, et.size);
}
16007
/* Encode VRECPE/VRSQRTE: bit 8 distinguishes the float variant from the
   unsigned-integer one.  */

static void
do_neon_recip_est (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
  inst.instruction |= (et.type == NT_float) << 8;
  neon_two_same (neon_quad (rs), 1, et.size);
}
16017
/* Encode VCLS (count leading sign bits): signed types only.  */

static void
do_neon_cls (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
16026
/* Encode VCLZ (count leading zeros): any integer type.  */

static void
do_neon_clz (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
16035
/* Encode VCNT (population count): 8-bit elements only.  */

static void
do_neon_cnt (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_INT, N_8 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
16044
/* Encode VSWP (swap vectors).  Untyped: -1 size means no size field.  */

static void
do_neon_swp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  neon_two_same (neon_quad (rs), 1, -1);
}
16051
/* Encode VTBL/VTBX (table lookup).  The table is a list of 1-4 D
   registers; its length minus one goes in bits 9:8.  */

static void
do_neon_tbl_tbx (void)
{
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  listlenbits = inst.operands[1].imm - 1;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  neon_dp_fixup (&inst);
}
16075
/* Encode VLDM/VSTM (and the DB variants) for D-register lists; single
   precision lists are delegated to the VFP encoder.  */

static void
do_neon_ldm_stm (void)
{
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* Offset field counts words: two per double register.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
16105
16106 static void
16107 do_neon_ldr_str (void)
16108 {
16109 int is_ldr = (inst.instruction & (1 << 20)) != 0;
16110
16111 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
16112 And is UNPREDICTABLE in thumb mode. */
16113 if (!is_ldr
16114 && inst.operands[1].reg == REG_PC
16115 && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
16116 {
16117 if (thumb_mode)
16118 inst.error = _("Use of PC here is UNPREDICTABLE");
16119 else if (warn_on_deprecated)
16120 as_tsktsk (_("Use of PC here is deprecated"));
16121 }
16122
16123 if (inst.operands[0].issingle)
16124 {
16125 if (is_ldr)
16126 do_vfp_nsyn_opcode ("flds");
16127 else
16128 do_vfp_nsyn_opcode ("fsts");
16129 }
16130 else
16131 {
16132 if (is_ldr)
16133 do_vfp_nsyn_opcode ("fldd");
16134 else
16135 do_vfp_nsyn_opcode ("fstd");
16136 }
16137 }
16138
16139 /* "interleave" version also handles non-interleaving register VLD1/VST1
16140 instructions. */
16141
static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL,
					    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1.  */
       -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2.  */
       -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3.  */
       -1, -1, -1, -1, -1, -1, 0x0, 0x1  /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* Validate the alignment specifier (in bits, held in the high byte of
     the address-operand immediate) against the register-list length.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 2;
	break;
      case 256:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 3;
	break;
      default:
      bad_alignment:
	first_error (_("bad alignment"));
	return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6].  We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask.  Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
	| (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));
  /* 64-bit elements exist only for VLD1/VST1.  */
  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
	      _("bad element type for instruction"));

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
16207
16208 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
16209 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
16210 otherwise. The variable arguments are a list of pairs of legal (size, align)
16211 values, terminated with -1. */
16212
16213 static int
16214 neon_alignment_bit (int size, int align, int *do_align, ...)
16215 {
16216 va_list ap;
16217 int result = FAIL, thissize, thisalign;
16218
16219 if (!inst.operands[1].immisalign)
16220 {
16221 *do_align = 0;
16222 return SUCCESS;
16223 }
16224
16225 va_start (ap, do_align);
16226
16227 do
16228 {
16229 thissize = va_arg (ap, int);
16230 if (thissize == -1)
16231 break;
16232 thisalign = va_arg (ap, int);
16233
16234 if (size == thissize && align == thisalign)
16235 result = SUCCESS;
16236 }
16237 while (result != SUCCESS);
16238
16239 va_end (ap);
16240
16241 if (result == SUCCESS)
16242 *do_align = 1;
16243 else
16244 first_error (_("unsupported alignment for instruction"));
16245
16246 return result;
16247 }
16248
/* Encode single-lane VLD<n>/VST<n> forms.  */

static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;
  int logsize = neon_logbits (et.size);
  /* Alignment in bits, from the high byte of the address immediate.  */
  int align = inst.operands[1].imm >> 8;
  /* <n> minus one, from bits 9:8 of the initial bitmask.  */
  int n = (inst.instruction >> 8) & 3;
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  /* Each <n> has its own legal (size, alignment) pairs and its own way
     of packing the alignment into bits 5:4.  */
  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
				       32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
16333
16334 /* Encode single n-element structure to all lanes VLD<n> instructions. */
16335
static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;

  if (et.type == NT_invtype)
    return;

  /* Dispatch on <n> (bits 9:8 of the initial bitmask); each VLD<n> has
     its own legal list lengths and alignment pairs.  */
  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_align, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_align, 8, 16, 16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* The size field value 0x3 is reserved for the .32/:128 case.  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  /* The alignment-present flag is bit 4 in all these encodings.  */
  inst.instruction |= do_align << 4;
}
16407
/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   apart from bits [11:4]).  */
16410
16411 static void
16412 do_neon_ldx_stx (void)
16413 {
16414 if (inst.operands[1].isreg)
16415 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
16416
16417 switch (NEON_LANE (inst.operands[0].imm))
16418 {
16419 case NEON_INTERLEAVE_LANES:
16420 NEON_ENCODE (INTERLV, inst);
16421 do_neon_ld_st_interleave ();
16422 break;
16423
16424 case NEON_ALL_LANES:
16425 NEON_ENCODE (DUP, inst);
16426 if (inst.instruction == N_INV)
16427 {
16428 first_error ("only loads support such operands");
16429 break;
16430 }
16431 do_neon_ld_dup ();
16432 break;
16433
16434 default:
16435 NEON_ENCODE (LANE, inst);
16436 do_neon_ld_st_lane ();
16437 }
16438
16439 /* L bit comes from bit mask. */
16440 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16441 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16442 inst.instruction |= inst.operands[1].reg << 16;
16443
16444 if (inst.operands[1].postind)
16445 {
16446 int postreg = inst.operands[1].imm & 0xf;
16447 constraint (!inst.operands[1].immisreg,
16448 _("post-index must be a register"));
16449 constraint (postreg == 0xd || postreg == 0xf,
16450 _("bad register for post-index"));
16451 inst.instruction |= postreg;
16452 }
16453 else
16454 {
16455 constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
16456 constraint (inst.reloc.exp.X_op != O_constant
16457 || inst.reloc.exp.X_add_number != 0,
16458 BAD_ADDR_MODE);
16459
16460 if (inst.operands[1].writeback)
16461 {
16462 inst.instruction |= 0xd;
16463 }
16464 else
16465 inst.instruction |= 0xf;
16466 }
16467
16468 if (thumb_mode)
16469 inst.instruction |= 0xf9000000;
16470 else
16471 inst.instruction |= 0xf4000000;
16472 }
16473
16474 /* FP v8. */
/* FP v8.  */
static void
do_vfp_nsyn_fpv8 (enum neon_shape rs)
{
  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  NEON_ENCODE (FPV8, inst);

  /* Encode operands via the single- or double-precision VFP helper.  */
  if (rs == NS_FFF)
    do_vfp_sp_dyadic ();
  else
    do_vfp_dp_rd_rn_rm ();

  /* Bit 8 selects the double-precision variant.  */
  if (rs == NS_DDD)
    inst.instruction |= 0x100;

  /* FP v8 instructions are always unconditional.  */
  inst.instruction |= 0xf0000000;
}
16496
/* Encode VSEL (ARMv8 conditional select); must not appear in an IT block.  */

static void
do_vsel (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
    first_error (_("invalid instruction shape"));
}
16505
/* Encode VMAXNM/VMINNM: try the VFP form first, then fall back to the
   Neon three-same encoding.  Must not appear in an IT block.  */

static void
do_vmaxnm (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
    return;

  neon_dyadic_misc (NT_untyped, N_F32, 0);
}
16519
/* Common encoder for the VRINT family; MODE selects the rounding mode.  */

static void
do_vrint_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et;

  if (rs == NS_NULL)
    return;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
  if (et.type != NT_invtype)
    {
      /* VFP encodings.  */
      /* The a/n/p/m modes are unconditional-only encodings.  */
      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
	set_it_insn_type (OUTSIDE_IT_INSN);

      NEON_ENCODE (FPV8, inst);
      if (rs == NS_FF)
	do_vfp_sp_monadic ();
      else
	do_vfp_dp_rd_rm ();

      switch (mode)
	{
	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
	default: abort ();
	}

      /* Bit 8 selects double precision.  */
      inst.instruction |= (rs == NS_DD) << 8;
      do_vfp_cond_or_thumb ();
    }
  else
    {
      /* Neon encodings (or something broken...).  */
      inst.error = NULL;
      et = neon_check_type (2, rs, N_EQK, N_F32 | N_KEY);

      if (et.type == NT_invtype)
	return;

      set_it_insn_type (OUTSIDE_IT_INSN);
      NEON_ENCODE (FLOAT, inst);

      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	return;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Rounding-mode opcode for the Neon form (bits 9:7).  */
      switch (mode)
	{
	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
	default: abort ();
	}

      if (thumb_mode)
	inst.instruction |= 0xfc000000;
      else
	inst.instruction |= 0xf0000000;
    }
}
16602
16603 static void
16604 do_vrintx (void)
16605 {
16606 do_vrint_1 (neon_cvt_mode_x);
16607 }
16608
16609 static void
16610 do_vrintz (void)
16611 {
16612 do_vrint_1 (neon_cvt_mode_z);
16613 }
16614
16615 static void
16616 do_vrintr (void)
16617 {
16618 do_vrint_1 (neon_cvt_mode_r);
16619 }
16620
/* VRINTA: round to integral, to nearest with ties away from zero.  */
static void
do_vrinta (void)
{
  do_vrint_1 (neon_cvt_mode_a);
}
16626
/* VRINTN: round to integral, to nearest with ties to even.  */
static void
do_vrintn (void)
{
  do_vrint_1 (neon_cvt_mode_n);
}
16632
/* VRINTP: round to integral, towards plus infinity.  */
static void
do_vrintp (void)
{
  do_vrint_1 (neon_cvt_mode_p);
}
16638
/* VRINTM: round to integral, towards minus infinity.  */
static void
do_vrintm (void)
{
  do_vrint_1 (neon_cvt_mode_m);
}
16644
16645 /* Crypto v1 instructions. */
16646 static void
16647 do_crypto_2op_1 (unsigned elttype, int op)
16648 {
16649 set_it_insn_type (OUTSIDE_IT_INSN);
16650
16651 if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
16652 == NT_invtype)
16653 return;
16654
16655 inst.error = NULL;
16656
16657 NEON_ENCODE (INTEGER, inst);
16658 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16659 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16660 inst.instruction |= LOW4 (inst.operands[1].reg);
16661 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16662 if (op != -1)
16663 inst.instruction |= op << 6;
16664
16665 if (thumb_mode)
16666 inst.instruction |= 0xfc000000;
16667 else
16668 inst.instruction |= 0xf0000000;
16669 }
16670
/* Encode a three-operand crypto instruction (SHA1C/P/M/SU0,
   SHA256H/H2/SU1).  U selects the opcode group; OP is encoded into the
   size field as 8 << op.  */
static void
do_crypto_3op_1 (int u, int op)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
		       N_32 | N_UNT | N_KEY).type == NT_invtype)
    return;

  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  /* Reuse the standard three-same-register Neon encoder.  */
  neon_three_same (1, u, 8 << op);
}
16685
/* AESE: two-operand crypto encoding, 8-bit elements, op field 0.  */
static void
do_aese (void)
{
  do_crypto_2op_1 (N_8, 0);
}
16691
/* AESD: two-operand crypto encoding, 8-bit elements, op field 1.  */
static void
do_aesd (void)
{
  do_crypto_2op_1 (N_8, 1);
}
16697
/* AESMC: two-operand crypto encoding, 8-bit elements, op field 2.  */
static void
do_aesmc (void)
{
  do_crypto_2op_1 (N_8, 2);
}
16703
/* AESIMC: two-operand crypto encoding, 8-bit elements, op field 3.  */
static void
do_aesimc (void)
{
  do_crypto_2op_1 (N_8, 3);
}
16709
/* SHA1C: three-operand crypto encoding, u = 0, op = 0.  */
static void
do_sha1c (void)
{
  do_crypto_3op_1 (0, 0);
}
16715
/* SHA1P: three-operand crypto encoding, u = 0, op = 1.  */
static void
do_sha1p (void)
{
  do_crypto_3op_1 (0, 1);
}
16721
/* SHA1M: three-operand crypto encoding, u = 0, op = 2.  */
static void
do_sha1m (void)
{
  do_crypto_3op_1 (0, 2);
}
16727
/* SHA1SU0: three-operand crypto encoding, u = 0, op = 3.  */
static void
do_sha1su0 (void)
{
  do_crypto_3op_1 (0, 3);
}
16733
/* SHA256H: three-operand crypto encoding, u = 1, op = 0.  */
static void
do_sha256h (void)
{
  do_crypto_3op_1 (1, 0);
}
16739
/* SHA256H2: three-operand crypto encoding, u = 1, op = 1.  */
static void
do_sha256h2 (void)
{
  do_crypto_3op_1 (1, 1);
}
16745
/* SHA256SU1: three-operand crypto encoding, u = 1, op = 2.  */
static void
do_sha256su1 (void)
{
  do_crypto_3op_1 (1, 2);
}
16751
/* SHA1H: two-operand crypto encoding, 32-bit elements, no op field.  */
static void
do_sha1h (void)
{
  do_crypto_2op_1 (N_32, -1);
}
16757
/* SHA1SU1: two-operand crypto encoding, 32-bit elements, op field 0.  */
static void
do_sha1su1 (void)
{
  do_crypto_2op_1 (N_32, 0);
}
16763
/* SHA256SU0: two-operand crypto encoding, 32-bit elements, op field 1.  */
static void
do_sha256su0 (void)
{
  do_crypto_2op_1 (N_32, 1);
}
16769
16770 static void
16771 do_crc32_1 (unsigned int poly, unsigned int sz)
16772 {
16773 unsigned int Rd = inst.operands[0].reg;
16774 unsigned int Rn = inst.operands[1].reg;
16775 unsigned int Rm = inst.operands[2].reg;
16776
16777 set_it_insn_type (OUTSIDE_IT_INSN);
16778 inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
16779 inst.instruction |= LOW4 (Rn) << 16;
16780 inst.instruction |= LOW4 (Rm);
16781 inst.instruction |= sz << (thumb_mode ? 4 : 21);
16782 inst.instruction |= poly << (thumb_mode ? 20 : 9);
16783
16784 if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
16785 as_warn (UNPRED_REG ("r15"));
16786 if (thumb_mode && (Rd == REG_SP || Rn == REG_SP || Rm == REG_SP))
16787 as_warn (UNPRED_REG ("r13"));
16788 }
16789
/* CRC32B: CRC-32 polynomial, byte operand.  */
static void
do_crc32b (void)
{
  do_crc32_1 (0, 0);
}
16795
/* CRC32H: CRC-32 polynomial, halfword operand.  */
static void
do_crc32h (void)
{
  do_crc32_1 (0, 1);
}
16801
/* CRC32W: CRC-32 polynomial, word operand.  */
static void
do_crc32w (void)
{
  do_crc32_1 (0, 2);
}
16807
/* CRC32CB: CRC-32C polynomial, byte operand.  */
static void
do_crc32cb (void)
{
  do_crc32_1 (1, 0);
}
16813
/* CRC32CH: CRC-32C polynomial, halfword operand.  */
static void
do_crc32ch (void)
{
  do_crc32_1 (1, 1);
}
16819
/* CRC32CW: CRC-32C polynomial, word operand.  */
static void
do_crc32cw (void)
{
  do_crc32_1 (1, 2);
}
16825
16826 \f
16827 /* Overall per-instruction processing. */
16828
16829 /* We need to be able to fix up arbitrary expressions in some statements.
16830 This is so that we can handle symbols that are an arbitrary distance from
16831 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
16832 which returns part of an address in a form which will be valid for
16833 a data instruction. We do this by pushing the expression into a symbol
16834 in the expr_section, and creating a fix for that. */
16835
/* Create a fix for expression EXP, SIZE bytes wide, at offset WHERE in
   FRAG.  PC_REL is non-zero for a PC-relative fix; RELOC is the BFD
   relocation code to apply (passed as int, cast back below).  Wraps
   fix_new_exp / fix_new so that expressions the generic machinery
   cannot represent are first pushed into symbols.  */
static void
fix_new_arm (fragS * frag,
	     int where,
	     short int size,
	     expressionS * exp,
	     int pc_rel,
	     int reloc)
{
  fixS * new_fix;

  switch (exp->X_op)
    {
    case O_constant:
      if (pc_rel)
	{
	  /* Create an absolute valued symbol, so we have something to
	     refer to in the object file.  Unfortunately for us, gas's
	     generic expression parsing will already have folded out
	     any use of .set foo/.type foo %function that may have
	     been used to set type information of the target location,
	     that's being specified symbolically.  We have to presume
	     the user knows what they are doing.  */
	  /* 16 + 8 bytes: "*ABS*0x" (7) + up to 16 hex digits + NUL.  */
	  char name[16 + 8];
	  symbolS *symbol;

	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);

	  /* Reuse the symbol if one was already created for this
	     constant, otherwise make a new absolute-section symbol.  */
	  symbol = symbol_find_or_make (name);
	  S_SET_SEGMENT (symbol, absolute_section);
	  symbol_set_frag (symbol, &zero_address_frag);
	  S_SET_VALUE (symbol, exp->X_add_number);
	  /* Rewrite EXP in place to refer to that symbol.  */
	  exp->X_op = O_symbol;
	  exp->X_add_symbol = symbol;
	  exp->X_add_number = 0;
	}
      /* FALLTHROUGH */
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
			     (enum bfd_reloc_code_real) reloc);
      break;

    default:
      /* Anything more complex is pushed into an expression symbol and
	 fixed up as a plain symbol reference with zero offset.  */
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
				  pc_rel, (enum bfd_reloc_code_real) reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
16889
/* Create a frag for an instruction requiring relaxation.  The opcode
   held in INST is emitted in its 16-bit Thumb form; the relax
   machinery may later widen it to 32 bits.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  /* Split the relocation expression into the symbol/offset pair that
     frag_var can carry.  */
  switch (inst.reloc.exp.X_op)
    {
    case O_symbol:
      sym = inst.reloc.exp.X_add_symbol;
      offset = inst.reloc.exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.reloc.exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.reloc.exp);
      offset = 0;
      break;
    }
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
16921
/* Write a 32-bit thumb instruction to buf, as two 16-bit units with
   the most significant halfword first.  */
static void
put_thumb32_insn (char * buf, unsigned long insn)
{
  md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
}
16929
/* Emit the instruction described by the global INST into the current
   frag, along with any fix-up and DWARF line info.  STR is the source
   line, used only in diagnostics.  */
static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  if (inst.relax)
    {
      /* Relaxable instructions get a variant frag, not fixed bytes.  */
      output_relax_insn ();
      return;
    }
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      /* 32-bit Thumb: two halfwords, high halfword first.  */
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* Double-size ARM case: the same 32-bit word is written twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    fix_new_arm (frag_now, to - frag_now->fr_literal,
		 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
		 inst.reloc.type);

  dwarf2_emit_insn (inst.size);
}
16976
16977 static char *
16978 output_it_inst (int cond, int mask, char * to)
16979 {
16980 unsigned long instruction = 0xbf00;
16981
16982 mask &= 0xf;
16983 instruction |= mask;
16984 instruction |= cond << 4;
16985
16986 if (to == NULL)
16987 {
16988 to = frag_more (2);
16989 #ifdef OBJ_ELF
16990 dwarf2_emit_insn (2);
16991 #endif
16992 }
16993
16994 md_number_to_chars (to, instruction, 2);
16995
16996 return to;
16997 }
16998
/* Tag values used in struct asm_opcode's tag field, describing how
   (and where) a mnemonic accepts a condition-code affix.  Consumed by
   opcode_lookup below.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
17032
17033 /* Subroutine of md_assemble, responsible for looking up the primary
17034 opcode from the mnemonic the user wrote. STR points to the
17035 beginning of the mnemonic.
17036
17037 This is not simply a hash table lookup, because of conditional
17038 variants. Most instructions have conditional variants, which are
17039 expressed with a _conditional affix_ to the mnemonic. If we were
17040 to encode each conditional variant as a literal string in the opcode
17041 table, it would have approximately 20,000 entries.
17042
17043 Most mnemonics take this affix as a suffix, and in unified syntax,
17044 'most' is upgraded to 'all'. However, in the divided syntax, some
17045 instructions take the affix as an infix, notably the s-variants of
17046 the arithmetic instructions. Of those instructions, all but six
17047 have the infix appear after the third character of the mnemonic.
17048
17049 Accordingly, the algorithm for looking up primary opcodes given
17050 an identifier is:
17051
17052 1. Look up the identifier in the opcode table.
17053 If we find a match, go to step U.
17054
17055 2. Look up the last two characters of the identifier in the
17056 conditions table. If we find a match, look up the first N-2
17057 characters of the identifier in the opcode table. If we
17058 find a match, go to step CE.
17059
17060 3. Look up the fourth and fifth characters of the identifier in
17061 the conditions table. If we find a match, extract those
17062 characters from the identifier, and look up the remaining
17063 characters in the opcode table. If we find a match, go
17064 to step CM.
17065
17066 4. Fail.
17067
17068 U. Examine the tag field of the opcode structure, in case this is
17069 one of the six instructions with its conditional infix in an
17070 unusual place. If it is, the tag tells us where to find the
17071 infix; look it up in the conditions table and set inst.cond
17072 accordingly. Otherwise, this is an unconditional instruction.
17073 Again set inst.cond accordingly. Return the opcode structure.
17074
17075 CE. Examine the tag field to make sure this is an instruction that
17076 should receive a conditional suffix. If it is not, fail.
17077 Otherwise, set inst.cond from the suffix we already looked up,
17078 and return the opcode structure.
17079
17080 CM. Examine the tag field to make sure this is an instruction that
17081 should receive a conditional infix after the third character.
17082 If it is not, fail. Otherwise, undo the edits to the current
17083 line of input and proceed as for case CE. */
17084
/* Look up the primary opcode for the mnemonic at *STR, handling
   conditional suffixes/infixes and width/Neon-type suffixes.  On
   success, advances *STR past the mnemonic (and width suffix, if any)
   and fills in inst.cond / inst.size_req / inst.vectype; returns the
   opcode, or NULL on failure.  See the large comment above for the
   lookup algorithm (steps U, CE and CM).  */
static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || *end == '.')
      break;

  if (end == base)
    return NULL;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
	 use.  */
      if (unified_syntax && end[1] == 'w')
	inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;

      inst.vectype.elems = 0;

      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix (possible in either unified or
	     non-unified ARM syntax mode).  */
	  if (parse_neon_type (&inst.vectype, str) == FAIL)
	    return NULL;
	}
      else if (end[offset] != '\0' && end[offset] != ' ')
	return NULL;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    end - base);
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
	{
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      if (warn_on_deprecated && unified_syntax)
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
      /* The tag encodes where the infix sits within the mnemonic.  */
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      gas_assert (cond);

      inst.cond = cond->value;
      return opcode;
    }

  /* Cannot have a conditional suffix on a mnemonic of less than two
     characters.  */
  if (end - base < 3)
    return NULL;

  /* Look for suffixed mnemonic.  */
  affix = end - 2;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    affix - base);
  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
	  break;

	case OT_cinfix3:
	case OT_cinfix3_deprecated:
	case OT_odd_infix_unc:
	  if (!unified_syntax)
	    return 0;
	  /* else fall through */

	case OT_csuffix:
	case OT_csuffixF:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    inst.cond = cond->value;
	  else
	    {
	      /* Delayed diagnostic.  */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return NULL;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return NULL;

  /* Look for infixed mnemonic in the usual position.  */
  affix = base + 3;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return NULL;

  /* Temporarily excise the two condition characters from the input
     line so the remaining mnemonic can be looked up, then restore the
     line to its original form.  */
  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
	  || opcode->tag == OT_cinfix3_deprecated
	  || opcode->tag == OT_csuf_or_in3
	  || opcode->tag == OT_cinfix3_legacy))
    {
      /* Step CM.  */
      if (warn_on_deprecated && unified_syntax
	  && (opcode->tag == OT_cinfix3
	      || opcode->tag == OT_cinfix3_deprecated))
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return NULL;
}
17241
17242 /* This function generates an initial IT instruction, leaving its block
17243 virtually open for the new instructions. Eventually,
17244 the mask will be updated by now_it_add_mask () each time
17245 a new instruction needs to be included in the IT block.
17246 Finally, the block is closed with close_automatic_it_block ().
17247 The block closure can be requested either from md_assemble (),
17248 a tencode (), or due to a label hook. */
17249
/* Open a new automatically-generated IT block with condition COND.
   See the comment above this function for the overall protocol.  */
static void
new_automatic_it_block (int cond)
{
  now_it.state = AUTOMATIC_IT_BLOCK;
  now_it.mask = 0x18;	/* Mask for a one-instruction block so far.  */
  now_it.cc = cond;
  now_it.block_length = 1;
  mapping_state (MAP_THUMB);
  /* Emit the IT instruction now; its mask is rewritten in place as
     more instructions join the block (see now_it_add_mask).  */
  now_it.insn = output_it_inst (cond, now_it.mask, NULL);
  now_it.warn_deprecated = FALSE;
  now_it.insn_cond = TRUE;
}
17262
/* Close an automatic IT block.
   See comments in new_automatic_it_block ().  */

static void
close_automatic_it_block (void)
{
  now_it.mask = 0x10;	/* All slots of the block consumed.  */
  now_it.block_length = 0;
}
17272
/* Update the mask of the current automatically-generated IT
   instruction to account for one more instruction with condition
   COND, rewriting the already-emitted IT instruction in place.
   See comments in new_automatic_it_block ().  */

static void
now_it_add_mask (int cond)
{
#define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
#define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
					     | ((bitvalue) << (nbit)))
  /* The low bit of the condition selects then vs. else for this slot.  */
  const int resulting_bit = (cond & 1);

  now_it.mask &= 0xf;
  /* Record this instruction's then/else bit in its slot...  */
  now_it.mask = SET_BIT_VALUE (now_it.mask,
			       resulting_bit,
			       (5 - now_it.block_length));
  /* ...and move the terminating 1 bit down one position.  */
  now_it.mask = SET_BIT_VALUE (now_it.mask,
			       1,
			       ((5 - now_it.block_length) - 1) );
  output_it_inst (now_it.cc, now_it.mask, now_it.insn);

#undef CLEAR_BIT
#undef SET_BIT_VALUE
}
17296
17297 /* The IT blocks handling machinery is accessed through the these functions:
17298 it_fsm_pre_encode () from md_assemble ()
17299 set_it_insn_type () optional, from the tencode functions
17300 set_it_insn_type_last () ditto
17301 in_it_block () ditto
17302 it_fsm_post_encode () from md_assemble ()
17303      force_automatic_it_block_close () from label handling functions
17304
17305 Rationale:
17306 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
17307 initializing the IT insn type with a generic initial value depending
17308 on the inst.condition.
17309 2) During the tencode function, two things may happen:
17310 a) The tencode function overrides the IT insn type by
17311 calling either set_it_insn_type (type) or set_it_insn_type_last ().
17312 b) The tencode function queries the IT block state by
17313 calling in_it_block () (i.e. to determine narrow/not narrow mode).
17314
17315 Both set_it_insn_type and in_it_block run the internal FSM state
17316 handling function (handle_it_state), because: a) setting the IT insn
17317    type may result in an invalid state (exiting the function),
17318 and b) querying the state requires the FSM to be updated.
17319 Specifically we want to avoid creating an IT block for conditional
17320 branches, so it_fsm_pre_encode is actually a guess and we can't
17321 determine whether an IT block is required until the tencode () routine
17322    has decided what type of instruction this actually is.
17323 Because of this, if set_it_insn_type and in_it_block have to be used,
17324 set_it_insn_type has to be called first.
17325
17326 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
17327 determines the insn IT type depending on the inst.cond code.
17328 When a tencode () routine encodes an instruction that can be
17329 either outside an IT block, or, in the case of being inside, has to be
17330 the last one, set_it_insn_type_last () will determine the proper
17331 IT instruction type based on the inst.cond code. Otherwise,
17332 set_it_insn_type can be called for overriding that logic or
17333 for covering other cases.
17334
17335 Calling handle_it_state () may not transition the IT block state to
17336    OUTSIDE_IT_BLOCK immediately, since the (current) state could be
17337 still queried. Instead, if the FSM determines that the state should
17338 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
17339 after the tencode () function: that's what it_fsm_post_encode () does.
17340
17341 Since in_it_block () calls the state handling function to get an
17342 updated state, an error may occur (due to invalid insns combination).
17343 In that case, inst.error is set.
17344 Therefore, inst.error has to be checked after the execution of
17345 the tencode () routine.
17346
17347 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
17348 any pending state change (if any) that didn't take place in
17349 handle_it_state () as explained above. */
17350
17351 static void
17352 it_fsm_pre_encode (void)
17353 {
17354 if (inst.cond != COND_ALWAYS)
17355 inst.it_insn_type = INSIDE_IT_INSN;
17356 else
17357 inst.it_insn_type = OUTSIDE_IT_INSN;
17358
17359 now_it.state_handled = 0;
17360 }
17361
/* IT state FSM handling function.  Advances the IT finite-state
   machine for the instruction being assembled (type in
   inst.it_insn_type, condition in inst.cond).  Returns SUCCESS, or
   FAIL with inst.error set for an invalid combination.  */

static int
handle_it_state (void)
{
  now_it.state_handled = 1;
  now_it.insn_cond = FALSE;

  switch (now_it.state)
    {
    case OUTSIDE_IT_BLOCK:
      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (thumb_mode == 0)
	    {
	      /* Conditional ARM instruction: fine, but warn under
		 unified syntax since the Thumb equivalent would need
		 an IT block.  */
	      if (unified_syntax
		  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		as_tsktsk (_("Warning: conditional outside an IT block"\
			     " for Thumb."));
	    }
	  else
	    {
	      if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2))
		{
		  /* Automatically generate the IT instruction.  */
		  new_automatic_it_block (inst.cond);
		  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
		    close_automatic_it_block ();
		}
	      else
		{
		  inst.error = BAD_OUT_IT;
		  return FAIL;
		}
	    }
	  break;

	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case IT_INSN:
	  /* An explicit IT instruction opens a manually-managed block.  */
	  now_it.state = MANUAL_IT_BLOCK;
	  now_it.block_length = 0;
	  break;
	}
      break;

    case AUTOMATIC_IT_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	 to incompatible conditions or
	 4 insns-length block reached).  */

      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_it_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_it.block_length++;

	  if (now_it.block_length > 4
	      || !now_it_compatible (inst.cond))
	    {
	      /* Block full or condition mismatch: close it, and open a
		 fresh one unless this insn must be last in a block.  */
	      force_automatic_it_block_close ();
	      if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    {
	      now_it.insn_cond = TRUE;
	      now_it_add_mask (inst.cond);
	    }

	  if (now_it.state == AUTOMATIC_IT_BLOCK
	      && (inst.it_insn_type == INSIDE_IT_LAST_INSN
		  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	case NEUTRAL_IT_INSN:
	  now_it.block_length++;
	  now_it.insn_cond = TRUE;

	  if (now_it.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    now_it_add_mask (now_it.cc & 1);
	  break;

	case IT_INSN:
	  close_automatic_it_block ();
	  now_it.state = MANUAL_IT_BLOCK;
	  break;
	}
      break;

    case MANUAL_IT_BLOCK:
      {
	/* Check conditional suffixes.  */
	const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
	int is_last;
	now_it.mask <<= 1;
	now_it.mask &= 0x1f;
	is_last = (now_it.mask == 0x10);
	now_it.insn_cond = TRUE;

	switch (inst.it_insn_type)
	  {
	  case OUTSIDE_IT_INSN:
	    inst.error = BAD_NOT_IT;
	    return FAIL;

	  case INSIDE_IT_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    break;

	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in an IT block.  */
	    break;

	  case IT_INSN:
	    inst.error = BAD_IT_IT;
	    return FAIL;
	  }
      }
      break;
    }

  return SUCCESS;
}
17525
/* A 16-bit Thumb instruction pattern/mask pair with a human-readable
   description, used to diagnose instruction classes deprecated inside
   IT blocks.  */
struct depr_insn_mask
{
  unsigned long pattern;	/* Bits expected after masking.  */
  unsigned long mask;		/* Which bits of the insn to compare.  */
  const char* description;	/* Text for the deprecation warning.  */
};
17532
17533 /* List of 16-bit instruction patterns deprecated in an IT block in
17534 ARMv8. */
17535 static const struct depr_insn_mask depr_it_insns[] = {
17536 { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
17537 { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
17538 { 0xa000, 0xb800, N_("ADR") },
17539 { 0x4800, 0xf800, N_("Literal loads") },
17540 { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
17541 { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
17542 /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
17543 field in asm_opcode. 'tvalue' is used at the stage this check happen. */
17544 { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
17545 { 0, 0, NULL }
17546 };
17547
/* Run after the tencode routine: commit any pending IT-state change,
   diagnose IT-block contents deprecated in ARMv8, and leave the block
   once its last slot has been consumed.  Called from md_assemble ().  */
static void
it_fsm_post_encode (void)
{
  int is_last;

  if (!now_it.state_handled)
    handle_it_state ();

  if (now_it.insn_cond
      && !now_it.warn_deprecated
      && warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    {
      if (inst.instruction >= 0x10000)
	{
	  /* Any 32-bit Thumb encoding inside an IT block is deprecated.  */
	  as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
		       "deprecated in ARMv8"));
	  now_it.warn_deprecated = TRUE;
	}
      else
	{
	  const struct depr_insn_mask *p = depr_it_insns;

	  /* Check the 16-bit encoding against each deprecated class.  */
	  while (p->mask != 0)
	    {
	      if ((inst.instruction & p->mask) == p->pattern)
		{
		  as_tsktsk (_("IT blocks containing 16-bit Thumb instructions "
			       "of the following class are deprecated in ARMv8: "
			       "%s"), p->description);
		  now_it.warn_deprecated = TRUE;
		  break;
		}

	      ++p;
	    }
	}

      if (now_it.block_length > 1)
	{
	  as_tsktsk (_("IT blocks containing more than one conditional "
		       "instruction are deprecated in ARMv8"));
	  now_it.warn_deprecated = TRUE;
	}
    }

  /* Mask 0x10 means every slot of the IT block has been consumed.  */
  is_last = (now_it.mask == 0x10);
  if (is_last)
    {
      now_it.state = OUTSIDE_IT_BLOCK;
      now_it.mask = 0;
    }
}
17601
17602 static void
17603 force_automatic_it_block_close (void)
17604 {
17605 if (now_it.state == AUTOMATIC_IT_BLOCK)
17606 {
17607 close_automatic_it_block ();
17608 now_it.state = OUTSIDE_IT_BLOCK;
17609 now_it.mask = 0;
17610 }
17611 }
17612
/* Return non-zero if assembly is currently inside an IT block of any
   kind, running the FSM first if this instruction has not yet been
   processed by it.  */
static int
in_it_block (void)
{
  if (!now_it.state_handled)
    handle_it_state ();

  return now_it.state != OUTSIDE_IT_BLOCK;
}
17621
17622 void
17623 md_assemble (char *str)
17624 {
17625 char *p = str;
17626 const struct asm_opcode * opcode;
17627
17628 /* Align the previous label if needed. */
17629 if (last_label_seen != NULL)
17630 {
17631 symbol_set_frag (last_label_seen, frag_now);
17632 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
17633 S_SET_SEGMENT (last_label_seen, now_seg);
17634 }
17635
17636 memset (&inst, '\0', sizeof (inst));
17637 inst.reloc.type = BFD_RELOC_UNUSED;
17638
17639 opcode = opcode_lookup (&p);
17640 if (!opcode)
17641 {
17642 /* It wasn't an instruction, but it might be a register alias of
17643 the form alias .req reg, or a Neon .dn/.qn directive. */
17644 if (! create_register_alias (str, p)
17645 && ! create_neon_reg_alias (str, p))
17646 as_bad (_("bad instruction `%s'"), str);
17647
17648 return;
17649 }
17650
17651 if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
17652 as_tsktsk (_("s suffix on comparison instruction is deprecated"));
17653
17654 /* The value which unconditional instructions should have in place of the
17655 condition field. */
17656 inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;
17657
17658 if (thumb_mode)
17659 {
17660 arm_feature_set variant;
17661
17662 variant = cpu_variant;
17663 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
17664 if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
17665 ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
17666 /* Check that this instruction is supported for this CPU. */
17667 if (!opcode->tvariant
17668 || (thumb_mode == 1
17669 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
17670 {
17671 as_bad (_("selected processor does not support Thumb mode `%s'"), str);
17672 return;
17673 }
17674 if (inst.cond != COND_ALWAYS && !unified_syntax
17675 && opcode->tencode != do_t_branch)
17676 {
17677 as_bad (_("Thumb does not support conditional execution"));
17678 return;
17679 }
17680
17681 if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2))
17682 {
17683 if (opcode->tencode != do_t_blx && opcode->tencode != do_t_branch23
17684 && !(ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_msr)
17685 || ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_barrier)))
17686 {
17687 /* Two things are addressed here.
17688 1) Implicit require narrow instructions on Thumb-1.
17689 This avoids relaxation accidentally introducing Thumb-2
17690 instructions.
17691 2) Reject wide instructions in non Thumb-2 cores. */
17692 if (inst.size_req == 0)
17693 inst.size_req = 2;
17694 else if (inst.size_req == 4)
17695 {
17696 as_bad (_("selected processor does not support Thumb-2 mode `%s'"), str);
17697 return;
17698 }
17699 }
17700 }
17701
17702 inst.instruction = opcode->tvalue;
17703
17704 if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
17705 {
17706 /* Prepare the it_insn_type for those encodings that don't set
17707 it. */
17708 it_fsm_pre_encode ();
17709
17710 opcode->tencode ();
17711
17712 it_fsm_post_encode ();
17713 }
17714
17715 if (!(inst.error || inst.relax))
17716 {
17717 gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
17718 inst.size = (inst.instruction > 0xffff ? 4 : 2);
17719 if (inst.size_req && inst.size_req != inst.size)
17720 {
17721 as_bad (_("cannot honor width suffix -- `%s'"), str);
17722 return;
17723 }
17724 }
17725
17726 /* Something has gone badly wrong if we try to relax a fixed size
17727 instruction. */
17728 gas_assert (inst.size_req == 0 || !inst.relax);
17729
17730 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
17731 *opcode->tvariant);
17732 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
17733 set those bits when Thumb-2 32-bit instructions are seen. ie.
17734 anything other than bl/blx and v6-M instructions.
17735 The impact of relaxable instructions will be considered later after we
17736 finish all relaxation. */
17737 if ((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
17738 && !(ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
17739 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier)))
17740 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
17741 arm_ext_v6t2);
17742
17743 check_neon_suffixes;
17744
17745 if (!inst.error)
17746 {
17747 mapping_state (MAP_THUMB);
17748 }
17749 }
17750 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
17751 {
17752 bfd_boolean is_bx;
17753
17754 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
17755 is_bx = (opcode->aencode == do_bx);
17756
17757 /* Check that this instruction is supported for this CPU. */
17758 if (!(is_bx && fix_v4bx)
17759 && !(opcode->avariant &&
17760 ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
17761 {
17762 as_bad (_("selected processor does not support ARM mode `%s'"), str);
17763 return;
17764 }
17765 if (inst.size_req)
17766 {
17767 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
17768 return;
17769 }
17770
17771 inst.instruction = opcode->avalue;
17772 if (opcode->tag == OT_unconditionalF)
17773 inst.instruction |= 0xF << 28;
17774 else
17775 inst.instruction |= inst.cond << 28;
17776 inst.size = INSN_SIZE;
17777 if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
17778 {
17779 it_fsm_pre_encode ();
17780 opcode->aencode ();
17781 it_fsm_post_encode ();
17782 }
17783 /* Arm mode bx is marked as both v4T and v5 because it's still required
17784 on a hypothetical non-thumb v5 core. */
17785 if (is_bx)
17786 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
17787 else
17788 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
17789 *opcode->avariant);
17790
17791 check_neon_suffixes;
17792
17793 if (!inst.error)
17794 {
17795 mapping_state (MAP_ARM);
17796 }
17797 }
17798 else
17799 {
17800 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
17801 "-- `%s'"), str);
17802 return;
17803 }
17804 output_inst (str);
17805 }
17806
17807 static void
17808 check_it_blocks_finished (void)
17809 {
17810 #ifdef OBJ_ELF
17811 asection *sect;
17812
17813 for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
17814 if (seg_info (sect)->tc_segment_info_data.current_it.state
17815 == MANUAL_IT_BLOCK)
17816 {
17817 as_warn (_("section '%s' finished with an open IT block."),
17818 sect->name);
17819 }
17820 #else
17821 if (now_it.state == MANUAL_IT_BLOCK)
17822 as_warn (_("file finished with an open IT block."));
17823 #endif
17824 }
17825
17826 /* Various frobbings of labels and their addresses. */
17827
/* Called by GAS at the start of each logical input line; forget the
   label seen on the previous line so that per-line label tracking
   (used by the Thumb implicit-IT machinery) starts afresh.  */

void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
17833
/* Target hook invoked whenever a label SYM is defined.  Records the
   label, tags it with the current instruction-set state, closes any
   automatically-generated IT block, optionally marks it as a Thumb
   function, and emits a DWARF line entry for it.  The ordering of
   these steps matters: the IT block must be closed before the label
   is considered a potential branch target.  */

void
arm_frob_label (symbolS * sym)
{
  /* Remember the label for this line (cleared by arm_start_line_hook).  */
  last_label_seen = sym;

  /* Record whether the label was defined in ARM or Thumb code.  */
  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  /* A label terminates any IT block the assembler opened implicitly.  */
  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:	.word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      /* The .thumb_func marker applies to exactly one label; consume it.  */
      label_is_thumb_function_name = FALSE;
    }

  /* Emit the DWARF debug-line entry for this label.  */
  dwarf2_emit_label (sym);
}
17892
17893 bfd_boolean
17894 arm_data_in_code (void)
17895 {
17896 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
17897 {
17898 *input_line_pointer = '/';
17899 input_line_pointer += 5;
17900 *input_line_pointer = 0;
17901 return TRUE;
17902 }
17903
17904 return FALSE;
17905 }
17906
17907 char *
17908 arm_canonicalize_symbol_name (char * name)
17909 {
17910 int len;
17911
17912 if (thumb_mode && (len = strlen (name)) > 5
17913 && streq (name + len - 5, "/data"))
17914 *(name + len - 5) = 0;
17915
17916 return name;
17917 }
17918 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* One reg_entry initializer: name string, encoded number, type,
   built-in flag TRUE, Neon shape 0.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
/* Register whose name is its prefix followed by its number (e.g. r7).  */
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* As REGNUM, but the encoded value is twice the name's number
   (used for Neon Q registers, which map onto D register pairs).  */
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
/* A full set of 16 numbered registers with prefix P.  */
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
/* The high half (16-31) of a 32-register set.  */
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
/* 16 registers whose encoded values are doubled (Neon Q regs).  */
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
/* Banked LR/SP/SPSR triple for one processor mode, in both lower- and
   upper-case spellings.  768 is the banked-register marker; BASE
   selects the bank within the encoding's bits 16 and up.  */
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
17949
/* The default register-name table.  Grouped by register class; each
   name appears in upper- and lower-case (and occasionally mixed-case)
   variants so that lookup need not be case-folded.  */
static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c, CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* ARM banked registers.  */
  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),

  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),

  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.	*/
  REGSET(s,VFS),  REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.	*/
  REGSET(d,VFD),  REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.  */
  REGSET2(q,NQ),  REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.	 */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid,	0,MMXWC),  REGDEF(wCID,	 0,MMXWC),  REGDEF(WCID,  0,MMXWC),
  REGDEF(wcon,	1,MMXWC),  REGDEF(wCon,	 1,MMXWC),  REGDEF(WCON,  1,MMXWC),
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
#undef REGDEF
#undef REGNUM
#undef REGSET
18067
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.  Every ordering of each combination of
   the f/s/x/c field letters is listed explicitly, so lookup is a
   plain string match with no canonicalization.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.	*/
  {"f",	   PSR_f},
  {"c",	   PSR_c},
  {"x",	   PSR_x},
  {"s",	   PSR_s},

  /* Combinations of flags.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
18146
/* Table of V7M psr names.  The value is the SYSm field used by the
   MRS/MSR special-register encoding.  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr",	  0 }, {"APSR",		0 },
  {"iapsr",	  1 }, {"IAPSR",	1 },
  {"eapsr",	  2 }, {"EAPSR",	2 },
  {"psr",	  3 }, {"PSR",		3 },
  {"xpsr",	  3 }, {"XPSR",		3 }, {"xPSR",	  3 },
  {"ipsr",	  5 }, {"IPSR",		5 },
  {"epsr",	  6 }, {"EPSR",		6 },
  {"iepsr",	  7 }, {"IEPSR",	7 },
  {"msp",	  8 }, {"MSP",		8 },
  {"psp",	  9 }, {"PSP",		9 },
  {"primask",	  16}, {"PRIMASK",	16},
  {"basepri",	  17}, {"BASEPRI",	17},
  {"basepri_max", 18}, {"BASEPRI_MAX",	18},
  /* The lower-case entry below duplicates "basepri_max" on purpose:
     only the misspelt upper-case name matters here.  */
  {"basepri_max", 18}, {"BASEPRI_MASK", 18}, /* Typo, preserved for backwards compatibility.  */
  {"faultmask",	  19}, {"FAULTMASK",	19},
  {"control",	  20}, {"CONTROL",	20}
};
18167
/* Table of all shift-in-operand names.  "asl" is accepted as a
   synonym for "lsl".  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
};
18178
/* Table of all explicit relocation names, mapping the operand
   modifier spelled in assembly (e.g. ":got:" / "(got)") to the BFD
   relocation it requests.  ELF only.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},	 { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},	 { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},	 { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
	{ "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
	{ "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
	{ "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ}
};
#endif
18203
/* Table of all conditional affixes.  0xF is not defined as a condition code.
   "hs"/"lo" (and the legacy "ul") are synonyms for "cs"/"cc".  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
18223
/* Emit a lower-case and an upper-case entry for one barrier option,
   with the CRm encoding CODE and the architecture feature that
   permits it.  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }

/* Option names accepted by the DMB/DSB/ISB barrier instructions.  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
};

#undef UL_BARRIER
18249
/* Table of ARM-format instructions.	*/

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
18274
/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
/* As TxC3, but the infix form is deprecated (a warning is issued).  */
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.	Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
18326
/* ARM-only variants of all the above.  */
#define CE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  The
   Thumb opcode is the ARM opcode with the 0xE condition prepended.  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
18356
/* One entry for a mnemonic assembled from prefix M1, condition infix M2
   (may be empty) and suffix M3.  The tag records where the infix sits
   so the parser can recover the condition.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Expand to the unconditional entry plus one entry per condition
   code (including the hs/ul/lo synonyms).  */
#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_ (m1,   , m2, op, nops, ops, ae),	\
  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
  xCM_ (m1, le, m2, op, nops, ops, ae),	\
  xCM_ (m1, al, m2, op, nops, ops, ae)

/* ARM-only unconditional mnemonic (condition field left as encoded).  */
#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* ARM-only unconditional mnemonic with 0xF in the condition field.  */
#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
18388
/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Entries that have no encode function pass 0 for it; the do_##ae
   paste in the table macros then expands to do_0, i.e. a null
   function pointer.  */
#define do_0 0
18426
18427 static const struct asm_opcode insns[] =
18428 {
18429 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
18430 #define THUMB_VARIANT & arm_ext_v4t
18431 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
18432 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
18433 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
18434 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
18435 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
18436 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
18437 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
18438 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
18439 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
18440 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
18441 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
18442 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
18443 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
18444 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
18445 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
18446 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
18447
18448 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
18449 for setting PSR flag bits. They are obsolete in V6 and do not
18450 have Thumb equivalents. */
18451 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
18452 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
18453 CL("tstp", 110f000, 2, (RR, SH), cmp),
18454 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
18455 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
18456 CL("cmpp", 150f000, 2, (RR, SH), cmp),
18457 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
18458 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
18459 CL("cmnp", 170f000, 2, (RR, SH), cmp),
18460
18461 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
18462 tC3("movs", 1b00000, _movs, 2, (RR, SH), mov, t_mov_cmp),
18463 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
18464 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
18465
18466 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
18467 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
18468 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
18469 OP_RRnpc),
18470 OP_ADDRGLDR),ldst, t_ldst),
18471 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
18472
18473 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18474 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18475 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18476 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18477 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18478 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18479
18480 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
18481 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
18482 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
18483 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
18484
18485 /* Pseudo ops. */
18486 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
18487 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
18488 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
18489 tCE("udf", 7f000f0, _udf, 1, (oIffffb), bkpt, t_udf),
18490
18491 /* Thumb-compatibility pseudo ops. */
18492 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
18493 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
18494 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
18495 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
18496 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
18497 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
18498 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
18499 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
18500 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
18501 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
18502 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
18503 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
18504
18505 /* These may simplify to neg. */
18506 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
18507 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
18508
18509 #undef THUMB_VARIANT
18510 #define THUMB_VARIANT & arm_ext_v6
18511
18512 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
18513
18514 /* V1 instructions with no Thumb analogue prior to V6T2. */
18515 #undef THUMB_VARIANT
18516 #define THUMB_VARIANT & arm_ext_v6t2
18517
18518 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
18519 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
18520 CL("teqp", 130f000, 2, (RR, SH), cmp),
18521
18522 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18523 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18524 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
18525 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18526
18527 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18528 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18529
18530 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18531 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18532
18533 /* V1 instructions with no Thumb analogue at all. */
18534 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
18535 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
18536
18537 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
18538 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
18539 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
18540 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
18541 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
18542 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
18543 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
18544 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
18545
18546 #undef ARM_VARIANT
18547 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
18548 #undef THUMB_VARIANT
18549 #define THUMB_VARIANT & arm_ext_v4t
18550
18551 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
18552 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
18553
18554 #undef THUMB_VARIANT
18555 #define THUMB_VARIANT & arm_ext_v6t2
18556
18557 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
18558 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
18559
18560 /* Generic coprocessor instructions. */
18561 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
18562 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18563 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18564 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18565 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18566 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18567 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
18568
18569 #undef ARM_VARIANT
18570 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
18571
18572 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
18573 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
18574
18575 #undef ARM_VARIANT
18576 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
18577 #undef THUMB_VARIANT
18578 #define THUMB_VARIANT & arm_ext_msr
18579
18580 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
18581 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
18582
18583 #undef ARM_VARIANT
18584 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
18585 #undef THUMB_VARIANT
18586 #define THUMB_VARIANT & arm_ext_v6t2
18587
18588 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18589 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18590 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18591 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18592 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18593 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18594 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18595 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18596
18597 #undef ARM_VARIANT
18598 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
18599 #undef THUMB_VARIANT
18600 #define THUMB_VARIANT & arm_ext_v4t
18601
18602 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18603 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18604 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18605 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18606 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18607 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18608
18609 #undef ARM_VARIANT
18610 #define ARM_VARIANT & arm_ext_v4t_5
18611
18612 /* ARM Architecture 4T. */
18613 /* Note: bx (and blx) are required on V5, even if the processor does
18614 not support Thumb. */
18615 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
18616
18617 #undef ARM_VARIANT
18618 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
18619 #undef THUMB_VARIANT
18620 #define THUMB_VARIANT & arm_ext_v5t
18621
18622 /* Note: blx has 2 variants; the .value coded here is for
18623 BLX(2). Only this variant has conditional execution. */
18624 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
18625 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
18626
18627 #undef THUMB_VARIANT
18628 #define THUMB_VARIANT & arm_ext_v6t2
18629
18630 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
18631 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18632 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18633 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18634 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18635 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
18636 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18637 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18638
18639 #undef ARM_VARIANT
18640 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
18641 #undef THUMB_VARIANT
18642 #define THUMB_VARIANT & arm_ext_v5exp
18643
18644 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18645 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18646 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18647 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18648
18649 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18650 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18651
18652 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18653 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18654 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18655 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18656
18657 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18658 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18659 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18660 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18661
18662 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18663 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18664
18665 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18666 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18667 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18668 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18669
18670 #undef ARM_VARIANT
18671 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
18672 #undef THUMB_VARIANT
18673 #define THUMB_VARIANT & arm_ext_v6t2
18674
18675 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
18676 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
18677 ldrd, t_ldstd),
18678 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
18679 ADDRGLDRS), ldrd, t_ldstd),
18680
18681 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18682 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18683
18684 #undef ARM_VARIANT
18685 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
18686
18687 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
18688
18689 #undef ARM_VARIANT
18690 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
18691 #undef THUMB_VARIANT
18692 #define THUMB_VARIANT & arm_ext_v6
18693
18694 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
18695 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
18696 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
18697 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
18698 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
18699 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18700 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18701 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18702 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18703 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
18704
18705 #undef THUMB_VARIANT
18706 #define THUMB_VARIANT & arm_ext_v6t2
18707
18708 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
18709 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
18710 strex, t_strex),
18711 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18712 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18713
18714 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
18715 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
18716
18717 /* ARM V6 not included in V7M. */
18718 #undef THUMB_VARIANT
18719 #define THUMB_VARIANT & arm_ext_v6_notm
/* NOTE(review): entries below sharing identical ARM/Thumb opcode fields
   are assembler aliases for one encoding — the addressing-mode spellings
   pair up as ia==fd, db==ea, ib==ed, da==fa (compare the opcode columns:
   rfeia/rfe/rfefd all 8900a00/e990c000, etc.).  */
18720 TUF("rfeia",	8900a00, e990c000, 1, (RRw),			   rfe, rfe),
18721 TUF("rfe",	8900a00, e990c000, 1, (RRw),			   rfe, rfe),
18722  UF(rfeib,	9900a00,           1, (RRw),			   rfe),
18723  UF(rfeda,	8100a00,           1, (RRw),			   rfe),
18724 TUF("rfedb",	9100a00, e810c000, 1, (RRw),			   rfe, rfe),
18725 TUF("rfefd",	8900a00, e990c000, 1, (RRw),			   rfe, rfe),
18726  UF(rfefa,	8100a00,           1, (RRw),			   rfe),
18727 TUF("rfeea",	9100a00, e810c000, 1, (RRw),			   rfe, rfe),
18728  UF(rfeed,	9900a00,           1, (RRw),			   rfe),
/* NOTE(review): as with RFE, matching opcode fields mark aliases here:
   "srs"/"srsia"/"srsea" share 8c00500/e980c000, srsib==srsfa (9c00500),
   srsda==srsed (8400500), srsdb==srsfd (9400500/e800c000).  */
18729 TUF("srsia",	8c00500, e980c000, 2, (oRRw, I31w),		   srs, srs),
18730 TUF("srs",	8c00500, e980c000, 2, (oRRw, I31w),		   srs, srs),
18731 TUF("srsea",	8c00500, e980c000, 2, (oRRw, I31w),		   srs, srs),
18732  UF(srsib,	9c00500,           2, (oRRw, I31w),		   srs),
18733  UF(srsfa,	9c00500,           2, (oRRw, I31w),		   srs),
18734  UF(srsda,	8400500,           2, (oRRw, I31w),		   srs),
18735  UF(srsed,	8400500,           2, (oRRw, I31w),		   srs),
18736 TUF("srsdb",	9400500, e800c000, 2, (oRRw, I31w),		   srs, srs),
18737 TUF("srsfd",	9400500, e800c000, 2, (oRRw, I31w),		   srs, srs),
18738
18739 /* ARM V6 not included in V7M (eg. integer SIMD). */
18740 #undef THUMB_VARIANT
18741 #define THUMB_VARIANT & arm_ext_v6_dsp
18742 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
18743 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
18744 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
18745 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18746 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18747 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18748 /* Old name for QASX. */
18749 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18750 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18751 /* Old name for QSAX. */
18752 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18753 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18754 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18755 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18756 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18757 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18758 /* Old name for SASX. */
18759 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18760 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18761 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18762 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18763 /* Old name for SHASX. */
18764 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18765 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18766 /* Old name for SHSAX. */
18767 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18768 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18769 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18770 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18771 /* Old name for SSAX. */
18772 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18773 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18774 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18775 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18776 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18777 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18778 /* Old name for UASX. */
18779 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18780 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18781 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18782 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18783 /* Old name for UHASX. */
18784 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18785 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18786 /* Old name for UHSAX. */
18787 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18788 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18789 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18790 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18791 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18792 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18793 /* Old name for UQASX. */
18794 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18795 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18796 /* Old name for UQSAX. */
18797 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18798 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18799 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18800 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18801 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18802 /* Old name for USAX. */
18803 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18804 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18805 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18806 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18807 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18808 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18809 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18810 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18811 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18812 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18813 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18814 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18815 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18816 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18817 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18818 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18819 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18820 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18821 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18822 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18823 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18824 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18825 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18826 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18827 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18828 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18829 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18830 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18831 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18832 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
18833 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
18834 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18835 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18836 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
18837
18838 #undef ARM_VARIANT
18839 #define ARM_VARIANT & arm_ext_v6k
18840 #undef THUMB_VARIANT
18841 #define THUMB_VARIANT & arm_ext_v6k
18842
18843 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
18844 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
18845 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
18846 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
18847
18848 #undef THUMB_VARIANT
18849 #define THUMB_VARIANT & arm_ext_v6_notm
18850 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
18851 ldrexd, t_ldrexd),
18852 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
18853 RRnpcb), strexd, t_strexd),
18854
18855 #undef THUMB_VARIANT
18856 #define THUMB_VARIANT & arm_ext_v6t2
18857 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
18858 rd_rn, rd_rn),
18859 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
18860 rd_rn, rd_rn),
18861 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
18862 strex, t_strexbh),
18863 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
18864 strex, t_strexbh),
18865 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
18866
18867 #undef ARM_VARIANT
18868 #define ARM_VARIANT & arm_ext_sec
18869 #undef THUMB_VARIANT
18870 #define THUMB_VARIANT & arm_ext_sec
18871
18872 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
18873
18874 #undef ARM_VARIANT
18875 #define ARM_VARIANT & arm_ext_virt
18876 #undef THUMB_VARIANT
18877 #define THUMB_VARIANT & arm_ext_virt
18878
18879 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
18880 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
18881
18882 #undef ARM_VARIANT
18883 #define ARM_VARIANT & arm_ext_v6t2
18884 #undef THUMB_VARIANT
18885 #define THUMB_VARIANT & arm_ext_v6t2
18886
18887 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
18888 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
18889 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
18890 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
18891
18892 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
18893 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
18894 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
18895 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
18896
18897 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18898 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18899 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18900 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18901
18902 /* Thumb-only instructions. */
18903 #undef ARM_VARIANT
18904 #define ARM_VARIANT NULL
18905 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
18906 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
18907
18908 /* ARM does not really have an IT instruction, so always allow it.
18909 The opcode is copied from Thumb in order to allow warnings in
18910 -mimplicit-it=[never | arm] modes. */
18911 #undef ARM_VARIANT
18912 #define ARM_VARIANT & arm_ext_v1
18913
/* NOTE(review): each entry differs only in the low nibble of the bf0x
   opcode, which appears to encode the then/else mask for the extra
   condition slots (it=bf08, itttt=bf0f, iteee=bf01) — confirm against
   the Thumb-2 IT instruction encoding in the ARM ARM.  */
18914 TUE("it",   bf08,        bf08,     1, (COND),	it,    t_it),
18915 TUE("itt",   bf0c,        bf0c,     1, (COND),	it,    t_it),
18916 TUE("ite",   bf04,        bf04,     1, (COND),	it,    t_it),
18917 TUE("ittt",   bf0e,        bf0e,     1, (COND),	it,    t_it),
18918 TUE("itet",   bf06,        bf06,     1, (COND),	it,    t_it),
18919 TUE("itte",   bf0a,        bf0a,     1, (COND),	it,    t_it),
18920 TUE("itee",   bf02,        bf02,     1, (COND),	it,    t_it),
18921 TUE("itttt",   bf0f,        bf0f,     1, (COND),	it,    t_it),
18922 TUE("itett",   bf07,        bf07,     1, (COND),	it,    t_it),
18923 TUE("ittet",   bf0b,        bf0b,     1, (COND),	it,    t_it),
18924 TUE("iteet",   bf03,        bf03,     1, (COND),	it,    t_it),
18925 TUE("ittte",   bf0d,        bf0d,     1, (COND),	it,    t_it),
18926 TUE("itete",   bf05,        bf05,     1, (COND),	it,    t_it),
18927 TUE("ittee",   bf09,        bf09,     1, (COND),	it,    t_it),
18928 TUE("iteee",   bf01,        bf01,     1, (COND),	it,    t_it),
18929 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
18930 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
18931 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
18932
18933 /* Thumb2 only instructions. */
18934 #undef ARM_VARIANT
18935 #define ARM_VARIANT NULL
18936
18937 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
18938 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
18939 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
18940 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
18941 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
18942 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
18943
18944 /* Hardware division instructions. */
18945 #undef ARM_VARIANT
18946 #define ARM_VARIANT & arm_ext_adiv
18947 #undef THUMB_VARIANT
18948 #define THUMB_VARIANT & arm_ext_div
18949
18950 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
18951 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
18952
18953 /* ARM V6M/V7 instructions. */
18954 #undef ARM_VARIANT
18955 #define ARM_VARIANT & arm_ext_barrier
18956 #undef THUMB_VARIANT
18957 #define THUMB_VARIANT & arm_ext_barrier
18958
18959 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
18960 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
18961 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
18962
18963 /* ARM V7 instructions. */
18964 #undef ARM_VARIANT
18965 #define ARM_VARIANT & arm_ext_v7
18966 #undef THUMB_VARIANT
18967 #define THUMB_VARIANT & arm_ext_v7
18968
18969 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
18970 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
18971
18972 #undef ARM_VARIANT
18973 #define ARM_VARIANT & arm_ext_mp
18974 #undef THUMB_VARIANT
18975 #define THUMB_VARIANT & arm_ext_mp
18976
18977 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
18978
18979 /* AArchv8 instructions. */
18980 #undef ARM_VARIANT
18981 #define ARM_VARIANT & arm_ext_v8
18982 #undef THUMB_VARIANT
18983 #define THUMB_VARIANT & arm_ext_v8
18984
18985 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
18986 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
18987 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
18988 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
18989 ldrexd, t_ldrexd),
18990 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
18991 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
18992 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
18993 stlex, t_stlex),
18994 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
18995 strexd, t_strexd),
18996 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
18997 stlex, t_stlex),
18998 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
18999 stlex, t_stlex),
19000 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19001 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19002 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19003 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19004 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19005 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19006
19007 /* ARMv8 T32 only. */
19008 #undef ARM_VARIANT
19009 #define ARM_VARIANT NULL
19010 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
19011 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
19012 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
19013
19014 /* FP for ARMv8. */
19015 #undef ARM_VARIANT
19016 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
19017 #undef THUMB_VARIANT
19018 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
19019
19020 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
19021 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
19022 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
19023 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
19024 nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
19025 nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
19026 nUF(vcvta, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvta),
19027 nUF(vcvtn, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtn),
19028 nUF(vcvtp, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtp),
19029 nUF(vcvtm, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtm),
19030 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
19031 nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ), vrintz),
19032 nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ), vrintx),
19033 nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ), vrinta),
19034 nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ), vrintn),
19035 nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ), vrintp),
19036 nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ), vrintm),
19037
19038 /* Crypto v1 extensions. */
19039 #undef ARM_VARIANT
19040 #define ARM_VARIANT & fpu_crypto_ext_armv8
19041 #undef THUMB_VARIANT
19042 #define THUMB_VARIANT & fpu_crypto_ext_armv8
19043
19044 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
19045 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
19046 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
19047 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
19048 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
19049 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
19050 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
19051 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
19052 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
19053 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
19054 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
19055 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
19056 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
19057 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
19058
19059 #undef ARM_VARIANT
19060 #define ARM_VARIANT & crc_ext_armv8
19061 #undef THUMB_VARIANT
19062 #define THUMB_VARIANT & crc_ext_armv8
19063 TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
19064 TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
19065 TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
19066 TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
19067 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
19068 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
19069
19070 #undef ARM_VARIANT
19071 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
19072 #undef THUMB_VARIANT
19073 #define THUMB_VARIANT NULL
19074
19075 cCE("wfs", e200110, 1, (RR), rd),
19076 cCE("rfs", e300110, 1, (RR), rd),
19077 cCE("wfc", e400110, 1, (RR), rd),
19078 cCE("rfc", e500110, 1, (RR), rd),
19079
19080 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
19081 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
19082 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
19083 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
19084
19085 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
19086 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
19087 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
19088 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
19089
19090 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
19091 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
19092 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
19093 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
19094 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
19095 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
19096 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
19097 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
19098 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
19099 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
19100 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
19101 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
19102
19103 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
19104 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
19105 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
19106 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
19107 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
19108 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
19109 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
19110 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
19111 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
19112 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
19113 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
19114 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
19115
19116 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
19117 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
19118 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
19119 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
19120 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
19121 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
19122 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
19123 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
19124 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
19125 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
19126 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
19127 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
19128
19129 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
19130 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
19131 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
19132 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
19133 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
19134 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
19135 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
19136 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
19137 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
19138 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
19139 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
19140 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
19141
19142 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
19143 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
19144 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
19145 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
19146 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
19147 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
19148 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
19149 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
19150 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
19151 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
19152 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
19153 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
19154
19155 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
19156 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
19157 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
19158 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
19159 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
19160 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
19161 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
19162 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
19163 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
19164 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
19165 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
19166 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
19167
19168 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
19169 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
19170 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
19171 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
19172 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
19173 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
19174 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
19175 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
19176 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
19177 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
19178 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
19179 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
19180
19181 cCL("exps", e708100, 2, (RF, RF_IF), rd_rm),
19182 cCL("expsp", e708120, 2, (RF, RF_IF), rd_rm),
19183 cCL("expsm", e708140, 2, (RF, RF_IF), rd_rm),
19184 cCL("expsz", e708160, 2, (RF, RF_IF), rd_rm),
19185 cCL("expd", e708180, 2, (RF, RF_IF), rd_rm),
19186 cCL("expdp", e7081a0, 2, (RF, RF_IF), rd_rm),
19187 cCL("expdm", e7081c0, 2, (RF, RF_IF), rd_rm),
19188 cCL("expdz", e7081e0, 2, (RF, RF_IF), rd_rm),
19189 cCL("expe", e788100, 2, (RF, RF_IF), rd_rm),
19190 cCL("expep", e788120, 2, (RF, RF_IF), rd_rm),
19191 cCL("expem", e788140, 2, (RF, RF_IF), rd_rm),
19192 cCL("expdz", e788160, 2, (RF, RF_IF), rd_rm),
19193
19194 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
19195 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
19196 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
19197 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
19198 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
19199 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
19200 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
19201 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
19202 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
19203 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
19204 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
19205 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
19206
19207 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
19208 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
19209 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
19210 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
19211 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
19212 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
19213 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
19214 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
19215 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
19216 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
19217 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
19218 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
19219
19220 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
19221 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
19222 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
19223 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
19224 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
19225 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
19226 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
19227 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
19228 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
19229 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
19230 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
19231 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
19232
19233 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
19234 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
19235 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
19236 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
19237 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
19238 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
19239 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
19240 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
19241 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
19242 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
19243 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
19244 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
19245
19246 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
19247 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
19248 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
19249 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
19250 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
19251 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
19252 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
19253 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
19254 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
19255 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
19256 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
19257 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
19258
19259 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
19260 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
19261 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
19262 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
19263 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
19264 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
19265 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
19266 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
19267 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
19268 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
19269 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
19270 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
19271
19272 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
19273 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
19274 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
19275 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
19276 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
19277 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
19278 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
19279 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
19280 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
19281 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
19282 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
19283 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
19284
19285 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
19286 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
19287 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
19288 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
19289 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
19290 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
19291 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
19292 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
19293 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
19294 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
19295 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
19296 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
19297
19298 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
19299 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
19300 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
19301 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
19302 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
19303 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19304 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19305 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19306 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
19307 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
19308 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
19309 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
19310
19311 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
19312 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
19313 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
19314 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
19315 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
19316 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19317 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19318 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19319 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
19320 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
19321 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
19322 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
19323
19324 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
19325 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
19326 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
19327 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
19328 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
19329 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19330 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19331 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19332 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
19333 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
19334 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
19335 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
19336
19337 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
19338 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
19339 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
19340 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
19341 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
19342 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19343 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19344 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19345 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
19346 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
19347 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
19348 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
19349
19350 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
19351 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
19352 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
19353 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
19354 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
19355 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19356 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19357 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19358 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
19359 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
19360 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
19361 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
19362
19363 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
19364 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
19365 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
19366 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
19367 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
19368 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19369 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19370 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19371 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
19372 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
19373 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
19374 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
19375
19376 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
19377 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
19378 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
19379 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
19380 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
19381 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19382 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19383 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19384 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
19385 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
19386 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
19387 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
19388
19389 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
19390 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
19391 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
19392 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
19393 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
19394 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19395 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19396 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19397 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
19398 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
19399 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
19400 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
19401
19402 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
19403 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
19404 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
19405 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
19406 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
19407 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19408 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19409 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19410 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
19411 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
19412 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
19413 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
19414
19415 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
19416 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
19417 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
19418 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
19419 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
19420 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19421 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19422 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19423 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
19424 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
19425 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
19426 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
19427
19428 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19429 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19430 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19431 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19432 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19433 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19434 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19435 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19436 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19437 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19438 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19439 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19440
19441 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19442 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19443 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19444 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19445 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19446 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19447 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19448 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19449 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19450 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19451 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19452 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19453
19454 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19455 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19456 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19457 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19458 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19459 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19460 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19461 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19462 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19463 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19464 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19465 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19466
19467 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
19468 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
19469 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
19470 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
19471
19472 cCL("flts", e000110, 2, (RF, RR), rn_rd),
19473 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
19474 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
19475 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
19476 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
19477 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
19478 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
19479 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
19480 cCL("flte", e080110, 2, (RF, RR), rn_rd),
19481 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
19482 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
19483 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
19484
19485 /* The implementation of the FIX instruction is broken on some
19486 assemblers, in that it accepts a precision specifier as well as a
19487 rounding specifier, despite the fact that this is meaningless.
19488 To be more compatible, we accept it as well, though of course it
19489 does not set any bits. */
19490 cCE("fix", e100110, 2, (RR, RF), rd_rm),
19491 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
19492 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
19493 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
19494 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
19495 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
19496 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
19497 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
19498 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
19499 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
19500 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
19501 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
19502 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
19503
19504 /* Instructions that were new with the real FPA, call them V2. */
19505 #undef ARM_VARIANT
19506 #define ARM_VARIANT & fpu_fpa_ext_v2
19507
19508 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19509 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19510 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19511 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19512 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19513 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19514
19515 #undef ARM_VARIANT
19516 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
19517
19518 /* Moves and type conversions. */
19519 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
19520 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
19521 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
19522 cCE("fmstat", ef1fa10, 0, (), noargs),
19523 cCE("vmrs", ef00a10, 2, (APSR_RR, RVC), vmrs),
19524 cCE("vmsr", ee00a10, 2, (RVC, RR), vmsr),
19525 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
19526 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
19527 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
19528 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
19529 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
19530 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
19531 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
19532 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
19533
19534 /* Memory operations. */
19535 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
19536 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
19537 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19538 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19539 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19540 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19541 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19542 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19543 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19544 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19545 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19546 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19547 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19548 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19549 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19550 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19551 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19552 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19553
19554 /* Monadic operations. */
19555 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
19556 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
19557 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
19558
19559 /* Dyadic operations. */
19560 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19561 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19562 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19563 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19564 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19565 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19566 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19567 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19568 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19569
19570 /* Comparisons. */
19571 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
19572 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
19573 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
19574 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
19575
19576 /* Double precision load/store are still present on single precision
19577 implementations. */
19578 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
19579 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
19580 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19581 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19582 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19583 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19584 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19585 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19586 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19587 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19588
19589 #undef ARM_VARIANT
19590 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
19591
19592 /* Moves and type conversions. */
19593 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19594 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
19595 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19596 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
19597 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
19598 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
19599 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
19600 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
19601 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
19602 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
19603 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19604 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
19605 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19606
19607 /* Monadic operations. */
19608 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19609 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19610 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19611
19612 /* Dyadic operations. */
19613 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19614 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19615 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19616 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19617 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19618 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19619 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19620 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19621 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19622
19623 /* Comparisons. */
19624 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19625 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
19626 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19627 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
19628
19629 #undef ARM_VARIANT
19630 #define ARM_VARIANT & fpu_vfp_ext_v2
19631
19632 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
19633 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
19634 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
19635 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
19636
19637 /* Instructions which may belong to either the Neon or VFP instruction sets.
19638 Individual encoder functions perform additional architecture checks. */
19639 #undef ARM_VARIANT
19640 #define ARM_VARIANT & fpu_vfp_ext_v1xd
19641 #undef THUMB_VARIANT
19642 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
19643
19644 /* These mnemonics are unique to VFP. */
19645 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
19646 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
19647 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19648 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19649 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19650 nCE(vcmp, _vcmp, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
19651 nCE(vcmpe, _vcmpe, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
19652 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
19653 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
19654 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
19655
19656 /* Mnemonics shared by Neon and VFP. */
19657 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
19658 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
19659 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
19660
19661 nCEF(vadd, _vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
19662 nCEF(vsub, _vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
19663
19664 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
19665 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
19666
19667 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19668 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19669 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19670 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19671 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19672 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19673 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
19674 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
19675
19676 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
19677 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
19678 NCEF(vcvtb, eb20a40, 2, (RVSD, RVSD), neon_cvtb),
19679 NCEF(vcvtt, eb20a40, 2, (RVSD, RVSD), neon_cvtt),
19680
19681
19682 /* NOTE: All VMOV encoding is special-cased! */
19683 NCE(vmov, 0, 1, (VMOV), neon_mov),
19684 NCE(vmovq, 0, 1, (VMOV), neon_mov),
19685
19686 #undef THUMB_VARIANT
19687 #define THUMB_VARIANT & fpu_neon_ext_v1
19688 #undef ARM_VARIANT
19689 #define ARM_VARIANT & fpu_neon_ext_v1
19690
19691 /* Data processing with three registers of the same length. */
19692 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
19693 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
19694 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
19695 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19696 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
19697 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19698 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
19699 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19700 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
19701 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
19702 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
19703 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
19704 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
19705 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
19706 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
19707 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
19708 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
19709 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
19710 /* If not immediate, fall back to neon_dyadic_i64_su.
19711 shl_imm should accept I8 I16 I32 I64,
19712 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
19713 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
19714 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
19715 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
19716 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
19717 /* Logic ops, types optional & ignored. */
19718 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19719 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19720 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19721 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19722 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19723 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19724 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19725 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19726 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
19727 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
19728 /* Bitfield ops, untyped. */
19729 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
19730 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
19731 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
19732 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
19733 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
19734 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
19735 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
19736 nUF(vabd, _vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
19737 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
19738 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
19739 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
19740 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
19741 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
19742 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
19743 back to neon_dyadic_if_su. */
19744 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
19745 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
19746 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
19747 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
19748 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
19749 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
19750 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
19751 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
19752 /* Comparison. Type I8 I16 I32 F32. */
19753 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
19754 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
19755 /* As above, D registers only. */
19756 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
19757 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
19758 /* Int and float variants, signedness unimportant. */
19759 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
19760 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
19761 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
19762 /* Add/sub take types I8 I16 I32 I64 F32. */
19763 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
19764 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
19765 /* vtst takes sizes 8, 16, 32. */
19766 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
19767 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
19768 /* VMUL takes I8 I16 I32 F32 P8. */
19769 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
19770 /* VQD{R}MULH takes S16 S32. */
19771 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
19772 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
19773 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
19774 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
19775 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
19776 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
19777 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
19778 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
19779 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
19780 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
19781 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
19782 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
19783 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
19784 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
19785 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
19786 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
19787
19788 /* Two address, int/float. Types S8 S16 S32 F32. */
19789 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
19790 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
19791
19792 /* Data processing with two registers and a shift amount. */
19793 /* Right shifts, and variants with rounding.
19794 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
19795 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
19796 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
19797 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
19798 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
19799 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
19800 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
19801 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
19802 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
19803 /* Shift and insert. Sizes accepted 8 16 32 64. */
19804 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
19805 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
19806 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
19807 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
19808 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
19809 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
19810 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
19811 /* Right shift immediate, saturating & narrowing, with rounding variants.
19812 Types accepted S16 S32 S64 U16 U32 U64. */
19813 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
19814 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
19815 /* As above, unsigned. Types accepted S16 S32 S64. */
19816 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
19817 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
19818 /* Right shift narrowing. Types accepted I16 I32 I64. */
19819 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
19820 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
19821 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
19822 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
19823 /* CVT with optional immediate for fixed-point variant. */
19824 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
19825
19826 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
19827 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
19828
19829 /* Data processing, three registers of different lengths. */
19830 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
19831 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
19832 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
19833 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
19834 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
19835 /* If not scalar, fall back to neon_dyadic_long.
19836 Vector types as above, scalar types S16 S32 U16 U32. */
19837 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
19838 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
19839 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
19840 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
19841 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
19842 /* Dyadic, narrowing insns. Types I16 I32 I64. */
19843 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
19844 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
19845 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
19846 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
19847 /* Saturating doubling multiplies. Types S16 S32. */
19848 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
19849 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
19850 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
19851 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
19852 S16 S32 U16 U32. */
19853 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
19854
19855 /* Extract. Size 8. */
19856 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
19857 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
19858
19859 /* Two registers, miscellaneous. */
19860 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
19861 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
19862 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
19863 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
19864 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
19865 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
19866 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
19867 /* Vector replicate. Sizes 8 16 32. */
19868 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
19869 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
19870 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
19871 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
19872 /* VMOVN. Types I16 I32 I64. */
19873 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
19874 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
19875 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
19876 /* VQMOVUN. Types S16 S32 S64. */
19877 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
19878 /* VZIP / VUZP. Sizes 8 16 32. */
19879 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
19880 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
19881 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
19882 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
19883 /* VQABS / VQNEG. Types S8 S16 S32. */
19884 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
19885 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
19886 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
19887 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
19888 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
19889 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
19890 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
19891 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
19892 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
19893 /* Reciprocal estimates. Types U32 F32. */
19894 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
19895 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
19896 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
19897 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
19898 /* VCLS. Types S8 S16 S32. */
19899 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
19900 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
19901 /* VCLZ. Types I8 I16 I32. */
19902 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
19903 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
19904 /* VCNT. Size 8. */
19905 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
19906 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
19907 /* Two address, untyped. */
19908 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
19909 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
19910 /* VTRN. Sizes 8 16 32. */
19911 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
19912 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
19913
19914 /* Table lookup. Size 8. */
19915 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
19916 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
19917
19918 #undef THUMB_VARIANT
19919 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
19920 #undef ARM_VARIANT
19921 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
19922
19923 /* Neon element/structure load/store. */
19924 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
19925 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
19926 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
19927 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
19928 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
19929 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
19930 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
19931 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
19932
19933 #undef THUMB_VARIANT
19934 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
19935 #undef ARM_VARIANT
19936 #define ARM_VARIANT & fpu_vfp_ext_v3xd
19937 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
19938 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
19939 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
19940 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
19941 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
19942 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
19943 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
19944 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
19945 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
19946
19947 #undef THUMB_VARIANT
19948 #define THUMB_VARIANT & fpu_vfp_ext_v3
19949 #undef ARM_VARIANT
19950 #define ARM_VARIANT & fpu_vfp_ext_v3
19951
19952 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
19953 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
19954 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
19955 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
19956 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
19957 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
19958 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
19959 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
19960 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
19961
19962 #undef ARM_VARIANT
19963 #define ARM_VARIANT & fpu_vfp_ext_fma
19964 #undef THUMB_VARIANT
19965 #define THUMB_VARIANT & fpu_vfp_ext_fma
19966 /* Mnemonics shared by Neon and VFP. These are included in the
19967 VFP FMA variant; NEON and VFP FMA always includes the NEON
19968 FMA instructions. */
19969 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
19970 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
19971 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
19972 the v form should always be used. */
19973 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19974 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19975 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19976 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19977 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19978 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19979
19980 #undef THUMB_VARIANT
19981 #undef ARM_VARIANT
19982 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
19983
19984 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19985 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19986 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19987 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19988 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19989 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19990 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
19991 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
19992
19993 #undef ARM_VARIANT
19994 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
19995
19996 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
19997 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
19998 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
19999 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
20000 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
20001 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
20002 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
20003 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
20004 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
20005 cCE("textrmub",e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20006 cCE("textrmuh",e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20007 cCE("textrmuw",e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20008 cCE("textrmsb",e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20009 cCE("textrmsh",e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20010 cCE("textrmsw",e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20011 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20012 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20013 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20014 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
20015 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
20016 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20017 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20018 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20019 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20020 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20021 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20022 cCE("tmovmskb",e100030, 2, (RR, RIWR), rd_rn),
20023 cCE("tmovmskh",e500030, 2, (RR, RIWR), rd_rn),
20024 cCE("tmovmskw",e900030, 2, (RR, RIWR), rd_rn),
20025 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
20026 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
20027 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
20028 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
20029 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
20030 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
20031 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
20032 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
20033 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20034 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20035 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20036 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20037 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20038 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20039 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20040 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20041 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20042 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
20043 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20044 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20045 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20046 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20047 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20048 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20049 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20050 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20051 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20052 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20053 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20054 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20055 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20056 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20057 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20058 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20059 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20060 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20061 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20062 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20063 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20064 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
20065 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
20066 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20067 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20068 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20069 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20070 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20071 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20072 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20073 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20074 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20075 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20076 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20077 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20078 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20079 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20080 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20081 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20082 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20083 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20084 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
20085 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20086 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20087 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20088 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20089 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20090 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20091 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20092 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20093 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20094 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20095 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20096 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20097 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20098 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20099 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20100 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20101 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20102 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20103 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20104 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20105 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20106 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
20107 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20108 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20109 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20110 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20111 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20112 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20113 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20114 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20115 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20116 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20117 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20118 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20119 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20120 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20121 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20122 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20123 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20124 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20125 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20126 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20127 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
20128 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
20129 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20130 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20131 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20132 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20133 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20134 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20135 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20136 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20137 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20138 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
20139 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
20140 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
20141 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
20142 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
20143 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
20144 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20145 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20146 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20147 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
20148 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
20149 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
20150 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
20151 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
20152 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
20153 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20154 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20155 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20156 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20157 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
20158
20159 #undef ARM_VARIANT
20160 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
20161
20162 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
20163 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
20164 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
20165 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
20166 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
20167 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
20168 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20169 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20170 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20171 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20172 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20173 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20174 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20175 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20176 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20177 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20178 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20179 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20180 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20181 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20182 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
20183 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20184 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20185 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20186 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20187 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20188 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20189 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20190 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20191 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20192 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20193 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20194 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20195 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20196 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20197 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20198 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20199 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20200 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20201 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20202 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20203 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20204 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20205 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20206 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20207 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20208 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20209 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20210 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20211 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20212 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20213 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20214 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20215 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20216 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20217 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20218 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20219
20220 #undef ARM_VARIANT
20221 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
20222
20223 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
20224 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
20225 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
20226 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
20227 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
20228 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
20229 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
20230 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
20231 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
20232 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
20233 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
20234 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
20235 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
20236 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
20237 cCE("cfmv64lr",e000510, 2, (RMDX, RR), rn_rd),
20238 cCE("cfmvr64l",e100510, 2, (RR, RMDX), rd_rn),
20239 cCE("cfmv64hr",e000530, 2, (RMDX, RR), rn_rd),
20240 cCE("cfmvr64h",e100530, 2, (RR, RMDX), rd_rn),
20241 cCE("cfmval32",e200440, 2, (RMAX, RMFX), rd_rn),
20242 cCE("cfmv32al",e100440, 2, (RMFX, RMAX), rd_rn),
20243 cCE("cfmvam32",e200460, 2, (RMAX, RMFX), rd_rn),
20244 cCE("cfmv32am",e100460, 2, (RMFX, RMAX), rd_rn),
20245 cCE("cfmvah32",e200480, 2, (RMAX, RMFX), rd_rn),
20246 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX), rd_rn),
20247 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
20248 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
20249 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
20250 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
20251 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX), mav_dspsc),
20252 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS), rd),
20253 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
20254 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
20255 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
20256 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
20257 cCE("cfcvt32s",e000480, 2, (RMF, RMFX), rd_rn),
20258 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX), rd_rn),
20259 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX), rd_rn),
20260 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX), rd_rn),
20261 cCE("cfcvts32",e100580, 2, (RMFX, RMF), rd_rn),
20262 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD), rd_rn),
20263 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
20264 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
20265 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR), mav_triple),
20266 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR), mav_triple),
20267 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
20268 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
20269 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
20270 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
20271 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
20272 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
20273 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
20274 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
20275 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
20276 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
20277 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
20278 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
20279 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
20280 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
20281 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
20282 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
20283 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
20284 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
20285 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
20286 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
20287 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20288 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20289 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20290 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20291 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20292 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20293 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20294 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20295 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
20296 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
20297 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
20298 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
20299 };
20300 #undef ARM_VARIANT
20301 #undef THUMB_VARIANT
20302 #undef TCE
20303 #undef TUE
20304 #undef TUF
20305 #undef TCC
20306 #undef cCE
20307 #undef cCL
20308 #undef C3E
20309 #undef CE
20310 #undef CM
20311 #undef UE
20312 #undef UF
20313 #undef UT
20314 #undef NUF
20315 #undef nUF
20316 #undef NCE
20317 #undef nCE
20318 #undef OPS0
20319 #undef OPS1
20320 #undef OPS2
20321 #undef OPS3
20322 #undef OPS4
20323 #undef OPS5
20324 #undef OPS6
20325 #undef do_0
20326 \f
20327 /* MD interface: bits in the object file. */
20328
20329 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
20330 for use in the a.out file, and stores them in the array pointed to by buf.
20331 This knows about the endian-ness of the target machine and does
20332 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
20333 2 (short) and 4 (long) Floating numbers are put out as a series of
20334 LITTLENUMS (shorts, here at least). */
20335
20336 void
20337 md_number_to_chars (char * buf, valueT val, int n)
20338 {
20339 if (target_big_endian)
20340 number_to_chars_bigendian (buf, val, n);
20341 else
20342 number_to_chars_littleendian (buf, val, n);
20343 }
20344
20345 static valueT
20346 md_chars_to_number (char * buf, int n)
20347 {
20348 valueT result = 0;
20349 unsigned char * where = (unsigned char *) buf;
20350
20351 if (target_big_endian)
20352 {
20353 while (n--)
20354 {
20355 result <<= 8;
20356 result |= (*where++ & 255);
20357 }
20358 }
20359 else
20360 {
20361 while (n--)
20362 {
20363 result <<= 8;
20364 result |= (where[n] & 255);
20365 }
20366 }
20367
20368 return result;
20369 }
20370
20371 /* MD interface: Sections. */
20372
20373 /* Calculate the maximum variable size (i.e., excluding fr_fix)
20374 that an rs_machine_dependent frag may reach. */
20375
20376 unsigned int
20377 arm_frag_max_var (fragS *fragp)
20378 {
20379 /* We only use rs_machine_dependent for variable-size Thumb instructions,
20380 which are either THUMB_SIZE (2) or INSN_SIZE (4).
20381
20382 Note that we generate relaxable instructions even for cases that don't
20383 really need it, like an immediate that's a trivial constant. So we're
20384 overestimating the instruction size for some of those cases. Rather
20385 than putting more intelligence here, it would probably be better to
20386 avoid generating a relaxation frag in the first place when it can be
20387 determined up front that a short instruction will suffice. */
20388
20389 gas_assert (fragp->fr_type == rs_machine_dependent);
20390 return INSN_SIZE;
20391 }
20392
20393 /* Estimate the size of a frag before relaxing. Assume everything fits in
20394 2 bytes. */
20395
20396 int
20397 md_estimate_size_before_relax (fragS * fragp,
20398 segT segtype ATTRIBUTE_UNUSED)
20399 {
20400 fragp->fr_var = 2;
20401 return 2;
20402 }
20403
/* Convert a machine dependent frag.  Called once relaxation has fixed
   the size of each rs_machine_dependent frag: rewrites the relaxed
   Thumb instruction in place (widening it to the 32-bit encoding when
   fr_var == 4) and emits the fix-up that will later fill in the
   immediate or branch offset.  */

void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;		/* The original narrow (16-bit) encoding.  */
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* The instruction lives at the end of the fixed part of the frag.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  old_op = bfd_get_16(abfd, buf);
  /* Build the expression the fix-up will resolve: symbol + offset,
     or a plain constant when there is no symbol.  */
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  /* fr_subtype holds the Thumb mnemonic this frag was created for.  */
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  /* Widen to the 32-bit encoding, transplanting the register
	     fields from the old 16-bit opcode.  The SP/PC-relative
	     forms (top nibble 4 or 9) carry Rd in a different field
	     than the register-offset forms.  */
	  insn = THUMB_OP32 (opcode);
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the synthesized literal-pool load is PC-relative.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* Narrow ADR is relative to the aligned PC, 4 bytes on.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* mov/movs encode Rd at bit 8; cmp/cmn place the register
	     in the Rn field instead, hence the differing shift.  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Move the condition code into the wide encoding.  */
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 distinguishes add/sub (which can use the wider
	     T32_ADD_IMM reloc) from the plain-immediate forms.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort ();
    }
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  /* Report errors against the source line the instruction came from,
     not wherever relaxation happens to run.  */
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;

  /* Set whether we use thumb-2 ISA based on final relaxation results.  */
  if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
      && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
    ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
}
20577
20578 /* Return the size of a relaxable immediate operand instruction.
20579 SHIFT and SIZE specify the form of the allowable immediate. */
20580 static int
20581 relax_immediate (fragS *fragp, int size, int shift)
20582 {
20583 offsetT offset;
20584 offsetT mask;
20585 offsetT low;
20586
20587 /* ??? Should be able to do better than this. */
20588 if (fragp->fr_symbol)
20589 return 4;
20590
20591 low = (1 << shift) - 1;
20592 mask = (1 << (shift + size)) - (1 << shift);
20593 offset = fragp->fr_offset;
20594 /* Force misaligned offsets to 32-bit variant. */
20595 if (offset & low)
20596 return 4;
20597 if (offset & ~mask)
20598 return 4;
20599 return 2;
20600 }
20601
/* Get the address of a symbol during relaxation.  STRETCH is the
   cumulative size change accumulated so far on this relaxation pass;
   symbols whose frags have not yet been visited this pass are assumed
   to move by the same amount.  */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  /* Different relax_marker values mean SYM_FRAG has not been relaxed
     on the current pass yet.  */
  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* An intervening alignment frag absorbs part of the
		 stretch; round it toward zero to the alignment
		 boundary (fr_offset is the alignment power).  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      if (stretch == 0)
		break;
	    }
	}
      /* Only apply the adjustment if the symbol's frag really lies
	 after us (the walk found it before hitting end of chain).  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
20651
20652 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
20653 load. */
20654 static int
20655 relax_adr (fragS *fragp, asection *sec, long stretch)
20656 {
20657 addressT addr;
20658 offsetT val;
20659
20660 /* Assume worst case for symbols not known to be in the same section. */
20661 if (fragp->fr_symbol == NULL
20662 || !S_IS_DEFINED (fragp->fr_symbol)
20663 || sec != S_GET_SEGMENT (fragp->fr_symbol)
20664 || S_IS_WEAK (fragp->fr_symbol))
20665 return 4;
20666
20667 val = relaxed_symbol_addr (fragp, stretch);
20668 addr = fragp->fr_address + fragp->fr_fix;
20669 addr = (addr + 4) & ~3;
20670 /* Force misaligned targets to 32-bit variant. */
20671 if (val & 3)
20672 return 4;
20673 val -= addr;
20674 if (val < 0 || val > 1020)
20675 return 4;
20676 return 2;
20677 }
20678
20679 /* Return the size of a relaxable add/sub immediate instruction. */
20680 static int
20681 relax_addsub (fragS *fragp, asection *sec)
20682 {
20683 char *buf;
20684 int op;
20685
20686 buf = fragp->fr_literal + fragp->fr_fix;
20687 op = bfd_get_16(sec->owner, buf);
20688 if ((op & 0xf) == ((op >> 4) & 0xf))
20689 return relax_immediate (fragp, 8, 0);
20690 else
20691 return relax_immediate (fragp, 3, 0);
20692 }
20693
20694 /* Return TRUE iff the definition of symbol S could be pre-empted
20695 (overridden) at link or load time. */
20696 static bfd_boolean
20697 symbol_preemptible (symbolS *s)
20698 {
20699 /* Weak symbols can always be pre-empted. */
20700 if (S_IS_WEAK (s))
20701 return TRUE;
20702
20703 /* Non-global symbols cannot be pre-empted. */
20704 if (! S_IS_EXTERNAL (s))
20705 return FALSE;
20706
20707 #ifdef OBJ_ELF
20708 /* In ELF, a global symbol can be marked protected, or private. In that
20709 case it can't be pre-empted (other definitions in the same link unit
20710 would violate the ODR). */
20711 if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
20712 return FALSE;
20713 #endif
20714
20715 /* Other global symbols might be pre-empted. */
20716 return TRUE;
20717 }
20718
20719 /* Return the size of a relaxable branch instruction. BITS is the
20720 size of the offset field in the narrow instruction. */
20721
20722 static int
20723 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
20724 {
20725 addressT addr;
20726 offsetT val;
20727 offsetT limit;
20728
20729 /* Assume worst case for symbols not known to be in the same section. */
20730 if (!S_IS_DEFINED (fragp->fr_symbol)
20731 || sec != S_GET_SEGMENT (fragp->fr_symbol)
20732 || S_IS_WEAK (fragp->fr_symbol))
20733 return 4;
20734
20735 #ifdef OBJ_ELF
20736 /* A branch to a function in ARM state will require interworking. */
20737 if (S_IS_DEFINED (fragp->fr_symbol)
20738 && ARM_IS_FUNC (fragp->fr_symbol))
20739 return 4;
20740 #endif
20741
20742 if (symbol_preemptible (fragp->fr_symbol))
20743 return 4;
20744
20745 val = relaxed_symbol_addr (fragp, stretch);
20746 addr = fragp->fr_address + fragp->fr_fix + 4;
20747 val -= addr;
20748
20749 /* Offset is a signed value *2 */
20750 limit = 1 << bits;
20751 if (val >= limit || val < -limit)
20752 return 4;
20753 return 2;
20754 }
20755
20756
20757 /* Relax a machine dependent frag. This returns the amount by which
20758 the current size of the frag should change. */
20759
20760 int
20761 arm_relax_frag (asection *sec, fragS *fragp, long stretch)
20762 {
20763 int oldsize;
20764 int newsize;
20765
20766 oldsize = fragp->fr_var;
20767 switch (fragp->fr_subtype)
20768 {
20769 case T_MNEM_ldr_pc2:
20770 newsize = relax_adr (fragp, sec, stretch);
20771 break;
20772 case T_MNEM_ldr_pc:
20773 case T_MNEM_ldr_sp:
20774 case T_MNEM_str_sp:
20775 newsize = relax_immediate (fragp, 8, 2);
20776 break;
20777 case T_MNEM_ldr:
20778 case T_MNEM_str:
20779 newsize = relax_immediate (fragp, 5, 2);
20780 break;
20781 case T_MNEM_ldrh:
20782 case T_MNEM_strh:
20783 newsize = relax_immediate (fragp, 5, 1);
20784 break;
20785 case T_MNEM_ldrb:
20786 case T_MNEM_strb:
20787 newsize = relax_immediate (fragp, 5, 0);
20788 break;
20789 case T_MNEM_adr:
20790 newsize = relax_adr (fragp, sec, stretch);
20791 break;
20792 case T_MNEM_mov:
20793 case T_MNEM_movs:
20794 case T_MNEM_cmp:
20795 case T_MNEM_cmn:
20796 newsize = relax_immediate (fragp, 8, 0);
20797 break;
20798 case T_MNEM_b:
20799 newsize = relax_branch (fragp, sec, 11, stretch);
20800 break;
20801 case T_MNEM_bcond:
20802 newsize = relax_branch (fragp, sec, 8, stretch);
20803 break;
20804 case T_MNEM_add_sp:
20805 case T_MNEM_add_pc:
20806 newsize = relax_immediate (fragp, 8, 2);
20807 break;
20808 case T_MNEM_inc_sp:
20809 case T_MNEM_dec_sp:
20810 newsize = relax_immediate (fragp, 7, 2);
20811 break;
20812 case T_MNEM_addi:
20813 case T_MNEM_addis:
20814 case T_MNEM_subi:
20815 case T_MNEM_subis:
20816 newsize = relax_addsub (fragp, sec);
20817 break;
20818 default:
20819 abort ();
20820 }
20821
20822 fragp->fr_var = newsize;
20823 /* Freeze wide instructions that are at or before the same location as
20824 in the previous pass. This avoids infinite loops.
20825 Don't freeze them unconditionally because targets may be artificially
20826 misaligned by the expansion of preceding frags. */
20827 if (stretch <= 0 && newsize > 2)
20828 {
20829 md_convert_frag (sec->owner, sec, fragp);
20830 frag_wane (fragp);
20831 }
20832
20833 return newsize - oldsize;
20834 }
20835
20836 /* Round up a section size to the appropriate boundary. */
20837
20838 valueT
20839 md_section_align (segT segment ATTRIBUTE_UNUSED,
20840 valueT size)
20841 {
20842 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
20843 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
20844 {
20845 /* For a.out, force the section size to be aligned. If we don't do
20846 this, BFD will align it for us, but it will not write out the
20847 final bytes of the section. This may be a bug in BFD, but it is
20848 easier to fix it here since that is how the other a.out targets
20849 work. */
20850 int align;
20851
20852 align = bfd_get_section_alignment (stdoutput, segment);
20853 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
20854 }
20855 #endif
20856
20857 return size;
20858 }
20859
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment: pad code sections with no-op
   instructions (ARM or Thumb, chosen from the mode recorded on the
   frag) rather than zero bytes, zero-filling only any sub-instruction
   remainder.  */

void
arm_handle_align (fragS * fragP)
{
  /* Canned no-op encodings, indexed by [architecture variant]
     [target_big_endian].  */
  static char const arm_noop[2][2][4] =
    {
      {  /* ARMv1 */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      {  /* ARMv6k */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  static char const thumb_noop[2][2][2] =
    {
      {  /* Thumb-1 */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      {  /* Thumb-2 */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  static char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const char * noop;
  const char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  /* Number of padding bytes this frag must supply.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  /* NOTE(review): the &-mask assumes MAX_MEM_FOR_RS_ALIGN_CODE is of
     the form 2^n - 1 — confirm against its definition.  */
  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  /* arm_init_frag must have recorded the ARM/Thumb mode by now.  */
  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      /* Thumb code: prefer Thumb-2 no-ops (narrow plus wide) when the
	 selected CPU has v6t2.  */
      if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
	{
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      /* ARM code: use the v6k NOP when available, otherwise MOV r0,r0.  */
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
					   ? selected_cpu : arm_arch_none,
					   arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  /* Zero-fill any remainder that is not a multiple of the no-op size,
     marking it as data with a mapping symbol on ELF.  */
  if (bytes & (noop_size - 1))
    {
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
20979
20980 /* Called from md_do_align. Used to create an alignment
20981 frag in a code section. */
20982
20983 void
20984 arm_frag_align_code (int n, int max)
20985 {
20986 char * p;
20987
20988 /* We assume that there will never be a requirement
20989 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
20990 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
20991 {
20992 char err_msg[128];
20993
20994 sprintf (err_msg,
20995 _("alignments greater than %d bytes not supported in .text sections."),
20996 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
20997 as_fatal ("%s", err_msg);
20998 }
20999
21000 p = frag_var (rs_align_code,
21001 MAX_MEM_FOR_RS_ALIGN_CODE,
21002 1,
21003 (relax_substateT) max,
21004 (symbolS *) NULL,
21005 (offsetT) n,
21006 (char *) NULL);
21007 *p = 0;
21008 }
21009
/* Perform target specific initialisation of a frag.
   Note - despite the name this initialisation is not done when the frag
   is created, but only when its type is assigned.  A frag can be created
   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */
21015
21016 #ifndef OBJ_ELF
void
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
{
  /* Record whether this frag is in an ARM or a THUMB area, so that
     arm_handle_align can later pick the matching no-op padding.  */
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
}
21023
21024 #else /* OBJ_ELF is defined. */
21025 void
21026 arm_init_frag (fragS * fragP, int max_chars)
21027 {
21028 /* If the current ARM vs THUMB mode has not already
21029 been recorded into this frag then do so now. */
21030 if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
21031 fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
21032
21033 /* Record a mapping symbol for alignment frags. We will delete this
21034 later if the alignment ends up empty. */
21035 switch (fragP->fr_type)
21036 {
21037 case rs_align:
21038 case rs_align_test:
21039 case rs_fill:
21040 mapping_state_2 (MAP_DATA, max_chars);
21041 break;
21042 case rs_align_code:
21043 mapping_state_2 (thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
21044 break;
21045 default:
21046 break;
21047 }
21048 }
21049
21050 /* When we change sections we need to issue a new mapping symbol. */
21051
21052 void
21053 arm_elf_change_section (void)
21054 {
21055 /* Link an unlinked unwind index table section to the .text section. */
21056 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
21057 && elf_linked_to_section (now_seg) == NULL)
21058 elf_linked_to_section (now_seg) = text_section;
21059 }
21060
21061 int
21062 arm_elf_section_type (const char * str, size_t len)
21063 {
21064 if (len == 5 && strncmp (str, "exidx", 5) == 0)
21065 return SHT_ARM_EXIDX;
21066
21067 return -1;
21068 }
21069 \f
21070 /* Code to deal with unwinding tables. */
21071
21072 static void add_unwind_adjustsp (offsetT);
21073
21074 /* Generate any deferred unwind frame offset. */
21075
21076 static void
21077 flush_pending_unwind (void)
21078 {
21079 offsetT offset;
21080
21081 offset = unwind.pending_offset;
21082 unwind.pending_offset = 0;
21083 if (offset != 0)
21084 add_unwind_adjustsp (offset);
21085 }
21086
21087 /* Add an opcode to this list for this function. Two-byte opcodes should
21088 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
21089 order. */
21090
21091 static void
21092 add_unwind_opcode (valueT op, int length)
21093 {
21094 /* Add any deferred stack adjustment. */
21095 if (unwind.pending_offset)
21096 flush_pending_unwind ();
21097
21098 unwind.sp_restored = 0;
21099
21100 if (unwind.opcode_count + length > unwind.opcode_alloc)
21101 {
21102 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
21103 if (unwind.opcodes)
21104 unwind.opcodes = (unsigned char *) xrealloc (unwind.opcodes,
21105 unwind.opcode_alloc);
21106 else
21107 unwind.opcodes = (unsigned char *) xmalloc (unwind.opcode_alloc);
21108 }
21109 while (length > 0)
21110 {
21111 length--;
21112 unwind.opcodes[unwind.opcode_count] = op & 0xff;
21113 op >>= 8;
21114 unwind.opcode_count++;
21115 }
21116 }
21117
/* Add unwind opcodes to adjust the stack pointer by OFFSET bytes.
   Positive offsets use the vsp-increment encodings (short form,
   two-opcode form, or the 0xb2 + uleb128 long form); negative offsets
   are emitted as a run of 0x7f decrements plus a final 0x4x opcode.
   Encodings per the ARM EHABI.  */

static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      if (o == 0)
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  if (o)
	    bytes[n] |= 0x80;	/* Continuation bit on all but the last.  */
	  n++;
	}
      /* Add the insn.  The uleb128 bytes are pushed most-significant
	 first because the opcode list itself is built in reverse.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Decrement vsp: 0x7f steps of 0x100, then one 0x40-0x7f op.  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
21179
21180 /* Finish the list of unwind opcodes for this function. */
21181 static void
21182 finish_unwind_opcodes (void)
21183 {
21184 valueT op;
21185
21186 if (unwind.fp_used)
21187 {
21188 /* Adjust sp as necessary. */
21189 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
21190 flush_pending_unwind ();
21191
21192 /* After restoring sp from the frame pointer. */
21193 op = 0x90 | unwind.fp_reg;
21194 add_unwind_opcode (op, 1);
21195 }
21196 else
21197 flush_pending_unwind ();
21198 }
21199
21200
21201 /* Start an exception table entry. If idx is nonzero this is an index table
21202 entry. */
21203
21204 static void
21205 start_unwind_section (const segT text_seg, int idx)
21206 {
21207 const char * text_name;
21208 const char * prefix;
21209 const char * prefix_once;
21210 const char * group_name;
21211 size_t prefix_len;
21212 size_t text_len;
21213 char * sec_name;
21214 size_t sec_name_len;
21215 int type;
21216 int flags;
21217 int linkonce;
21218
21219 if (idx)
21220 {
21221 prefix = ELF_STRING_ARM_unwind;
21222 prefix_once = ELF_STRING_ARM_unwind_once;
21223 type = SHT_ARM_EXIDX;
21224 }
21225 else
21226 {
21227 prefix = ELF_STRING_ARM_unwind_info;
21228 prefix_once = ELF_STRING_ARM_unwind_info_once;
21229 type = SHT_PROGBITS;
21230 }
21231
21232 text_name = segment_name (text_seg);
21233 if (streq (text_name, ".text"))
21234 text_name = "";
21235
21236 if (strncmp (text_name, ".gnu.linkonce.t.",
21237 strlen (".gnu.linkonce.t.")) == 0)
21238 {
21239 prefix = prefix_once;
21240 text_name += strlen (".gnu.linkonce.t.");
21241 }
21242
21243 prefix_len = strlen (prefix);
21244 text_len = strlen (text_name);
21245 sec_name_len = prefix_len + text_len;
21246 sec_name = (char *) xmalloc (sec_name_len + 1);
21247 memcpy (sec_name, prefix, prefix_len);
21248 memcpy (sec_name + prefix_len, text_name, text_len);
21249 sec_name[prefix_len + text_len] = '\0';
21250
21251 flags = SHF_ALLOC;
21252 linkonce = 0;
21253 group_name = 0;
21254
21255 /* Handle COMDAT group. */
21256 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
21257 {
21258 group_name = elf_group_name (text_seg);
21259 if (group_name == NULL)
21260 {
21261 as_bad (_("Group section `%s' has no group signature"),
21262 segment_name (text_seg));
21263 ignore_rest_of_line ();
21264 return;
21265 }
21266 flags |= SHF_GROUP;
21267 linkonce = 1;
21268 }
21269
21270 obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
21271
21272 /* Set the section link for index tables. */
21273 if (idx)
21274 elf_linked_to_section (now_seg) = text_seg;
21275 }
21276
21277
/* Start an unwind table entry.  HAVE_DATA is nonzero if we have additional
   personality routine data.  Returns zero, or the index table value for
   an inline entry (EXIDX_CANTUNWIND, or the packed opcode word when the
   whole entry fits inline).  As a side effect, switches to the unwind
   section, emits the entry there, and records its label in
   unwind.table_entry.  */

static valueT
create_unwind_entry (int have_data)
{
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.  */
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      /* personality_index -2 means ".cantunwind".  */
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
	{
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table.  */
	      data = 0x80;
	      n = 3;
	      /* The opcode list is stored in reverse, so popping from
		 the end yields execution order.  */
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.  */
	size = unwind.opcode_count - 2;
    }
  else
    {
      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
      if (unwind.personality_index != -1)
	{
	  as_bad (_("attempt to recreate an unwind entry"));
	  return 1;
	}

      /* An extra byte is required for the opcode count.  */
      size = unwind.opcode_count + 1;
    }

  /* Round the remaining opcode bytes up to whole 32-bit words.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  /* The table entry must be word aligned; remember its address.  */
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  /* PR 13449: Zero the table entries in case some of them are not used.  */
  memset (ptr, 0, (size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.  */
      data = size > 0 ? size - 1 : 0;
      n = 3;
      break;

      /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.  */
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
21446
21447
21448 /* Initialize the DWARF-2 unwind information for this procedure. */
21449
void
tc_arm_frame_initial_instructions (void)
{
  /* On entry to a function the canonical frame address is the value of
     the stack pointer with no offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
21455 #endif /* OBJ_ELF */
21456
21457 /* Convert REGNAME to a DWARF-2 register number. */
21458
21459 int
21460 tc_arm_regname_to_dw2regnum (char *regname)
21461 {
21462 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
21463 if (reg != FAIL)
21464 return reg;
21465
21466 /* PR 16694: Allow VFP registers as well. */
21467 reg = arm_reg_parse (&regname, REG_TYPE_VFS);
21468 if (reg != FAIL)
21469 return 64 + reg;
21470
21471 reg = arm_reg_parse (&regname, REG_TYPE_VFD);
21472 if (reg != FAIL)
21473 return reg + 256;
21474
21475 return -1;
21476 }
21477
21478 #ifdef TE_PE
21479 void
21480 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
21481 {
21482 expressionS exp;
21483
21484 exp.X_op = O_secrel;
21485 exp.X_add_symbol = symbol;
21486 exp.X_add_number = 0;
21487 emit_expr (&exp, size);
21488 }
21489 #endif
21490
21491 /* MD interface: Symbol and relocation handling. */
21492
21493 /* Return the address within the segment that a PC-relative fixup is
21494 relative to. For ARM, PC-relative fixups applied to instructions
21495 are generally relative to the location of the fixup plus 8 bytes.
21496 Thumb branches are offset by 4, and Thumb loads relative to PC
21497 require special handling. */
21498
long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* NOTE(review): for a branch to an ARM-state function in the same
	 section on a v5T+ core the real PC base is restored here, even
	 though the test above may have zeroed it — presumably because
	 the fixup will be resolved locally; confirm against
	 md_apply_fix.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      /* Same base-restore pattern as BRANCH23, but for a Thumb-state
	 target (the BLX may be rewritten locally).  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
21620
21621 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
21622 Otherwise we have no need to default values of symbols. */
21623
21624 symbolS *
21625 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
21626 {
21627 #ifdef OBJ_ELF
21628 if (name[0] == '_' && name[1] == 'G'
21629 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
21630 {
21631 if (!GOT_symbol)
21632 {
21633 if (symbol_find (name))
21634 as_bad (_("GOT already in the symbol table"));
21635
21636 GOT_symbol = symbol_new (name, undefined_section,
21637 (valueT) 0, & zero_address_frag);
21638 }
21639
21640 return GOT_symbol;
21641 }
21642 #endif
21643
21644 return NULL;
21645 }
21646
21647 /* Subroutine of md_apply_fix. Check to see if an immediate can be
21648 computed as two separate immediate values, added together. We
21649 already know that this value cannot be computed by just one ARM
21650 instruction. */
21651
static unsigned int
validate_immediate_twopart (unsigned int val,
			    unsigned int * highpart)
{
  unsigned int a;
  unsigned int i;

  /* An ARM data-processing immediate is an 8-bit value rotated right by
     an even amount.  Try every even rotation of VAL looking for a
     non-zero byte in the low 8 bits; that byte becomes the low part of
     the split.  */
  for (i = 0; i < 32; i += 2)
    if (((a = rotate_left (val, i)) & 0xff) != 0)
      {
	if (a & 0xff00)
	  {
	    /* The remainder must fit entirely in the adjacent byte,
	       reachable with rotation I + 24.  */
	    if (a & ~ 0xffff)
	      continue;
	    * highpart = (a >> 8) | ((i + 24) << 7);
	  }
	else if (a & 0xff0000)
	  {
	    /* Remainder in bits 16-23, rotation I + 16.  */
	    if (a & 0xff000000)
	      continue;
	    * highpart = (a >> 16) | ((i + 16) << 7);
	  }
	else
	  {
	    /* Only bits 24-31 can be left, rotation I + 8.  */
	    gas_assert (a & 0xff000000);
	    * highpart = (a >> 24) | ((i + 8) << 7);
	  }

	/* Low byte plus its rotation field: I is even, so I << 7 places
	   I/2 into the instruction's 4-bit rotate field (bits 8-11).  */
	return (a & 0xff) | (i << 7);
      }

  return FAIL;
}
21685
21686 static int
21687 validate_offset_imm (unsigned int val, int hwse)
21688 {
21689 if ((hwse && val > 255) || val > 4095)
21690 return FAIL;
21691 return val;
21692 }
21693
21694 /* Subroutine of md_apply_fix. Do those data_ops which can take a
21695 negative immediate constant by altering the instruction. A bit of
21696 a hack really.
21697 MOV <-> MVN
21698 AND <-> BIC
21699 ADC <-> SBC
21700 by inverting the second operand, and
21701 ADD <-> SUB
21702 CMP <-> CMN
21703 by negating the second operand. */
21704
21705 static int
21706 negate_data_op (unsigned long * instruction,
21707 unsigned long value)
21708 {
21709 int op, new_inst;
21710 unsigned long negated, inverted;
21711
21712 negated = encode_arm_immediate (-value);
21713 inverted = encode_arm_immediate (~value);
21714
21715 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
21716 switch (op)
21717 {
21718 /* First negates. */
21719 case OPCODE_SUB: /* ADD <-> SUB */
21720 new_inst = OPCODE_ADD;
21721 value = negated;
21722 break;
21723
21724 case OPCODE_ADD:
21725 new_inst = OPCODE_SUB;
21726 value = negated;
21727 break;
21728
21729 case OPCODE_CMP: /* CMP <-> CMN */
21730 new_inst = OPCODE_CMN;
21731 value = negated;
21732 break;
21733
21734 case OPCODE_CMN:
21735 new_inst = OPCODE_CMP;
21736 value = negated;
21737 break;
21738
21739 /* Now Inverted ops. */
21740 case OPCODE_MOV: /* MOV <-> MVN */
21741 new_inst = OPCODE_MVN;
21742 value = inverted;
21743 break;
21744
21745 case OPCODE_MVN:
21746 new_inst = OPCODE_MOV;
21747 value = inverted;
21748 break;
21749
21750 case OPCODE_AND: /* AND <-> BIC */
21751 new_inst = OPCODE_BIC;
21752 value = inverted;
21753 break;
21754
21755 case OPCODE_BIC:
21756 new_inst = OPCODE_AND;
21757 value = inverted;
21758 break;
21759
21760 case OPCODE_ADC: /* ADC <-> SBC */
21761 new_inst = OPCODE_SBC;
21762 value = inverted;
21763 break;
21764
21765 case OPCODE_SBC:
21766 new_inst = OPCODE_ADC;
21767 value = inverted;
21768 break;
21769
21770 /* We cannot do anything. */
21771 default:
21772 return FAIL;
21773 }
21774
21775 if (value == (unsigned) FAIL)
21776 return FAIL;
21777
21778 *instruction &= OPCODE_MASK;
21779 *instruction |= new_inst << DATA_OP_SHIFT;
21780 return value;
21781 }
21782
21783 /* Like negate_data_op, but for Thumb-2. */
21784
static unsigned int
thumb32_negate_data_op (offsetT *instruction, unsigned int value)
{
  int op, new_inst;
  int rd;
  unsigned int negated, inverted;

  /* Pre-compute both candidate immediates; each opcode pair below
     selects whichever compensates for the swap.  encode_thumb32_immediate
     yields FAIL when the constant has no Thumb-2 encoding.  */
  negated = encode_thumb32_immediate (-value);
  inverted = encode_thumb32_immediate (~value);

  rd = (*instruction >> 8) & 0xf;
  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
    /* ADD <-> SUB.  Includes CMP <-> CMN.  */
    case T2_OPCODE_SUB:
      new_inst = T2_OPCODE_ADD;
      value = negated;
      break;

    case T2_OPCODE_ADD:
      new_inst = T2_OPCODE_SUB;
      value = negated;
      break;

    /* ORR <-> ORN.  Includes MOV <-> MVN.  */
    case T2_OPCODE_ORR:
      new_inst = T2_OPCODE_ORN;
      value = inverted;
      break;

    case T2_OPCODE_ORN:
      new_inst = T2_OPCODE_ORR;
      value = inverted;
      break;

    /* AND <-> BIC.  TST has no inverted equivalent.  */
    case T2_OPCODE_AND:
      new_inst = T2_OPCODE_BIC;
      /* rd == 15 means this AND is really a TST, which cannot be
	 rewritten.  */
      if (rd == 15)
	value = FAIL;
      else
	value = inverted;
      break;

    case T2_OPCODE_BIC:
      new_inst = T2_OPCODE_AND;
      value = inverted;
      break;

    /* ADC <-> SBC  */
    case T2_OPCODE_ADC:
      new_inst = T2_OPCODE_SBC;
      value = inverted;
      break;

    case T2_OPCODE_SBC:
      new_inst = T2_OPCODE_ADC;
      value = inverted;
      break;

    /* We cannot do anything.  */
    default:
      return FAIL;
    }

  if (value == (unsigned int)FAIL)
    return FAIL;

  /* Patch the opcode field in place; the caller encodes VALUE into the
     instruction's immediate fields.  */
  *instruction &= T2_OPCODE_MASK;
  *instruction |= new_inst << T2_DATA_OP_SHIFT;
  return value;
}
21858
21859 /* Read a 32-bit thumb instruction from buf. */
21860 static unsigned long
21861 get_thumb32_insn (char * buf)
21862 {
21863 unsigned long insn;
21864 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
21865 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
21866
21867 return insn;
21868 }
21869
21870
21871 /* We usually want to set the low bit on the address of thumb function
21872 symbols. In particular .word foo - . should have the low bit set.
21873 Generic code tries to fold the difference of two symbols to
21874 a constant. Prevent this and force a relocation when the first symbols
21875 is a thumb function. */
21876
21877 bfd_boolean
21878 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
21879 {
21880 if (op == O_subtract
21881 && l->X_op == O_symbol
21882 && r->X_op == O_symbol
21883 && THUMB_IS_FUNC (l->X_add_symbol))
21884 {
21885 l->X_op = O_subtract;
21886 l->X_op_symbol = r->X_add_symbol;
21887 l->X_add_number -= r->X_add_number;
21888 return TRUE;
21889 }
21890
21891 /* Process as normal. */
21892 return FALSE;
21893 }
21894
21895 /* Encode Thumb2 unconditional branches and calls. The encoding
21896 for the 2 are identical for the immediate values. */
21897
static void
encode_thumb2_b_bl_offset (char * buf, offsetT value)
{
  /* Mask for the J1 (bit 13) and J2 (bit 11) fields of the second
     halfword.  */
#define T2I1I2MASK ((1 << 13) | (1 << 11))
  offsetT newval;
  offsetT newval2;
  addressT S, I1, I2, lo, hi;

  /* Split the byte offset into the fields of the 32-bit encoding:
     S (bit 24 of the offset), I1/I2 (bits 23/22), a 10-bit high part
     and an 11-bit low part; bit 0 is discarded (halfword aligned).  */
  S = (value >> 24) & 0x01;
  I1 = (value >> 23) & 0x01;
  I2 = (value >> 22) & 0x01;
  hi = (value >> 12) & 0x3ff;
  lo = (value >> 1) & 0x7ff;
  newval = md_chars_to_number (buf, THUMB_SIZE);
  newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
  newval |= (S << 10) | hi;
  newval2 &= ~T2I1I2MASK;
  /* J1/J2 are stored as NOT(I1 EOR S) / NOT(I2 EOR S): the XOR with S
     happens explicitly, and the final XOR with T2I1I2MASK inverts both
     bits.  */
  newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
  md_number_to_chars (buf, newval, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
}
21919
21920 void
21921 md_apply_fix (fixS * fixP,
21922 valueT * valP,
21923 segT seg)
21924 {
21925 offsetT value = * valP;
21926 offsetT newval;
21927 unsigned int newimm;
21928 unsigned long temp;
21929 int sign;
21930 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
21931
21932 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
21933
21934 /* Note whether this will delete the relocation. */
21935
21936 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
21937 fixP->fx_done = 1;
21938
21939 /* On a 64-bit host, silently truncate 'value' to 32 bits for
21940 consistency with the behaviour on 32-bit hosts. Remember value
21941 for emit_reloc. */
21942 value &= 0xffffffff;
21943 value ^= 0x80000000;
21944 value -= 0x80000000;
21945
21946 *valP = value;
21947 fixP->fx_addnumber = value;
21948
21949 /* Same treatment for fixP->fx_offset. */
21950 fixP->fx_offset &= 0xffffffff;
21951 fixP->fx_offset ^= 0x80000000;
21952 fixP->fx_offset -= 0x80000000;
21953
21954 switch (fixP->fx_r_type)
21955 {
21956 case BFD_RELOC_NONE:
21957 /* This will need to go in the object file. */
21958 fixP->fx_done = 0;
21959 break;
21960
21961 case BFD_RELOC_ARM_IMMEDIATE:
21962 /* We claim that this fixup has been processed here,
21963 even if in fact we generate an error because we do
21964 not have a reloc for it, so tc_gen_reloc will reject it. */
21965 fixP->fx_done = 1;
21966
21967 if (fixP->fx_addsy)
21968 {
21969 const char *msg = 0;
21970
21971 if (! S_IS_DEFINED (fixP->fx_addsy))
21972 msg = _("undefined symbol %s used as an immediate value");
21973 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
21974 msg = _("symbol %s is in a different section");
21975 else if (S_IS_WEAK (fixP->fx_addsy))
21976 msg = _("symbol %s is weak and may be overridden later");
21977
21978 if (msg)
21979 {
21980 as_bad_where (fixP->fx_file, fixP->fx_line,
21981 msg, S_GET_NAME (fixP->fx_addsy));
21982 break;
21983 }
21984 }
21985
21986 temp = md_chars_to_number (buf, INSN_SIZE);
21987
21988 /* If the offset is negative, we should use encoding A2 for ADR. */
21989 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
21990 newimm = negate_data_op (&temp, value);
21991 else
21992 {
21993 newimm = encode_arm_immediate (value);
21994
21995 /* If the instruction will fail, see if we can fix things up by
21996 changing the opcode. */
21997 if (newimm == (unsigned int) FAIL)
21998 newimm = negate_data_op (&temp, value);
21999 }
22000
22001 if (newimm == (unsigned int) FAIL)
22002 {
22003 as_bad_where (fixP->fx_file, fixP->fx_line,
22004 _("invalid constant (%lx) after fixup"),
22005 (unsigned long) value);
22006 break;
22007 }
22008
22009 newimm |= (temp & 0xfffff000);
22010 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
22011 break;
22012
22013 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
22014 {
22015 unsigned int highpart = 0;
22016 unsigned int newinsn = 0xe1a00000; /* nop. */
22017
22018 if (fixP->fx_addsy)
22019 {
22020 const char *msg = 0;
22021
22022 if (! S_IS_DEFINED (fixP->fx_addsy))
22023 msg = _("undefined symbol %s used as an immediate value");
22024 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
22025 msg = _("symbol %s is in a different section");
22026 else if (S_IS_WEAK (fixP->fx_addsy))
22027 msg = _("symbol %s is weak and may be overridden later");
22028
22029 if (msg)
22030 {
22031 as_bad_where (fixP->fx_file, fixP->fx_line,
22032 msg, S_GET_NAME (fixP->fx_addsy));
22033 break;
22034 }
22035 }
22036
22037 newimm = encode_arm_immediate (value);
22038 temp = md_chars_to_number (buf, INSN_SIZE);
22039
22040 /* If the instruction will fail, see if we can fix things up by
22041 changing the opcode. */
22042 if (newimm == (unsigned int) FAIL
22043 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
22044 {
22045 /* No ? OK - try using two ADD instructions to generate
22046 the value. */
22047 newimm = validate_immediate_twopart (value, & highpart);
22048
22049 /* Yes - then make sure that the second instruction is
22050 also an add. */
22051 if (newimm != (unsigned int) FAIL)
22052 newinsn = temp;
22053 /* Still No ? Try using a negated value. */
22054 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
22055 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
22056 /* Otherwise - give up. */
22057 else
22058 {
22059 as_bad_where (fixP->fx_file, fixP->fx_line,
22060 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
22061 (long) value);
22062 break;
22063 }
22064
22065 /* Replace the first operand in the 2nd instruction (which
22066 is the PC) with the destination register. We have
22067 already added in the PC in the first instruction and we
22068 do not want to do it again. */
22069 newinsn &= ~ 0xf0000;
22070 newinsn |= ((newinsn & 0x0f000) << 4);
22071 }
22072
22073 newimm |= (temp & 0xfffff000);
22074 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
22075
22076 highpart |= (newinsn & 0xfffff000);
22077 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
22078 }
22079 break;
22080
22081 case BFD_RELOC_ARM_OFFSET_IMM:
22082 if (!fixP->fx_done && seg->use_rela_p)
22083 value = 0;
22084
22085 case BFD_RELOC_ARM_LITERAL:
22086 sign = value > 0;
22087
22088 if (value < 0)
22089 value = - value;
22090
22091 if (validate_offset_imm (value, 0) == FAIL)
22092 {
22093 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
22094 as_bad_where (fixP->fx_file, fixP->fx_line,
22095 _("invalid literal constant: pool needs to be closer"));
22096 else
22097 as_bad_where (fixP->fx_file, fixP->fx_line,
22098 _("bad immediate value for offset (%ld)"),
22099 (long) value);
22100 break;
22101 }
22102
22103 newval = md_chars_to_number (buf, INSN_SIZE);
22104 if (value == 0)
22105 newval &= 0xfffff000;
22106 else
22107 {
22108 newval &= 0xff7ff000;
22109 newval |= value | (sign ? INDEX_UP : 0);
22110 }
22111 md_number_to_chars (buf, newval, INSN_SIZE);
22112 break;
22113
22114 case BFD_RELOC_ARM_OFFSET_IMM8:
22115 case BFD_RELOC_ARM_HWLITERAL:
22116 sign = value > 0;
22117
22118 if (value < 0)
22119 value = - value;
22120
22121 if (validate_offset_imm (value, 1) == FAIL)
22122 {
22123 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
22124 as_bad_where (fixP->fx_file, fixP->fx_line,
22125 _("invalid literal constant: pool needs to be closer"));
22126 else
22127 as_bad_where (fixP->fx_file, fixP->fx_line,
22128 _("bad immediate value for 8-bit offset (%ld)"),
22129 (long) value);
22130 break;
22131 }
22132
22133 newval = md_chars_to_number (buf, INSN_SIZE);
22134 if (value == 0)
22135 newval &= 0xfffff0f0;
22136 else
22137 {
22138 newval &= 0xff7ff0f0;
22139 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
22140 }
22141 md_number_to_chars (buf, newval, INSN_SIZE);
22142 break;
22143
22144 case BFD_RELOC_ARM_T32_OFFSET_U8:
22145 if (value < 0 || value > 1020 || value % 4 != 0)
22146 as_bad_where (fixP->fx_file, fixP->fx_line,
22147 _("bad immediate value for offset (%ld)"), (long) value);
22148 value /= 4;
22149
22150 newval = md_chars_to_number (buf+2, THUMB_SIZE);
22151 newval |= value;
22152 md_number_to_chars (buf+2, newval, THUMB_SIZE);
22153 break;
22154
22155 case BFD_RELOC_ARM_T32_OFFSET_IMM:
22156 /* This is a complicated relocation used for all varieties of Thumb32
22157 load/store instruction with immediate offset:
22158
22159 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
22160 *4, optional writeback(W)
22161 (doubleword load/store)
22162
22163 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
22164 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
22165 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
22166 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
22167 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
22168
22169 Uppercase letters indicate bits that are already encoded at
22170 this point. Lowercase letters are our problem. For the
22171 second block of instructions, the secondary opcode nybble
22172 (bits 8..11) is present, and bit 23 is zero, even if this is
22173 a PC-relative operation. */
22174 newval = md_chars_to_number (buf, THUMB_SIZE);
22175 newval <<= 16;
22176 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
22177
22178 if ((newval & 0xf0000000) == 0xe0000000)
22179 {
22180 /* Doubleword load/store: 8-bit offset, scaled by 4. */
22181 if (value >= 0)
22182 newval |= (1 << 23);
22183 else
22184 value = -value;
22185 if (value % 4 != 0)
22186 {
22187 as_bad_where (fixP->fx_file, fixP->fx_line,
22188 _("offset not a multiple of 4"));
22189 break;
22190 }
22191 value /= 4;
22192 if (value > 0xff)
22193 {
22194 as_bad_where (fixP->fx_file, fixP->fx_line,
22195 _("offset out of range"));
22196 break;
22197 }
22198 newval &= ~0xff;
22199 }
22200 else if ((newval & 0x000f0000) == 0x000f0000)
22201 {
22202 /* PC-relative, 12-bit offset. */
22203 if (value >= 0)
22204 newval |= (1 << 23);
22205 else
22206 value = -value;
22207 if (value > 0xfff)
22208 {
22209 as_bad_where (fixP->fx_file, fixP->fx_line,
22210 _("offset out of range"));
22211 break;
22212 }
22213 newval &= ~0xfff;
22214 }
22215 else if ((newval & 0x00000100) == 0x00000100)
22216 {
22217 /* Writeback: 8-bit, +/- offset. */
22218 if (value >= 0)
22219 newval |= (1 << 9);
22220 else
22221 value = -value;
22222 if (value > 0xff)
22223 {
22224 as_bad_where (fixP->fx_file, fixP->fx_line,
22225 _("offset out of range"));
22226 break;
22227 }
22228 newval &= ~0xff;
22229 }
22230 else if ((newval & 0x00000f00) == 0x00000e00)
22231 {
22232 /* T-instruction: positive 8-bit offset. */
22233 if (value < 0 || value > 0xff)
22234 {
22235 as_bad_where (fixP->fx_file, fixP->fx_line,
22236 _("offset out of range"));
22237 break;
22238 }
22239 newval &= ~0xff;
22240 newval |= value;
22241 }
22242 else
22243 {
22244 /* Positive 12-bit or negative 8-bit offset. */
22245 int limit;
22246 if (value >= 0)
22247 {
22248 newval |= (1 << 23);
22249 limit = 0xfff;
22250 }
22251 else
22252 {
22253 value = -value;
22254 limit = 0xff;
22255 }
22256 if (value > limit)
22257 {
22258 as_bad_where (fixP->fx_file, fixP->fx_line,
22259 _("offset out of range"));
22260 break;
22261 }
22262 newval &= ~limit;
22263 }
22264
22265 newval |= value;
22266 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
22267 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
22268 break;
22269
22270 case BFD_RELOC_ARM_SHIFT_IMM:
22271 newval = md_chars_to_number (buf, INSN_SIZE);
22272 if (((unsigned long) value) > 32
22273 || (value == 32
22274 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
22275 {
22276 as_bad_where (fixP->fx_file, fixP->fx_line,
22277 _("shift expression is too large"));
22278 break;
22279 }
22280
22281 if (value == 0)
22282 /* Shifts of zero must be done as lsl. */
22283 newval &= ~0x60;
22284 else if (value == 32)
22285 value = 0;
22286 newval &= 0xfffff07f;
22287 newval |= (value & 0x1f) << 7;
22288 md_number_to_chars (buf, newval, INSN_SIZE);
22289 break;
22290
22291 case BFD_RELOC_ARM_T32_IMMEDIATE:
22292 case BFD_RELOC_ARM_T32_ADD_IMM:
22293 case BFD_RELOC_ARM_T32_IMM12:
22294 case BFD_RELOC_ARM_T32_ADD_PC12:
22295 /* We claim that this fixup has been processed here,
22296 even if in fact we generate an error because we do
22297 not have a reloc for it, so tc_gen_reloc will reject it. */
22298 fixP->fx_done = 1;
22299
22300 if (fixP->fx_addsy
22301 && ! S_IS_DEFINED (fixP->fx_addsy))
22302 {
22303 as_bad_where (fixP->fx_file, fixP->fx_line,
22304 _("undefined symbol %s used as an immediate value"),
22305 S_GET_NAME (fixP->fx_addsy));
22306 break;
22307 }
22308
22309 newval = md_chars_to_number (buf, THUMB_SIZE);
22310 newval <<= 16;
22311 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
22312
22313 newimm = FAIL;
22314 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
22315 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
22316 {
22317 newimm = encode_thumb32_immediate (value);
22318 if (newimm == (unsigned int) FAIL)
22319 newimm = thumb32_negate_data_op (&newval, value);
22320 }
22321 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
22322 && newimm == (unsigned int) FAIL)
22323 {
22324 /* Turn add/sum into addw/subw. */
22325 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
22326 newval = (newval & 0xfeffffff) | 0x02000000;
22327 /* No flat 12-bit imm encoding for addsw/subsw. */
22328 if ((newval & 0x00100000) == 0)
22329 {
22330 /* 12 bit immediate for addw/subw. */
22331 if (value < 0)
22332 {
22333 value = -value;
22334 newval ^= 0x00a00000;
22335 }
22336 if (value > 0xfff)
22337 newimm = (unsigned int) FAIL;
22338 else
22339 newimm = value;
22340 }
22341 }
22342
22343 if (newimm == (unsigned int)FAIL)
22344 {
22345 as_bad_where (fixP->fx_file, fixP->fx_line,
22346 _("invalid constant (%lx) after fixup"),
22347 (unsigned long) value);
22348 break;
22349 }
22350
22351 newval |= (newimm & 0x800) << 15;
22352 newval |= (newimm & 0x700) << 4;
22353 newval |= (newimm & 0x0ff);
22354
22355 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
22356 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
22357 break;
22358
22359 case BFD_RELOC_ARM_SMC:
22360 if (((unsigned long) value) > 0xffff)
22361 as_bad_where (fixP->fx_file, fixP->fx_line,
22362 _("invalid smc expression"));
22363 newval = md_chars_to_number (buf, INSN_SIZE);
22364 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
22365 md_number_to_chars (buf, newval, INSN_SIZE);
22366 break;
22367
22368 case BFD_RELOC_ARM_HVC:
22369 if (((unsigned long) value) > 0xffff)
22370 as_bad_where (fixP->fx_file, fixP->fx_line,
22371 _("invalid hvc expression"));
22372 newval = md_chars_to_number (buf, INSN_SIZE);
22373 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
22374 md_number_to_chars (buf, newval, INSN_SIZE);
22375 break;
22376
22377 case BFD_RELOC_ARM_SWI:
22378 if (fixP->tc_fix_data != 0)
22379 {
22380 if (((unsigned long) value) > 0xff)
22381 as_bad_where (fixP->fx_file, fixP->fx_line,
22382 _("invalid swi expression"));
22383 newval = md_chars_to_number (buf, THUMB_SIZE);
22384 newval |= value;
22385 md_number_to_chars (buf, newval, THUMB_SIZE);
22386 }
22387 else
22388 {
22389 if (((unsigned long) value) > 0x00ffffff)
22390 as_bad_where (fixP->fx_file, fixP->fx_line,
22391 _("invalid swi expression"));
22392 newval = md_chars_to_number (buf, INSN_SIZE);
22393 newval |= value;
22394 md_number_to_chars (buf, newval, INSN_SIZE);
22395 }
22396 break;
22397
22398 case BFD_RELOC_ARM_MULTI:
22399 if (((unsigned long) value) > 0xffff)
22400 as_bad_where (fixP->fx_file, fixP->fx_line,
22401 _("invalid expression in load/store multiple"));
22402 newval = value | md_chars_to_number (buf, INSN_SIZE);
22403 md_number_to_chars (buf, newval, INSN_SIZE);
22404 break;
22405
22406 #ifdef OBJ_ELF
22407 case BFD_RELOC_ARM_PCREL_CALL:
22408
22409 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
22410 && fixP->fx_addsy
22411 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22412 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22413 && THUMB_IS_FUNC (fixP->fx_addsy))
22414 /* Flip the bl to blx. This is a simple flip
22415 bit here because we generate PCREL_CALL for
22416 unconditional bls. */
22417 {
22418 newval = md_chars_to_number (buf, INSN_SIZE);
22419 newval = newval | 0x10000000;
22420 md_number_to_chars (buf, newval, INSN_SIZE);
22421 temp = 1;
22422 fixP->fx_done = 1;
22423 }
22424 else
22425 temp = 3;
22426 goto arm_branch_common;
22427
22428 case BFD_RELOC_ARM_PCREL_JUMP:
22429 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
22430 && fixP->fx_addsy
22431 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22432 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22433 && THUMB_IS_FUNC (fixP->fx_addsy))
22434 {
22435 /* This would map to a bl<cond>, b<cond>,
22436 b<always> to a Thumb function. We
22437 need to force a relocation for this particular
22438 case. */
22439 newval = md_chars_to_number (buf, INSN_SIZE);
22440 fixP->fx_done = 0;
22441 }
22442
22443 case BFD_RELOC_ARM_PLT32:
22444 #endif
22445 case BFD_RELOC_ARM_PCREL_BRANCH:
22446 temp = 3;
22447 goto arm_branch_common;
22448
22449 case BFD_RELOC_ARM_PCREL_BLX:
22450
22451 temp = 1;
22452 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
22453 && fixP->fx_addsy
22454 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22455 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22456 && ARM_IS_FUNC (fixP->fx_addsy))
22457 {
22458 /* Flip the blx to a bl and warn. */
22459 const char *name = S_GET_NAME (fixP->fx_addsy);
22460 newval = 0xeb000000;
22461 as_warn_where (fixP->fx_file, fixP->fx_line,
22462 _("blx to '%s' an ARM ISA state function changed to bl"),
22463 name);
22464 md_number_to_chars (buf, newval, INSN_SIZE);
22465 temp = 3;
22466 fixP->fx_done = 1;
22467 }
22468
22469 #ifdef OBJ_ELF
22470 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
22471 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
22472 #endif
22473
22474 arm_branch_common:
22475 /* We are going to store value (shifted right by two) in the
22476 instruction, in a 24 bit, signed field. Bits 26 through 32 either
22477 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
22478 also be be clear. */
22479 if (value & temp)
22480 as_bad_where (fixP->fx_file, fixP->fx_line,
22481 _("misaligned branch destination"));
22482 if ((value & (offsetT)0xfe000000) != (offsetT)0
22483 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
22484 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22485
22486 if (fixP->fx_done || !seg->use_rela_p)
22487 {
22488 newval = md_chars_to_number (buf, INSN_SIZE);
22489 newval |= (value >> 2) & 0x00ffffff;
22490 /* Set the H bit on BLX instructions. */
22491 if (temp == 1)
22492 {
22493 if (value & 2)
22494 newval |= 0x01000000;
22495 else
22496 newval &= ~0x01000000;
22497 }
22498 md_number_to_chars (buf, newval, INSN_SIZE);
22499 }
22500 break;
22501
22502 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
22503 /* CBZ can only branch forward. */
22504
22505 /* Attempts to use CBZ to branch to the next instruction
22506 (which, strictly speaking, are prohibited) will be turned into
22507 no-ops.
22508
22509 FIXME: It may be better to remove the instruction completely and
22510 perform relaxation. */
22511 if (value == -2)
22512 {
22513 newval = md_chars_to_number (buf, THUMB_SIZE);
22514 newval = 0xbf00; /* NOP encoding T1 */
22515 md_number_to_chars (buf, newval, THUMB_SIZE);
22516 }
22517 else
22518 {
22519 if (value & ~0x7e)
22520 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22521
22522 if (fixP->fx_done || !seg->use_rela_p)
22523 {
22524 newval = md_chars_to_number (buf, THUMB_SIZE);
22525 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
22526 md_number_to_chars (buf, newval, THUMB_SIZE);
22527 }
22528 }
22529 break;
22530
22531 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
22532 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
22533 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22534
22535 if (fixP->fx_done || !seg->use_rela_p)
22536 {
22537 newval = md_chars_to_number (buf, THUMB_SIZE);
22538 newval |= (value & 0x1ff) >> 1;
22539 md_number_to_chars (buf, newval, THUMB_SIZE);
22540 }
22541 break;
22542
22543 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
22544 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
22545 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22546
22547 if (fixP->fx_done || !seg->use_rela_p)
22548 {
22549 newval = md_chars_to_number (buf, THUMB_SIZE);
22550 newval |= (value & 0xfff) >> 1;
22551 md_number_to_chars (buf, newval, THUMB_SIZE);
22552 }
22553 break;
22554
22555 case BFD_RELOC_THUMB_PCREL_BRANCH20:
22556 if (fixP->fx_addsy
22557 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22558 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22559 && ARM_IS_FUNC (fixP->fx_addsy)
22560 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
22561 {
22562 /* Force a relocation for a branch 20 bits wide. */
22563 fixP->fx_done = 0;
22564 }
22565 if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
22566 as_bad_where (fixP->fx_file, fixP->fx_line,
22567 _("conditional branch out of range"));
22568
22569 if (fixP->fx_done || !seg->use_rela_p)
22570 {
22571 offsetT newval2;
22572 addressT S, J1, J2, lo, hi;
22573
22574 S = (value & 0x00100000) >> 20;
22575 J2 = (value & 0x00080000) >> 19;
22576 J1 = (value & 0x00040000) >> 18;
22577 hi = (value & 0x0003f000) >> 12;
22578 lo = (value & 0x00000ffe) >> 1;
22579
22580 newval = md_chars_to_number (buf, THUMB_SIZE);
22581 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22582 newval |= (S << 10) | hi;
22583 newval2 |= (J1 << 13) | (J2 << 11) | lo;
22584 md_number_to_chars (buf, newval, THUMB_SIZE);
22585 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
22586 }
22587 break;
22588
22589 case BFD_RELOC_THUMB_PCREL_BLX:
22590 /* If there is a blx from a thumb state function to
22591 another thumb function flip this to a bl and warn
22592 about it. */
22593
22594 if (fixP->fx_addsy
22595 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22596 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22597 && THUMB_IS_FUNC (fixP->fx_addsy))
22598 {
22599 const char *name = S_GET_NAME (fixP->fx_addsy);
22600 as_warn_where (fixP->fx_file, fixP->fx_line,
22601 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
22602 name);
22603 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22604 newval = newval | 0x1000;
22605 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
22606 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
22607 fixP->fx_done = 1;
22608 }
22609
22610
22611 goto thumb_bl_common;
22612
22613 case BFD_RELOC_THUMB_PCREL_BRANCH23:
22614 /* A bl from Thumb state ISA to an internal ARM state function
22615 is converted to a blx. */
22616 if (fixP->fx_addsy
22617 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22618 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22619 && ARM_IS_FUNC (fixP->fx_addsy)
22620 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
22621 {
22622 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22623 newval = newval & ~0x1000;
22624 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
22625 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
22626 fixP->fx_done = 1;
22627 }
22628
22629 thumb_bl_common:
22630
22631 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
22632 /* For a BLX instruction, make sure that the relocation is rounded up
22633 to a word boundary. This follows the semantics of the instruction
22634 which specifies that bit 1 of the target address will come from bit
22635 1 of the base address. */
22636 value = (value + 3) & ~ 3;
22637
22638 #ifdef OBJ_ELF
22639 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
22640 && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
22641 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
22642 #endif
22643
22644 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
22645 {
22646 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2)))
22647 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22648 else if ((value & ~0x1ffffff)
22649 && ((value & ~0x1ffffff) != ~0x1ffffff))
22650 as_bad_where (fixP->fx_file, fixP->fx_line,
22651 _("Thumb2 branch out of range"));
22652 }
22653
22654 if (fixP->fx_done || !seg->use_rela_p)
22655 encode_thumb2_b_bl_offset (buf, value);
22656
22657 break;
22658
22659 case BFD_RELOC_THUMB_PCREL_BRANCH25:
22660 if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
22661 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22662
22663 if (fixP->fx_done || !seg->use_rela_p)
22664 encode_thumb2_b_bl_offset (buf, value);
22665
22666 break;
22667
22668 case BFD_RELOC_8:
22669 if (fixP->fx_done || !seg->use_rela_p)
22670 *buf = value;
22671 break;
22672
22673 case BFD_RELOC_16:
22674 if (fixP->fx_done || !seg->use_rela_p)
22675 md_number_to_chars (buf, value, 2);
22676 break;
22677
22678 #ifdef OBJ_ELF
22679 case BFD_RELOC_ARM_TLS_CALL:
22680 case BFD_RELOC_ARM_THM_TLS_CALL:
22681 case BFD_RELOC_ARM_TLS_DESCSEQ:
22682 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
22683 case BFD_RELOC_ARM_TLS_GOTDESC:
22684 case BFD_RELOC_ARM_TLS_GD32:
22685 case BFD_RELOC_ARM_TLS_LE32:
22686 case BFD_RELOC_ARM_TLS_IE32:
22687 case BFD_RELOC_ARM_TLS_LDM32:
22688 case BFD_RELOC_ARM_TLS_LDO32:
22689 S_SET_THREAD_LOCAL (fixP->fx_addsy);
22690 break;
22691
22692 case BFD_RELOC_ARM_GOT32:
22693 case BFD_RELOC_ARM_GOTOFF:
22694 break;
22695
22696 case BFD_RELOC_ARM_GOT_PREL:
22697 if (fixP->fx_done || !seg->use_rela_p)
22698 md_number_to_chars (buf, value, 4);
22699 break;
22700
22701 case BFD_RELOC_ARM_TARGET2:
22702 /* TARGET2 is not partial-inplace, so we need to write the
22703 addend here for REL targets, because it won't be written out
22704 during reloc processing later. */
22705 if (fixP->fx_done || !seg->use_rela_p)
22706 md_number_to_chars (buf, fixP->fx_offset, 4);
22707 break;
22708 #endif
22709
22710 case BFD_RELOC_RVA:
22711 case BFD_RELOC_32:
22712 case BFD_RELOC_ARM_TARGET1:
22713 case BFD_RELOC_ARM_ROSEGREL32:
22714 case BFD_RELOC_ARM_SBREL32:
22715 case BFD_RELOC_32_PCREL:
22716 #ifdef TE_PE
22717 case BFD_RELOC_32_SECREL:
22718 #endif
22719 if (fixP->fx_done || !seg->use_rela_p)
22720 #ifdef TE_WINCE
22721 /* For WinCE we only do this for pcrel fixups. */
22722 if (fixP->fx_done || fixP->fx_pcrel)
22723 #endif
22724 md_number_to_chars (buf, value, 4);
22725 break;
22726
22727 #ifdef OBJ_ELF
22728 case BFD_RELOC_ARM_PREL31:
22729 if (fixP->fx_done || !seg->use_rela_p)
22730 {
22731 newval = md_chars_to_number (buf, 4) & 0x80000000;
22732 if ((value ^ (value >> 1)) & 0x40000000)
22733 {
22734 as_bad_where (fixP->fx_file, fixP->fx_line,
22735 _("rel31 relocation overflow"));
22736 }
22737 newval |= value & 0x7fffffff;
22738 md_number_to_chars (buf, newval, 4);
22739 }
22740 break;
22741 #endif
22742
22743 case BFD_RELOC_ARM_CP_OFF_IMM:
22744 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
22745 if (value < -1023 || value > 1023 || (value & 3))
22746 as_bad_where (fixP->fx_file, fixP->fx_line,
22747 _("co-processor offset out of range"));
22748 cp_off_common:
22749 sign = value > 0;
22750 if (value < 0)
22751 value = -value;
22752 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
22753 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
22754 newval = md_chars_to_number (buf, INSN_SIZE);
22755 else
22756 newval = get_thumb32_insn (buf);
22757 if (value == 0)
22758 newval &= 0xffffff00;
22759 else
22760 {
22761 newval &= 0xff7fff00;
22762 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
22763 }
22764 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
22765 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
22766 md_number_to_chars (buf, newval, INSN_SIZE);
22767 else
22768 put_thumb32_insn (buf, newval);
22769 break;
22770
22771 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
22772 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
22773 if (value < -255 || value > 255)
22774 as_bad_where (fixP->fx_file, fixP->fx_line,
22775 _("co-processor offset out of range"));
22776 value *= 4;
22777 goto cp_off_common;
22778
22779 case BFD_RELOC_ARM_THUMB_OFFSET:
22780 newval = md_chars_to_number (buf, THUMB_SIZE);
22781 /* Exactly what ranges, and where the offset is inserted depends
22782 on the type of instruction, we can establish this from the
22783 top 4 bits. */
22784 switch (newval >> 12)
22785 {
22786 case 4: /* PC load. */
22787 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
22788 forced to zero for these loads; md_pcrel_from has already
22789 compensated for this. */
22790 if (value & 3)
22791 as_bad_where (fixP->fx_file, fixP->fx_line,
22792 _("invalid offset, target not word aligned (0x%08lX)"),
22793 (((unsigned long) fixP->fx_frag->fr_address
22794 + (unsigned long) fixP->fx_where) & ~3)
22795 + (unsigned long) value);
22796
22797 if (value & ~0x3fc)
22798 as_bad_where (fixP->fx_file, fixP->fx_line,
22799 _("invalid offset, value too big (0x%08lX)"),
22800 (long) value);
22801
22802 newval |= value >> 2;
22803 break;
22804
22805 case 9: /* SP load/store. */
22806 if (value & ~0x3fc)
22807 as_bad_where (fixP->fx_file, fixP->fx_line,
22808 _("invalid offset, value too big (0x%08lX)"),
22809 (long) value);
22810 newval |= value >> 2;
22811 break;
22812
22813 case 6: /* Word load/store. */
22814 if (value & ~0x7c)
22815 as_bad_where (fixP->fx_file, fixP->fx_line,
22816 _("invalid offset, value too big (0x%08lX)"),
22817 (long) value);
22818 newval |= value << 4; /* 6 - 2. */
22819 break;
22820
22821 case 7: /* Byte load/store. */
22822 if (value & ~0x1f)
22823 as_bad_where (fixP->fx_file, fixP->fx_line,
22824 _("invalid offset, value too big (0x%08lX)"),
22825 (long) value);
22826 newval |= value << 6;
22827 break;
22828
22829 case 8: /* Halfword load/store. */
22830 if (value & ~0x3e)
22831 as_bad_where (fixP->fx_file, fixP->fx_line,
22832 _("invalid offset, value too big (0x%08lX)"),
22833 (long) value);
22834 newval |= value << 5; /* 6 - 1. */
22835 break;
22836
22837 default:
22838 as_bad_where (fixP->fx_file, fixP->fx_line,
22839 "Unable to process relocation for thumb opcode: %lx",
22840 (unsigned long) newval);
22841 break;
22842 }
22843 md_number_to_chars (buf, newval, THUMB_SIZE);
22844 break;
22845
22846 case BFD_RELOC_ARM_THUMB_ADD:
22847 /* This is a complicated relocation, since we use it for all of
22848 the following immediate relocations:
22849
22850 3bit ADD/SUB
22851 8bit ADD/SUB
22852 9bit ADD/SUB SP word-aligned
22853 10bit ADD PC/SP word-aligned
22854
22855 The type of instruction being processed is encoded in the
22856 instruction field:
22857
22858 0x8000 SUB
22859 0x00F0 Rd
22860 0x000F Rs
22861 */
22862 newval = md_chars_to_number (buf, THUMB_SIZE);
22863 {
22864 int rd = (newval >> 4) & 0xf;
22865 int rs = newval & 0xf;
22866 int subtract = !!(newval & 0x8000);
22867
22868 /* Check for HI regs, only very restricted cases allowed:
22869 Adjusting SP, and using PC or SP to get an address. */
22870 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
22871 || (rs > 7 && rs != REG_SP && rs != REG_PC))
22872 as_bad_where (fixP->fx_file, fixP->fx_line,
22873 _("invalid Hi register with immediate"));
22874
22875 /* If value is negative, choose the opposite instruction. */
22876 if (value < 0)
22877 {
22878 value = -value;
22879 subtract = !subtract;
22880 if (value < 0)
22881 as_bad_where (fixP->fx_file, fixP->fx_line,
22882 _("immediate value out of range"));
22883 }
22884
22885 if (rd == REG_SP)
22886 {
22887 if (value & ~0x1fc)
22888 as_bad_where (fixP->fx_file, fixP->fx_line,
22889 _("invalid immediate for stack address calculation"));
22890 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
22891 newval |= value >> 2;
22892 }
22893 else if (rs == REG_PC || rs == REG_SP)
22894 {
22895 if (subtract || value & ~0x3fc)
22896 as_bad_where (fixP->fx_file, fixP->fx_line,
22897 _("invalid immediate for address calculation (value = 0x%08lX)"),
22898 (unsigned long) value);
22899 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
22900 newval |= rd << 8;
22901 newval |= value >> 2;
22902 }
22903 else if (rs == rd)
22904 {
22905 if (value & ~0xff)
22906 as_bad_where (fixP->fx_file, fixP->fx_line,
22907 _("immediate value out of range"));
22908 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
22909 newval |= (rd << 8) | value;
22910 }
22911 else
22912 {
22913 if (value & ~0x7)
22914 as_bad_where (fixP->fx_file, fixP->fx_line,
22915 _("immediate value out of range"));
22916 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
22917 newval |= rd | (rs << 3) | (value << 6);
22918 }
22919 }
22920 md_number_to_chars (buf, newval, THUMB_SIZE);
22921 break;
22922
22923 case BFD_RELOC_ARM_THUMB_IMM:
22924 newval = md_chars_to_number (buf, THUMB_SIZE);
22925 if (value < 0 || value > 255)
22926 as_bad_where (fixP->fx_file, fixP->fx_line,
22927 _("invalid immediate: %ld is out of range"),
22928 (long) value);
22929 newval |= value;
22930 md_number_to_chars (buf, newval, THUMB_SIZE);
22931 break;
22932
22933 case BFD_RELOC_ARM_THUMB_SHIFT:
22934 /* 5bit shift value (0..32). LSL cannot take 32. */
22935 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
22936 temp = newval & 0xf800;
22937 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
22938 as_bad_where (fixP->fx_file, fixP->fx_line,
22939 _("invalid shift value: %ld"), (long) value);
22940 /* Shifts of zero must be encoded as LSL. */
22941 if (value == 0)
22942 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
22943 /* Shifts of 32 are encoded as zero. */
22944 else if (value == 32)
22945 value = 0;
22946 newval |= value << 6;
22947 md_number_to_chars (buf, newval, THUMB_SIZE);
22948 break;
22949
22950 case BFD_RELOC_VTABLE_INHERIT:
22951 case BFD_RELOC_VTABLE_ENTRY:
22952 fixP->fx_done = 0;
22953 return;
22954
22955 case BFD_RELOC_ARM_MOVW:
22956 case BFD_RELOC_ARM_MOVT:
22957 case BFD_RELOC_ARM_THUMB_MOVW:
22958 case BFD_RELOC_ARM_THUMB_MOVT:
22959 if (fixP->fx_done || !seg->use_rela_p)
22960 {
22961 /* REL format relocations are limited to a 16-bit addend. */
22962 if (!fixP->fx_done)
22963 {
22964 if (value < -0x8000 || value > 0x7fff)
22965 as_bad_where (fixP->fx_file, fixP->fx_line,
22966 _("offset out of range"));
22967 }
22968 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
22969 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
22970 {
22971 value >>= 16;
22972 }
22973
22974 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
22975 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
22976 {
22977 newval = get_thumb32_insn (buf);
22978 newval &= 0xfbf08f00;
22979 newval |= (value & 0xf000) << 4;
22980 newval |= (value & 0x0800) << 15;
22981 newval |= (value & 0x0700) << 4;
22982 newval |= (value & 0x00ff);
22983 put_thumb32_insn (buf, newval);
22984 }
22985 else
22986 {
22987 newval = md_chars_to_number (buf, 4);
22988 newval &= 0xfff0f000;
22989 newval |= value & 0x0fff;
22990 newval |= (value & 0xf000) << 4;
22991 md_number_to_chars (buf, newval, 4);
22992 }
22993 }
22994 return;
22995
22996 case BFD_RELOC_ARM_ALU_PC_G0_NC:
22997 case BFD_RELOC_ARM_ALU_PC_G0:
22998 case BFD_RELOC_ARM_ALU_PC_G1_NC:
22999 case BFD_RELOC_ARM_ALU_PC_G1:
23000 case BFD_RELOC_ARM_ALU_PC_G2:
23001 case BFD_RELOC_ARM_ALU_SB_G0_NC:
23002 case BFD_RELOC_ARM_ALU_SB_G0:
23003 case BFD_RELOC_ARM_ALU_SB_G1_NC:
23004 case BFD_RELOC_ARM_ALU_SB_G1:
23005 case BFD_RELOC_ARM_ALU_SB_G2:
23006 gas_assert (!fixP->fx_done);
23007 if (!seg->use_rela_p)
23008 {
23009 bfd_vma insn;
23010 bfd_vma encoded_addend;
23011 bfd_vma addend_abs = abs (value);
23012
23013 /* Check that the absolute value of the addend can be
23014 expressed as an 8-bit constant plus a rotation. */
23015 encoded_addend = encode_arm_immediate (addend_abs);
23016 if (encoded_addend == (unsigned int) FAIL)
23017 as_bad_where (fixP->fx_file, fixP->fx_line,
23018 _("the offset 0x%08lX is not representable"),
23019 (unsigned long) addend_abs);
23020
23021 /* Extract the instruction. */
23022 insn = md_chars_to_number (buf, INSN_SIZE);
23023
23024 /* If the addend is positive, use an ADD instruction.
23025 Otherwise use a SUB. Take care not to destroy the S bit. */
23026 insn &= 0xff1fffff;
23027 if (value < 0)
23028 insn |= 1 << 22;
23029 else
23030 insn |= 1 << 23;
23031
23032 /* Place the encoded addend into the first 12 bits of the
23033 instruction. */
23034 insn &= 0xfffff000;
23035 insn |= encoded_addend;
23036
23037 /* Update the instruction. */
23038 md_number_to_chars (buf, insn, INSN_SIZE);
23039 }
23040 break;
23041
23042 case BFD_RELOC_ARM_LDR_PC_G0:
23043 case BFD_RELOC_ARM_LDR_PC_G1:
23044 case BFD_RELOC_ARM_LDR_PC_G2:
23045 case BFD_RELOC_ARM_LDR_SB_G0:
23046 case BFD_RELOC_ARM_LDR_SB_G1:
23047 case BFD_RELOC_ARM_LDR_SB_G2:
23048 gas_assert (!fixP->fx_done);
23049 if (!seg->use_rela_p)
23050 {
23051 bfd_vma insn;
23052 bfd_vma addend_abs = abs (value);
23053
23054 /* Check that the absolute value of the addend can be
23055 encoded in 12 bits. */
23056 if (addend_abs >= 0x1000)
23057 as_bad_where (fixP->fx_file, fixP->fx_line,
23058 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
23059 (unsigned long) addend_abs);
23060
23061 /* Extract the instruction. */
23062 insn = md_chars_to_number (buf, INSN_SIZE);
23063
23064 /* If the addend is negative, clear bit 23 of the instruction.
23065 Otherwise set it. */
23066 if (value < 0)
23067 insn &= ~(1 << 23);
23068 else
23069 insn |= 1 << 23;
23070
23071 /* Place the absolute value of the addend into the first 12 bits
23072 of the instruction. */
23073 insn &= 0xfffff000;
23074 insn |= addend_abs;
23075
23076 /* Update the instruction. */
23077 md_number_to_chars (buf, insn, INSN_SIZE);
23078 }
23079 break;
23080
23081 case BFD_RELOC_ARM_LDRS_PC_G0:
23082 case BFD_RELOC_ARM_LDRS_PC_G1:
23083 case BFD_RELOC_ARM_LDRS_PC_G2:
23084 case BFD_RELOC_ARM_LDRS_SB_G0:
23085 case BFD_RELOC_ARM_LDRS_SB_G1:
23086 case BFD_RELOC_ARM_LDRS_SB_G2:
23087 gas_assert (!fixP->fx_done);
23088 if (!seg->use_rela_p)
23089 {
23090 bfd_vma insn;
23091 bfd_vma addend_abs = abs (value);
23092
23093 /* Check that the absolute value of the addend can be
23094 encoded in 8 bits. */
23095 if (addend_abs >= 0x100)
23096 as_bad_where (fixP->fx_file, fixP->fx_line,
23097 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
23098 (unsigned long) addend_abs);
23099
23100 /* Extract the instruction. */
23101 insn = md_chars_to_number (buf, INSN_SIZE);
23102
23103 /* If the addend is negative, clear bit 23 of the instruction.
23104 Otherwise set it. */
23105 if (value < 0)
23106 insn &= ~(1 << 23);
23107 else
23108 insn |= 1 << 23;
23109
23110 /* Place the first four bits of the absolute value of the addend
23111 into the first 4 bits of the instruction, and the remaining
23112 four into bits 8 .. 11. */
23113 insn &= 0xfffff0f0;
23114 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
23115
23116 /* Update the instruction. */
23117 md_number_to_chars (buf, insn, INSN_SIZE);
23118 }
23119 break;
23120
23121 case BFD_RELOC_ARM_LDC_PC_G0:
23122 case BFD_RELOC_ARM_LDC_PC_G1:
23123 case BFD_RELOC_ARM_LDC_PC_G2:
23124 case BFD_RELOC_ARM_LDC_SB_G0:
23125 case BFD_RELOC_ARM_LDC_SB_G1:
23126 case BFD_RELOC_ARM_LDC_SB_G2:
23127 gas_assert (!fixP->fx_done);
23128 if (!seg->use_rela_p)
23129 {
23130 bfd_vma insn;
23131 bfd_vma addend_abs = abs (value);
23132
23133 /* Check that the absolute value of the addend is a multiple of
23134 four and, when divided by four, fits in 8 bits. */
23135 if (addend_abs & 0x3)
23136 as_bad_where (fixP->fx_file, fixP->fx_line,
23137 _("bad offset 0x%08lX (must be word-aligned)"),
23138 (unsigned long) addend_abs);
23139
23140 if ((addend_abs >> 2) > 0xff)
23141 as_bad_where (fixP->fx_file, fixP->fx_line,
23142 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
23143 (unsigned long) addend_abs);
23144
23145 /* Extract the instruction. */
23146 insn = md_chars_to_number (buf, INSN_SIZE);
23147
23148 /* If the addend is negative, clear bit 23 of the instruction.
23149 Otherwise set it. */
23150 if (value < 0)
23151 insn &= ~(1 << 23);
23152 else
23153 insn |= 1 << 23;
23154
23155 /* Place the addend (divided by four) into the first eight
23156 bits of the instruction. */
23157 insn &= 0xfffffff0;
23158 insn |= addend_abs >> 2;
23159
23160 /* Update the instruction. */
23161 md_number_to_chars (buf, insn, INSN_SIZE);
23162 }
23163 break;
23164
23165 case BFD_RELOC_ARM_V4BX:
23166 /* This will need to go in the object file. */
23167 fixP->fx_done = 0;
23168 break;
23169
23170 case BFD_RELOC_UNUSED:
23171 default:
23172 as_bad_where (fixP->fx_file, fixP->fx_line,
23173 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
23174 }
23175 }
23176
23177 /* Translate internal representation of relocation info to BFD target
23178 format. */
23179
arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  /* BFD reloc container; gas relocation output is built one arelent at
     a time.  xmalloc aborts on failure, so no NULL checks are needed.  */
  reloc = (arelent *) xmalloc (sizeof (arelent));

  reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (fixp->fx_pcrel)
    {
      /* RELA targets keep the PC-relative adjustment in the addend;
	 REL targets store the reloc's own address instead.  */
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  /* Map the internal fixup type onto the BFD reloc code actually
     emitted.  The first seven cases pick a PC-relative variant when
     the fixup is pcrel and otherwise fall through to the generic
     "pass the type straight through" group below.  */
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_THUMB_PCREL_BLX:
#ifdef OBJ_ELF
      /* EABI >= v4 expresses Thumb BLX as a BRANCH23 and lets the
	 linker synthesize the interworking.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	code = BFD_RELOC_THUMB_PCREL_BLX;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_GOT_PREL:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
    case BFD_RELOC_ARM_V4BX:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

    case BFD_RELOC_ARM_IMMEDIATE:
      /* Should have been resolved in md_apply_fix; reaching here means
	 the immediate could not be fixed up locally.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      /* Distinguish an undefined local label (user error) from a
	 genuine internal failure so the diagnostic is useful.  */
      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	/* Internal-only fixup types must never escape to the object
	   file; name the type in the error for easier debugging.  */
	char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
	  default:			   type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* A 32-bit reference to _GLOBAL_OFFSET_TABLE_ becomes a GOTPC reloc
     with the reloc's own address as addend.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
23435
23436 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
23437
23438 void
23439 cons_fix_new_arm (fragS * frag,
23440 int where,
23441 int size,
23442 expressionS * exp,
23443 bfd_reloc_code_real_type reloc)
23444 {
23445 int pcrel = 0;
23446
23447 /* Pick a reloc.
23448 FIXME: @@ Should look at CPU word size. */
23449 switch (size)
23450 {
23451 case 1:
23452 reloc = BFD_RELOC_8;
23453 break;
23454 case 2:
23455 reloc = BFD_RELOC_16;
23456 break;
23457 case 4:
23458 default:
23459 reloc = BFD_RELOC_32;
23460 break;
23461 case 8:
23462 reloc = BFD_RELOC_64;
23463 break;
23464 }
23465
23466 #ifdef TE_PE
23467 if (exp->X_op == O_secrel)
23468 {
23469 exp->X_op = O_symbol;
23470 reloc = BFD_RELOC_32_SECREL;
23471 }
23472 #endif
23473
23474 fix_new_exp (frag, where, size, exp, pcrel, reloc);
23475 }
23476
23477 #if defined (OBJ_COFF)
23478 void
23479 arm_validate_fix (fixS * fixP)
23480 {
23481 /* If the destination of the branch is a defined symbol which does not have
23482 the THUMB_FUNC attribute, then we must be calling a function which has
23483 the (interfacearm) attribute. We look for the Thumb entry point to that
23484 function and change the branch to refer to that function instead. */
23485 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
23486 && fixP->fx_addsy != NULL
23487 && S_IS_DEFINED (fixP->fx_addsy)
23488 && ! THUMB_IS_FUNC (fixP->fx_addsy))
23489 {
23490 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
23491 }
23492 }
23493 #endif
23494
23495
23496 int
23497 arm_force_relocation (struct fix * fixp)
23498 {
23499 #if defined (OBJ_COFF) && defined (TE_PE)
23500 if (fixp->fx_r_type == BFD_RELOC_RVA)
23501 return 1;
23502 #endif
23503
23504 /* In case we have a call or a branch to a function in ARM ISA mode from
23505 a thumb function or vice-versa force the relocation. These relocations
23506 are cleared off for some cores that might have blx and simple transformations
23507 are possible. */
23508
23509 #ifdef OBJ_ELF
23510 switch (fixp->fx_r_type)
23511 {
23512 case BFD_RELOC_ARM_PCREL_JUMP:
23513 case BFD_RELOC_ARM_PCREL_CALL:
23514 case BFD_RELOC_THUMB_PCREL_BLX:
23515 if (THUMB_IS_FUNC (fixp->fx_addsy))
23516 return 1;
23517 break;
23518
23519 case BFD_RELOC_ARM_PCREL_BLX:
23520 case BFD_RELOC_THUMB_PCREL_BRANCH25:
23521 case BFD_RELOC_THUMB_PCREL_BRANCH20:
23522 case BFD_RELOC_THUMB_PCREL_BRANCH23:
23523 if (ARM_IS_FUNC (fixp->fx_addsy))
23524 return 1;
23525 break;
23526
23527 default:
23528 break;
23529 }
23530 #endif
23531
23532 /* Resolve these relocations even if the symbol is extern or weak.
23533 Technically this is probably wrong due to symbol preemption.
23534 In practice these relocations do not have enough range to be useful
23535 at dynamic link time, and some code (e.g. in the Linux kernel)
23536 expects these references to be resolved. */
23537 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
23538 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
23539 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
23540 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
23541 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
23542 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
23543 || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
23544 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
23545 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
23546 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
23547 || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
23548 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
23549 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
23550 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
23551 return 0;
23552
23553 /* Always leave these relocations for the linker. */
23554 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
23555 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
23556 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
23557 return 1;
23558
23559 /* Always generate relocations against function symbols. */
23560 if (fixp->fx_r_type == BFD_RELOC_32
23561 && fixp->fx_addsy
23562 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
23563 return 1;
23564
23565 return generic_force_reloc (fixp);
23566 }
23567
#if defined (OBJ_ELF) || defined (OBJ_COFF)
/* Relocations against function names must be left unadjusted,
   so that the linker can use this information to generate interworking
   stubs.  The MIPS version of this function
   also prevents relocations that are mips-16 specific, but I do not
   know why it does this.

   FIXME:
   There is one other problem that ought to be addressed here, but
   which currently is not:  Taking the address of a label (rather
   than a function) and then later jumping to that address.  Such
   addresses also ought to have their bottom bit set (assuming that
   they reside in Thumb code), but at the moment they will not.  */

bfd_boolean
arm_fix_adjustable (fixS * fixP)
{
  if (fixP->fx_addsy == NULL)
    return 1;

  /* Preserve relocations against symbols with function type.  */
  if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
    return FALSE;

  if (THUMB_IS_FUNC (fixP->fx_addsy)
      && fixP->fx_subsy == NULL)
    return FALSE;

  switch (fixP->fx_r_type)
    {
    /* We need the symbol name for the VTABLE entries.  */
    case BFD_RELOC_VTABLE_INHERIT:
    case BFD_RELOC_VTABLE_ENTRY:

    /* Don't allow symbols to be discarded on GOT related relocs.  */
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_TARGET2:

    /* MOVW/MOVT REL relocations have limited offsets, so keep the
       symbols.  */
    case BFD_RELOC_ARM_MOVW:
    case BFD_RELOC_ARM_MOVT:
    case BFD_RELOC_ARM_MOVW_PCREL:
    case BFD_RELOC_ARM_MOVT_PCREL:
    case BFD_RELOC_ARM_THUMB_MOVW:
    case BFD_RELOC_ARM_THUMB_MOVT:
    case BFD_RELOC_ARM_THUMB_MOVW_PCREL:
    case BFD_RELOC_ARM_THUMB_MOVT_PCREL:
      return FALSE;

    default:
      break;
    }

  /* Similarly for group relocations.  */
  if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return FALSE;

  return TRUE;
}
#endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
23638
23639 #ifdef OBJ_ELF
23640
23641 const char *
23642 elf32_arm_target_format (void)
23643 {
23644 #ifdef TE_SYMBIAN
23645 return (target_big_endian
23646 ? "elf32-bigarm-symbian"
23647 : "elf32-littlearm-symbian");
23648 #elif defined (TE_VXWORKS)
23649 return (target_big_endian
23650 ? "elf32-bigarm-vxworks"
23651 : "elf32-littlearm-vxworks");
23652 #elif defined (TE_NACL)
23653 return (target_big_endian
23654 ? "elf32-bigarm-nacl"
23655 : "elf32-littlearm-nacl");
23656 #else
23657 if (target_big_endian)
23658 return "elf32-bigarm";
23659 else
23660 return "elf32-littlearm";
23661 #endif
23662 }
23663
/* Hook called for each symbol before it is written out; currently this
   simply defers to the generic ELF frobbing in elf_frob_symbol.  */
void
armelf_frob_symbol (symbolS * symp,
		    int * puntp)
{
  elf_frob_symbol (symp, puntp);
}
23670 #endif
23671
23672 /* MD interface: Finalization. */
23673
23674 void
23675 arm_cleanup (void)
23676 {
23677 literal_pool * pool;
23678
23679 /* Ensure that all the IT blocks are properly closed. */
23680 check_it_blocks_finished ();
23681
23682 for (pool = list_of_pools; pool; pool = pool->next)
23683 {
23684 /* Put it at the end of the relevant section. */
23685 subseg_set (pool->section, pool->sub_section);
23686 #ifdef OBJ_ELF
23687 arm_elf_change_section ();
23688 #endif
23689 s_ltorg (0);
23690 }
23691 }
23692
#ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  Called via bfd_map_over_sections; ABFD and DUMMY are
   unused.  */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections without a frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  /* Walk each frag and examine the last mapping symbol recorded in it.  */
  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on the boundary with the following frag; scan
	 forward over empty frags to decide whether SYM is redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
#endif
23759
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
   ARM ones.  For COFF this rewrites storage classes; for ELF it sets
   the per-symbol branch-target/16-bit annotations.  */

void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!  */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    /* Non-function Thumb labels get the Thumb variant of their
	       storage class.  */
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      /* Interworking symbols get all flag bits set in the COFF symbol
	 entry.  */
      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  /* Special (mapping) symbols are left untouched.  */
	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
						BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		elf_sym->internal_elf_sym.st_target_internal
		  = ST_BRANCH_TO_THUMB;
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
23841
23842 /* MD interface: Initialization. */
23843
23844 static void
23845 set_constant_flonums (void)
23846 {
23847 int i;
23848
23849 for (i = 0; i < NUM_FLOAT_VALS; i++)
23850 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
23851 abort ();
23852 }
23853
23854 /* Auto-select Thumb mode if it's the only available instruction set for the
23855 given architecture. */
23856
23857 static void
23858 autoselect_thumb_from_cpu_variant (void)
23859 {
23860 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
23861 opcode_select (16);
23862 }
23863
/* Target hook run once at assembler start-up: build the mnemonic and
   operand lookup tables, finalise the CPU/FPU selection from the
   command-line options, set the output file's private flags, and record
   the BFD machine type.  */
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create the hash tables used during parsing for opcodes, condition
     codes, shift names, PSR fields, register names, relocation
     specifiers and barrier options.  */
  if (	 (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  /* Populate each table from the corresponding static array.  */
  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
		 (void *) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
		 (void *) (barrier_opt_names + i));
#ifdef OBJ_ELF
  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
    {
      struct reloc_entry * entry = reloc_names + i;

      if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
	/* This makes encode_branch() use the EABI versions of this relocation.  */
	entry->reloc = BFD_RELOC_UNUSED;

      hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
    }
#endif

  set_constant_flonums ();

  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options with new options are faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      mcpu_cpu_opt = legacy_cpu;
    }
  else if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      mfpu_opt = legacy_fpu;
    }
  else if (!mfpu_opt)
    {
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	mfpu_opt = mcpu_fpu_opt;
      else
	mfpu_opt = march_fpu_opt;
#else
      mfpu_opt = &fpu_default;
#endif
    }

  /* Still no FPU selected: fall back on a default appropriate to what
     (if anything) the CPU options told us.  */
  if (!mfpu_opt)
    {
      if (mcpu_cpu_opt != NULL)
	mfpu_opt = &fpu_default;
      else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
	mfpu_opt = &fpu_arch_vfp_v2;
      else
	mfpu_opt = &fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (!mcpu_cpu_opt)
    {
      mcpu_cpu_opt = &cpu_default;
      selected_cpu = cpu_default;
    }
  else if (no_cpu_selected ())
    selected_cpu = cpu_default;
#else
  if (mcpu_cpu_opt)
    selected_cpu = *mcpu_cpu_opt;
  else
    mcpu_cpu_opt = &arm_arch_any;
#endif

  ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;

#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    /* NOTE: for EF_ARM_EABI_UNKNOWN the #endif below lets the legacy
       flag computation run as the case body; VER4/VER5 skip it.  */
    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).  */
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	    flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.	*/
	break;

      default:
	abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out of flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.	*/
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags
	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
	    bfd_set_section_size (stdoutput, sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  Most capable feature first.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
24090
24091 /* Command line processing. */
24092
24093 /* md_parse_option
24094 Invocation line includes a switch not recognized by the base assembler.
24095 See if it's a processor-specific option.
24096
24097 This routine is somewhat complicated by the need for backwards
24098 compatibility (since older releases of gcc can't be changed).
24099 The new options try to make the interface as compatible as
24100 possible with GCC.
24101
24102 New options (supported) are:
24103
24104 -mcpu=<cpu name> Assemble for selected processor
24105 -march=<architecture name> Assemble for selected architecture
24106 -mfpu=<fpu architecture> Assemble for selected FPU.
24107 -EB/-mbig-endian Big-endian
24108 -EL/-mlittle-endian Little-endian
24109 -k Generate PIC code
24110 -mthumb Start in Thumb mode
24111 -mthumb-interwork Code supports ARM/Thumb interworking
24112
24113 -m[no-]warn-deprecated Warn about deprecated features
24114
24115 For now we will also provide support for:
24116
24117 -mapcs-32 32-bit Program counter
24118 -mapcs-26 26-bit Program counter
      -mapcs-float		Floats passed in FP registers
24120 -mapcs-reentrant Reentrant code
24121 -matpcs
24122 (sometime these will probably be replaced with -mapcs=<list of options>
24123 and -matpcs=<list of options>)
24124
      The remaining options are only supported for backwards compatibility.
24126 Cpu variants, the arm part is optional:
24127 -m[arm]1 Currently not supported.
24128 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
24129 -m[arm]3 Arm 3 processor
24130 -m[arm]6[xx], Arm 6 processors
24131 -m[arm]7[xx][t][[d]m] Arm 7 processors
24132 -m[arm]8[10] Arm 8 processors
24133 -m[arm]9[20][tdmi] Arm 9 processors
24134 -mstrongarm[110[0]] StrongARM processors
24135 -mxscale XScale processors
24136 -m[arm]v[2345[t[e]]] Arm architectures
24137 -mall All (except the ARM1)
24138 FP variants:
24139 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
24140 -mfpe-old (No float load/store multiples)
24141 -mvfpxd VFP Single precision
24142 -mvfp All VFP
24143 -mno-fpu Disable all floating point instructions
24144
24145 The following CPU names are recognized:
24146 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
24147 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
24148 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
24149 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
24150 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
24151 arm10t arm10e, arm1020t, arm1020e, arm10200e,
24152 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
24153
24154 */
24155
/* Short options: -m<arg> takes an argument, -k does not.  */
const char * md_shortopts = "m:k";

/* Codes for the long options below.  -EB/-EL are only defined for the
   endiannesses this target configuration supports.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)

/* Long options recognized by the ARM backend.  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
24183
/* Describes one simple on/off command-line option: matching OPTION sets
   *VAR to VALUE.  */
struct arm_option_table
{
  char *option;		/* Option name to match.  */
  char *help;		/* Help information.  */
  int  *var;		/* Variable to change.	*/
  int	value;		/* What to change it to.  */
  char *deprecated;	/* If non-null, print this message.  */
};
24192
/* Table of the simple flag-style command-line options.  */
struct arm_option_table arm_opts[] =
{
  {"k",	     N_("generate PIC code"),	   &pic_code,	 1, NULL},
  {"mthumb", N_("assemble Thumb code"),	   &thumb_mode,	 1, NULL},
  {"mthumb-interwork", N_("support ARM/Thumb interworking"),
   &support_interwork, 1, NULL},
  {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
  {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
  {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
   1, NULL},
  {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
  {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},

  /* These are recognized by the assembler, but have no effect on code.	 */
  {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
  {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},

  {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
  {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
   &warn_on_deprecated, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
24218
/* Describes one legacy (deprecated) CPU/FPU option: matching OPTION sets
   *VAR to VALUE and optionally prints a deprecation message.  */
struct arm_legacy_option_table
{
  char *option;				/* Option name to match.  */
  const arm_feature_set **var;		/* Variable to change.	*/
  const arm_feature_set	value;		/* What to change it to.  */
  char *deprecated;			/* If non-null, print this message.  */
};
24226
/* Table mapping old-style -m<cpu>/-m<arch>/-m<fpu> options onto feature
   sets, each with a message recommending the modern replacement.  */
const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall",	 &legacy_cpu, ARM_ANY,	       N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.	 */
  {"mfpe-old",	 &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu",	 &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL,	 NULL,	      ARM_ARCH_NONE, NULL}
};
24339
/* Describes one entry in the -mcpu= lookup table.  */
struct arm_cpu_option_table
{
  char *name;		/* Option name to match.  */
  size_t name_len;	/* Length of NAME (precomputed by ARM_CPU_OPT).	 */
  const arm_feature_set	value;
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...	*/
  const arm_feature_set	default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
24352
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  Scanned linearly by arm_parse_cpu; the first
   exact name match wins.  */
#define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
static const struct arm_cpu_option_table arm_cpus[] =
{
  ARM_CPU_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm1",		ARM_ARCH_V1,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm2",		ARM_ARCH_V2,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm250",	ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm3",		ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm6",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm60",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm600",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm610",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm620",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7m",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7d",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7dm",	ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7di",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7dmi",	ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm70",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm700",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm700i",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm710",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm710t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm720",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm720t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm740t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm710c",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7100",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7500",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7500fe",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7tdmi",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7tdmi-s",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm8",		ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm810",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm1",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm1100",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm1110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm9",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm920",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    "ARM920T"),
  ARM_CPU_OPT ("arm920t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm922t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm940t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm9tdmi",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL),
  ARM_CPU_OPT ("fa526",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL),
  ARM_CPU_OPT ("fa626",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL),
  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.	 */
  ARM_CPU_OPT ("arm9e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm9e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm926ej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"),
  ARM_CPU_OPT ("arm926ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"),
  ARM_CPU_OPT ("arm926ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm946e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm946e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM946E-S"),
  ARM_CPU_OPT ("arm946e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm966e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm966e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM966E-S"),
  ARM_CPU_OPT ("arm966e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm968e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm10t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm10tdmi",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm10e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1020",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM1020E"),
  ARM_CPU_OPT ("arm1020t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm1020e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1022e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1026ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2,
								 "ARM1026EJ-S"),
  ARM_CPU_OPT ("arm1026ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa606te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa616te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa626te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fmp626",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa726te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1136js",	ARM_ARCH_V6,	 FPU_NONE,	  "ARM1136J-S"),
  ARM_CPU_OPT ("arm1136j-s",	ARM_ARCH_V6,	 FPU_NONE,	  NULL),
  ARM_CPU_OPT ("arm1136jfs",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2,
								 "ARM1136JF-S"),
  ARM_CPU_OPT ("arm1136jf-s",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("mpcore",	ARM_ARCH_V6K,	 FPU_ARCH_VFP_V2, "MPCore"),
  ARM_CPU_OPT ("mpcorenovfp",	ARM_ARCH_V6K,	 FPU_NONE,	  "MPCore"),
  ARM_CPU_OPT ("arm1156t2-s",	ARM_ARCH_V6T2,	 FPU_NONE,	  NULL),
  ARM_CPU_OPT ("arm1156t2f-s",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1176jz-s",	ARM_ARCH_V6ZK,	 FPU_NONE,	  NULL),
  ARM_CPU_OPT ("arm1176jzf-s",	ARM_ARCH_V6ZK,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("cortex-a5",	ARM_ARCH_V7A_MP_SEC,
						 FPU_NONE,	  "Cortex-A5"),
  ARM_CPU_OPT ("cortex-a7",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
								  "Cortex-A7"),
  ARM_CPU_OPT ("cortex-a8",	ARM_ARCH_V7A_SEC,
						 ARM_FEATURE_COPROC (FPU_VFP_V3
							| FPU_NEON_EXT_V1),
								  "Cortex-A8"),
  ARM_CPU_OPT ("cortex-a9",	ARM_ARCH_V7A_MP_SEC,
						 ARM_FEATURE_COPROC (FPU_VFP_V3
							| FPU_NEON_EXT_V1),
								  "Cortex-A9"),
  ARM_CPU_OPT ("cortex-a12",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
								  "Cortex-A12"),
  ARM_CPU_OPT ("cortex-a15",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
								  "Cortex-A15"),
  ARM_CPU_OPT ("cortex-a17",	ARM_ARCH_V7VE,   FPU_ARCH_NEON_VFP_V4,
								  "Cortex-A17"),
  ARM_CPU_OPT ("cortex-a53",    ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A53"),
  ARM_CPU_OPT ("cortex-a57",    ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A57"),
  ARM_CPU_OPT ("cortex-a72",    ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A72"),
  ARM_CPU_OPT ("cortex-r4",	ARM_ARCH_V7R,	 FPU_NONE,	  "Cortex-R4"),
  ARM_CPU_OPT ("cortex-r4f",	ARM_ARCH_V7R,	 FPU_ARCH_VFP_V3D16,
								  "Cortex-R4F"),
  ARM_CPU_OPT ("cortex-r5",	ARM_ARCH_V7R_IDIV,
						 FPU_NONE,	  "Cortex-R5"),
  ARM_CPU_OPT ("cortex-r7",	ARM_ARCH_V7R_IDIV,
						 FPU_ARCH_VFP_V3D16,
								  "Cortex-R7"),
  ARM_CPU_OPT ("cortex-m7",	ARM_ARCH_V7EM,	 FPU_NONE,	  "Cortex-M7"),
  ARM_CPU_OPT ("cortex-m4",	ARM_ARCH_V7EM,	 FPU_NONE,	  "Cortex-M4"),
  ARM_CPU_OPT ("cortex-m3",	ARM_ARCH_V7M,	 FPU_NONE,	  "Cortex-M3"),
  ARM_CPU_OPT ("cortex-m1",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M1"),
  ARM_CPU_OPT ("cortex-m0",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M0"),
  ARM_CPU_OPT ("cortex-m0plus",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M0+"),
  ARM_CPU_OPT ("exynos-m1",	ARM_ARCH_V8A,	 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Samsung " \
								  "Exynos M1"),
  /* ??? XSCALE is really an architecture.  */
  ARM_CPU_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
  /* ??? iwmmxt is not a processor.  */
  ARM_CPU_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("i80200",	ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
  /* Maverick */
  ARM_CPU_OPT ("ep9312",	ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
						 FPU_ARCH_MAVERICK, "ARM920T"),
  /* Marvell processors.  */
  ARM_CPU_OPT ("marvell-pj4",   ARM_FEATURE_CORE_LOW (ARM_AEXT_V7A | ARM_EXT_MP
						      | ARM_EXT_SEC),
						FPU_ARCH_VFP_V3D16, NULL),
  ARM_CPU_OPT ("marvell-whitney", ARM_FEATURE_CORE_LOW (ARM_AEXT_V7A | ARM_EXT_MP
							| ARM_EXT_SEC),
					       FPU_ARCH_NEON_VFP_V4, NULL),
  /* APM X-Gene family.  */
  ARM_CPU_OPT ("xgene1",        ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	                                                          "APM X-Gene 1"),
  ARM_CPU_OPT ("xgene2",        ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	                                                          "APM X-Gene 2"),

  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
};
#undef ARM_CPU_OPT
24510
/* Description of one -march= command-line option value; see arm_archs
   below and arm_parse_arch.  */
struct arm_arch_option_table
{
  char *name;			/* Architecture name, e.g. "armv7-a".  */
  size_t name_len;		/* Cached strlen (name) for fast matching.  */
  const arm_feature_set value;	/* Feature set implied by this architecture.  */
  const arm_feature_set default_fpu;	/* FPU assumed when -mfpu= is absent.  */
};
24518
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  Scanned linearly by arm_parse_arch; the first
   exact name match wins.  */
#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
static const struct arm_arch_option_table arm_archs[] =
{
  ARM_ARCH_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv1",	ARM_ARCH_V1,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2",	ARM_ARCH_V2,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2a",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2s",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3",	ARM_ARCH_V3,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3m",	ARM_ARCH_V3M,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4",	ARM_ARCH_V4,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4xm",	ARM_ARCH_V4xM,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4txm",	ARM_ARCH_V4TxM,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv5",	ARM_ARCH_V5,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5txm",	ARM_ARCH_V5TxM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5texp",	ARM_ARCH_V5TExP, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5tej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6j",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6k",	ARM_ARCH_V6K,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6z",	ARM_ARCH_V6Z,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zk",	ARM_ARCH_V6ZK,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6t2",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6kt2",	ARM_ARCH_V6KT2,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zt2",	ARM_ARCH_V6ZT2,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zkt2",	ARM_ARCH_V6ZKT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6-m",	ARM_ARCH_V6M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6s-m",	ARM_ARCH_V6SM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7",	ARM_ARCH_V7,	 FPU_ARCH_VFP),
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.  */
  ARM_ARCH_OPT ("armv7a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7ve",	ARM_ARCH_V7VE,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7e-m",	ARM_ARCH_V7EM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-a",	ARM_ARCH_V8A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
#undef ARM_ARCH_OPT
24570
/* ISA extensions in the co-processor and main instruction set space.
   Describes one "+ext"/"+noext" suffix accepted after -mcpu=/-march=;
   see arm_extensions below and arm_parse_extension.  */
struct arm_option_extension_value_table
{
  char *name;			/* Extension name, without the "no" prefix.  */
  size_t name_len;		/* Cached strlen (name) for fast matching.  */
  const arm_feature_set merge_value;	/* Features added by "+name".  */
  const arm_feature_set clear_value;	/* Features removed by "+noname".  */
  const arm_feature_set allowed_archs;	/* Architectures the extension applies to.  */
};
24580
/* The following table must be in alphabetical order with a NULL last entry.
   arm_parse_extension scans forward from the previous match rather than
   restarting, which is what enforces (and relies on) this ordering.  */
#define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, AA }
static const struct arm_option_extension_value_table arm_extensions[] =
{
  ARM_EXT_OPT ("crc",  ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
			 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
			 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
				   ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("fp",     FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
				   ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("idiv",	ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
				   ARM_FEATURE_CORE_LOW (ARM_EXT_V7A | ARM_EXT_V7R)),
  ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ANY),
  ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ANY),
  ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
			ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ANY),
  ARM_EXT_OPT ("mp",	ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
				   ARM_FEATURE_CORE_LOW (ARM_EXT_V7A | ARM_EXT_V7R)),
  ARM_EXT_OPT ("simd",   FPU_ARCH_NEON_VFP_ARMV8,
			ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
				   ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("os",	ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
			ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
				   ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
  ARM_EXT_OPT ("sec",	ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
			ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
				   ARM_FEATURE_CORE_LOW (ARM_EXT_V6K | ARM_EXT_V7A)),
  ARM_EXT_OPT ("virt",	ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
				     | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
				   ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
			ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ANY),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
#undef ARM_EXT_OPT
24623
/* ISA floating-point and Advanced SIMD extensions.  Describes one
   -mfpu= command-line option value; see arm_fpus and arm_parse_fpu.  */
struct arm_option_fpu_value_table
{
  char *name;			/* FPU name as written on the command line.  */
  const arm_feature_set value;	/* Coprocessor features it enables.  */
};
24630
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  Matched by exact string comparison in
   arm_parse_fpu.  */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",              FPU_ARCH_VFP_V3}, /* For backwards compatibility.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2},
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",              FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {"vfpv4",		FPU_ARCH_VFP_V4},
  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
  {"fpv5-d16",		FPU_ARCH_VFP_V5D16},
  {"fpv5-sp-d16",	FPU_ARCH_VFP_V5_SP_D16},
  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
  {"crypto-neon-fp-armv8",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
  {NULL,		ARM_ARCH_NONE}
};
24677
/* Generic name -> integer mapping used for simple option tables such as
   arm_float_abis and arm_eabis.  */
struct arm_option_value_table
{
  char *name;	/* Option value as written on the command line.  */
  long value;	/* Corresponding internal constant.  */
};
24683
/* Values accepted by -mfloat-abi=; matched exactly in
   arm_parse_float_abi.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}
};
24691
24692 #ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.
   Values accepted by -meabi=; matched exactly in arm_parse_eabi.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}
};
24701 #endif
24702
/* Description of one long (argument-taking) command-line option;
   matched by prefix in md_parse_option.  */
struct arm_long_option_table
{
  char * option;		/* Substring to match.	*/
  char * help;			/* Help information.  */
  int (* func) (char * subopt);	/* Function to decode sub-option.  */
  char * deprecated;		/* If non-null, print this message.  */
};
24710
24711 static bfd_boolean
24712 arm_parse_extension (char *str, const arm_feature_set **opt_p)
24713 {
24714 arm_feature_set *ext_set = (arm_feature_set *)
24715 xmalloc (sizeof (arm_feature_set));
24716
24717 /* We insist on extensions being specified in alphabetical order, and with
24718 extensions being added before being removed. We achieve this by having
24719 the global ARM_EXTENSIONS table in alphabetical order, and using the
24720 ADDING_VALUE variable to indicate whether we are adding an extension (1)
24721 or removing it (0) and only allowing it to change in the order
24722 -1 -> 1 -> 0. */
24723 const struct arm_option_extension_value_table * opt = NULL;
24724 int adding_value = -1;
24725
24726 /* Copy the feature set, so that we can modify it. */
24727 *ext_set = **opt_p;
24728 *opt_p = ext_set;
24729
24730 while (str != NULL && *str != 0)
24731 {
24732 char *ext;
24733 size_t len;
24734
24735 if (*str != '+')
24736 {
24737 as_bad (_("invalid architectural extension"));
24738 return FALSE;
24739 }
24740
24741 str++;
24742 ext = strchr (str, '+');
24743
24744 if (ext != NULL)
24745 len = ext - str;
24746 else
24747 len = strlen (str);
24748
24749 if (len >= 2 && strncmp (str, "no", 2) == 0)
24750 {
24751 if (adding_value != 0)
24752 {
24753 adding_value = 0;
24754 opt = arm_extensions;
24755 }
24756
24757 len -= 2;
24758 str += 2;
24759 }
24760 else if (len > 0)
24761 {
24762 if (adding_value == -1)
24763 {
24764 adding_value = 1;
24765 opt = arm_extensions;
24766 }
24767 else if (adding_value != 1)
24768 {
24769 as_bad (_("must specify extensions to add before specifying "
24770 "those to remove"));
24771 return FALSE;
24772 }
24773 }
24774
24775 if (len == 0)
24776 {
24777 as_bad (_("missing architectural extension"));
24778 return FALSE;
24779 }
24780
24781 gas_assert (adding_value != -1);
24782 gas_assert (opt != NULL);
24783
24784 /* Scan over the options table trying to find an exact match. */
24785 for (; opt->name != NULL; opt++)
24786 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
24787 {
24788 /* Check we can apply the extension to this architecture. */
24789 if (!ARM_CPU_HAS_FEATURE (*ext_set, opt->allowed_archs))
24790 {
24791 as_bad (_("extension does not apply to the base architecture"));
24792 return FALSE;
24793 }
24794
24795 /* Add or remove the extension. */
24796 if (adding_value)
24797 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
24798 else
24799 ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);
24800
24801 break;
24802 }
24803
24804 if (opt->name == NULL)
24805 {
24806 /* Did we fail to find an extension because it wasn't specified in
24807 alphabetical order, or because it does not exist? */
24808
24809 for (opt = arm_extensions; opt->name != NULL; opt++)
24810 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
24811 break;
24812
24813 if (opt->name == NULL)
24814 as_bad (_("unknown architectural extension `%s'"), str);
24815 else
24816 as_bad (_("architectural extensions must be specified in "
24817 "alphabetical order"));
24818
24819 return FALSE;
24820 }
24821 else
24822 {
24823 /* We should skip the extension we've just matched the next time
24824 round. */
24825 opt++;
24826 }
24827
24828 str = ext;
24829 };
24830
24831 return TRUE;
24832 }
24833
24834 static bfd_boolean
24835 arm_parse_cpu (char *str)
24836 {
24837 const struct arm_cpu_option_table *opt;
24838 char *ext = strchr (str, '+');
24839 size_t len;
24840
24841 if (ext != NULL)
24842 len = ext - str;
24843 else
24844 len = strlen (str);
24845
24846 if (len == 0)
24847 {
24848 as_bad (_("missing cpu name `%s'"), str);
24849 return FALSE;
24850 }
24851
24852 for (opt = arm_cpus; opt->name != NULL; opt++)
24853 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
24854 {
24855 mcpu_cpu_opt = &opt->value;
24856 mcpu_fpu_opt = &opt->default_fpu;
24857 if (opt->canonical_name)
24858 strcpy (selected_cpu_name, opt->canonical_name);
24859 else
24860 {
24861 size_t i;
24862
24863 for (i = 0; i < len; i++)
24864 selected_cpu_name[i] = TOUPPER (opt->name[i]);
24865 selected_cpu_name[i] = 0;
24866 }
24867
24868 if (ext != NULL)
24869 return arm_parse_extension (ext, &mcpu_cpu_opt);
24870
24871 return TRUE;
24872 }
24873
24874 as_bad (_("unknown cpu `%s'"), str);
24875 return FALSE;
24876 }
24877
24878 static bfd_boolean
24879 arm_parse_arch (char *str)
24880 {
24881 const struct arm_arch_option_table *opt;
24882 char *ext = strchr (str, '+');
24883 size_t len;
24884
24885 if (ext != NULL)
24886 len = ext - str;
24887 else
24888 len = strlen (str);
24889
24890 if (len == 0)
24891 {
24892 as_bad (_("missing architecture name `%s'"), str);
24893 return FALSE;
24894 }
24895
24896 for (opt = arm_archs; opt->name != NULL; opt++)
24897 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
24898 {
24899 march_cpu_opt = &opt->value;
24900 march_fpu_opt = &opt->default_fpu;
24901 strcpy (selected_cpu_name, opt->name);
24902
24903 if (ext != NULL)
24904 return arm_parse_extension (ext, &march_cpu_opt);
24905
24906 return TRUE;
24907 }
24908
24909 as_bad (_("unknown architecture `%s'\n"), str);
24910 return FALSE;
24911 }
24912
24913 static bfd_boolean
24914 arm_parse_fpu (char * str)
24915 {
24916 const struct arm_option_fpu_value_table * opt;
24917
24918 for (opt = arm_fpus; opt->name != NULL; opt++)
24919 if (streq (opt->name, str))
24920 {
24921 mfpu_opt = &opt->value;
24922 return TRUE;
24923 }
24924
24925 as_bad (_("unknown floating point format `%s'\n"), str);
24926 return FALSE;
24927 }
24928
24929 static bfd_boolean
24930 arm_parse_float_abi (char * str)
24931 {
24932 const struct arm_option_value_table * opt;
24933
24934 for (opt = arm_float_abis; opt->name != NULL; opt++)
24935 if (streq (opt->name, str))
24936 {
24937 mfloat_abi_opt = opt->value;
24938 return TRUE;
24939 }
24940
24941 as_bad (_("unknown floating point abi `%s'\n"), str);
24942 return FALSE;
24943 }
24944
24945 #ifdef OBJ_ELF
24946 static bfd_boolean
24947 arm_parse_eabi (char * str)
24948 {
24949 const struct arm_option_value_table *opt;
24950
24951 for (opt = arm_eabis; opt->name != NULL; opt++)
24952 if (streq (opt->name, str))
24953 {
24954 meabi_flags = opt->value;
24955 return TRUE;
24956 }
24957 as_bad (_("unknown EABI `%s'\n"), str);
24958 return FALSE;
24959 }
24960 #endif
24961
24962 static bfd_boolean
24963 arm_parse_it_mode (char * str)
24964 {
24965 bfd_boolean ret = TRUE;
24966
24967 if (streq ("arm", str))
24968 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
24969 else if (streq ("thumb", str))
24970 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
24971 else if (streq ("always", str))
24972 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
24973 else if (streq ("never", str))
24974 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
24975 else
24976 {
24977 as_bad (_("unknown implicit IT mode `%s', should be "\
24978 "arm, thumb, always, or never."), str);
24979 ret = FALSE;
24980 }
24981
24982 return ret;
24983 }
24984
24985 static bfd_boolean
24986 arm_ccs_mode (char * unused ATTRIBUTE_UNUSED)
24987 {
24988 codecomposer_syntax = TRUE;
24989 arm_comment_chars[0] = ';';
24990 arm_line_separator_chars[0] = 0;
24991 return TRUE;
24992 }
24993
/* Long (argument-taking) command-line options.  Matched by prefix in
   md_parse_option; each entry's parser receives the text following the
   matched prefix.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {"mccs", N_("\t\t\t  TI CodeComposer Studio syntax compatibility mode"),
   arm_ccs_mode, NULL},
  {NULL, NULL, 0, NULL}
};
25014
/* GAS interface: handle one command-line option.  C is the option
   character (or the first character of a long option name) and ARG its
   argument, if any.  Dispatch order: the explicit cases below, then the
   short-option table ARM_OPTS, then the legacy option table, and finally
   the long (argument-taking) options in ARM_LONG_OPTS.  Returns non-zero
   if the option was recognized.  */
int
md_parse_option (int c, char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.	*/
      return 0;

    default:
      /* Simple flag options: match the whole option text.  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Legacy -m<cpu>/-m<fpu> spellings kept for compatibility.  */
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser; "- 1" skips the option's
		 leading character, which is already held in C rather
		 than being part of ARG.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
25105
/* GAS interface: print the ARM-specific command-line options to FP for
   "as --help" — first the short-option table, then the long options,
   then the fixed -EB/-EL/--fix-v4bx entries.  */
void
md_show_usage (FILE * fp)
{
  struct arm_option_table *opt;
  struct arm_long_option_table *lopt;

  fprintf (fp, _(" ARM-specific assembler options:\n"));

  /* Short, table-driven options with help text.  */
  for (opt = arm_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));

  /* Long, argument-taking options (-mcpu=, -mfpu=, ...).  */
  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));
#endif

  fprintf (fp, _("\
  --fix-v4bx              Allow BX in ARMv4 code\n"));
}
25135
25136
25137 #ifdef OBJ_ELF
/* One entry mapping an architectural feature set onto the EABI CPU arch
   value (Tag_CPU_arch) that provides those features.  */
typedef struct
{
  int val;			/* EABI CPU arch number.  */
  arm_feature_set flags;	/* Features of that architecture.  */
} cpu_arch_ver_table;
25143
/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   least features first.  Note the ordering is by feature content, not by
   the numeric arch value, which is why e.g. the v6K entry (9) precedes
   the v6Z entry (7).  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {1, ARM_ARCH_V4},
    {2, ARM_ARCH_V4T},
    {3, ARM_ARCH_V5},
    {3, ARM_ARCH_V5T},
    {4, ARM_ARCH_V5TE},
    {5, ARM_ARCH_V5TEJ},
    {6, ARM_ARCH_V6},
    {9, ARM_ARCH_V6K},
    {7, ARM_ARCH_V6Z},
    {11, ARM_ARCH_V6M},
    {12, ARM_ARCH_V6SM},
    {8, ARM_ARCH_V6T2},
    {10, ARM_ARCH_V7VE},
    {10, ARM_ARCH_V7R},
    {10, ARM_ARCH_V7M},
    {14, ARM_ARCH_V8A},
    {0, ARM_ARCH_NONE}
};
25166
25167 /* Set an attribute if it has not already been set by the user. */
25168 static void
25169 aeabi_set_attribute_int (int tag, int value)
25170 {
25171 if (tag < 1
25172 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
25173 || !attributes_set_explicitly[tag])
25174 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
25175 }
25176
25177 static void
25178 aeabi_set_attribute_string (int tag, const char *value)
25179 {
25180 if (tag < 1
25181 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
25182 || !attributes_set_explicitly[tag])
25183 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
25184 }
25185
25186 /* Set the public EABI object attributes. */
25187 void
25188 aeabi_set_public_attributes (void)
25189 {
25190 int arch;
25191 char profile;
25192 int virt_sec = 0;
25193 int fp16_optional = 0;
25194 arm_feature_set flags;
25195 arm_feature_set tmp;
25196 const cpu_arch_ver_table *p;
25197
25198 /* Choose the architecture based on the capabilities of the requested cpu
25199 (if any) and/or the instructions actually used. */
25200 ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
25201 ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
25202 ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
25203
25204 if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
25205 ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);
25206
25207 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
25208 ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);
25209
25210 selected_cpu = flags;
25211
25212 /* Allow the user to override the reported architecture. */
25213 if (object_arch)
25214 {
25215 ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
25216 ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
25217 }
25218
25219 /* We need to make sure that the attributes do not identify us as v6S-M
25220 when the only v6S-M feature in use is the Operating System Extensions. */
25221 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_os))
25222 if (!ARM_CPU_HAS_FEATURE (flags, arm_arch_v6m_only))
25223 ARM_CLEAR_FEATURE (flags, flags, arm_ext_os);
25224
25225 tmp = flags;
25226 arch = 0;
25227 for (p = cpu_arch_ver; p->val; p++)
25228 {
25229 if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
25230 {
25231 arch = p->val;
25232 ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
25233 }
25234 }
25235
25236 /* The table lookup above finds the last architecture to contribute
25237 a new feature. Unfortunately, Tag13 is a subset of the union of
25238 v6T2 and v7-M, so it is never seen as contributing a new feature.
25239 We can not search for the last entry which is entirely used,
25240 because if no CPU is specified we build up only those flags
25241 actually used. Perhaps we should separate out the specified
25242 and implicit cases. Avoid taking this path for -march=all by
25243 checking for contradictory v7-A / v7-M features. */
25244 if (arch == 10
25245 && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
25246 && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)
25247 && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp))
25248 arch = 13;
25249
25250 /* Tag_CPU_name. */
25251 if (selected_cpu_name[0])
25252 {
25253 char *q;
25254
25255 q = selected_cpu_name;
25256 if (strncmp (q, "armv", 4) == 0)
25257 {
25258 int i;
25259
25260 q += 4;
25261 for (i = 0; q[i]; i++)
25262 q[i] = TOUPPER (q[i]);
25263 }
25264 aeabi_set_attribute_string (Tag_CPU_name, q);
25265 }
25266
25267 /* Tag_CPU_arch. */
25268 aeabi_set_attribute_int (Tag_CPU_arch, arch);
25269
25270 /* Tag_CPU_arch_profile. */
25271 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
25272 profile = 'A';
25273 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
25274 profile = 'R';
25275 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
25276 profile = 'M';
25277 else
25278 profile = '\0';
25279
25280 if (profile != '\0')
25281 aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);
25282
25283 /* Tag_ARM_ISA_use. */
25284 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
25285 || arch == 0)
25286 aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);
25287
25288 /* Tag_THUMB_ISA_use. */
25289 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
25290 || arch == 0)
25291 aeabi_set_attribute_int (Tag_THUMB_ISA_use,
25292 ARM_CPU_HAS_FEATURE (flags, arm_arch_t2) ? 2 : 1);
25293
25294 /* Tag_VFP_arch. */
25295 if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
25296 aeabi_set_attribute_int (Tag_VFP_arch,
25297 ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
25298 ? 7 : 8);
25299 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
25300 aeabi_set_attribute_int (Tag_VFP_arch,
25301 ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
25302 ? 5 : 6);
25303 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
25304 {
25305 fp16_optional = 1;
25306 aeabi_set_attribute_int (Tag_VFP_arch, 3);
25307 }
25308 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
25309 {
25310 aeabi_set_attribute_int (Tag_VFP_arch, 4);
25311 fp16_optional = 1;
25312 }
25313 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
25314 aeabi_set_attribute_int (Tag_VFP_arch, 2);
25315 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
25316 || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
25317 aeabi_set_attribute_int (Tag_VFP_arch, 1);
25318
25319 /* Tag_ABI_HardFP_use. */
25320 if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
25321 && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
25322 aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);
25323
25324 /* Tag_WMMX_arch. */
25325 if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
25326 aeabi_set_attribute_int (Tag_WMMX_arch, 2);
25327 else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
25328 aeabi_set_attribute_int (Tag_WMMX_arch, 1);
25329
25330 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */
25331 if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
25332 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
25333 else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
25334 {
25335 if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
25336 {
25337 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
25338 }
25339 else
25340 {
25341 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
25342 fp16_optional = 1;
25343 }
25344 }
25345
25346 /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch). */
25347 if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
25348 aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);
25349
25350 /* Tag_DIV_use.
25351
25352 We set Tag_DIV_use to two when integer divide instructions have been used
25353 in ARM state, or when Thumb integer divide instructions have been used,
25354 but we have no architecture profile set, nor have we any ARM instructions.
25355
25356 For ARMv8 we set the tag to 0 as integer divide is implied by the base
25357 architecture.
25358
25359 For new architectures we will have to check these tests. */
25360 gas_assert (arch <= TAG_CPU_ARCH_V8);
25361 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8))
25362 aeabi_set_attribute_int (Tag_DIV_use, 0);
25363 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
25364 || (profile == '\0'
25365 && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
25366 && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
25367 aeabi_set_attribute_int (Tag_DIV_use, 2);
25368
25369 /* Tag_MP_extension_use. */
25370 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
25371 aeabi_set_attribute_int (Tag_MPextension_use, 1);
25372
25373 /* Tag Virtualization_use. */
25374 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
25375 virt_sec |= 1;
25376 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
25377 virt_sec |= 2;
25378 if (virt_sec != 0)
25379 aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
25380 }
25381
25382 /* Add the default contents for the .ARM.attributes section. */
25383 void
25384 arm_md_end (void)
25385 {
25386 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
25387 return;
25388
25389 aeabi_set_public_attributes ();
25390 }
25391 #endif /* OBJ_ELF */
25392
25393
25394 /* Parse a .cpu directive. */
25395
25396 static void
25397 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
25398 {
25399 const struct arm_cpu_option_table *opt;
25400 char *name;
25401 char saved_char;
25402
25403 name = input_line_pointer;
25404 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25405 input_line_pointer++;
25406 saved_char = *input_line_pointer;
25407 *input_line_pointer = 0;
25408
25409 /* Skip the first "all" entry. */
25410 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
25411 if (streq (opt->name, name))
25412 {
25413 mcpu_cpu_opt = &opt->value;
25414 selected_cpu = opt->value;
25415 if (opt->canonical_name)
25416 strcpy (selected_cpu_name, opt->canonical_name);
25417 else
25418 {
25419 int i;
25420 for (i = 0; opt->name[i]; i++)
25421 selected_cpu_name[i] = TOUPPER (opt->name[i]);
25422
25423 selected_cpu_name[i] = 0;
25424 }
25425 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25426 *input_line_pointer = saved_char;
25427 demand_empty_rest_of_line ();
25428 return;
25429 }
25430 as_bad (_("unknown cpu `%s'"), name);
25431 *input_line_pointer = saved_char;
25432 ignore_rest_of_line ();
25433 }
25434
25435
25436 /* Parse a .arch directive. */
25437
25438 static void
25439 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
25440 {
25441 const struct arm_arch_option_table *opt;
25442 char saved_char;
25443 char *name;
25444
25445 name = input_line_pointer;
25446 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25447 input_line_pointer++;
25448 saved_char = *input_line_pointer;
25449 *input_line_pointer = 0;
25450
25451 /* Skip the first "all" entry. */
25452 for (opt = arm_archs + 1; opt->name != NULL; opt++)
25453 if (streq (opt->name, name))
25454 {
25455 mcpu_cpu_opt = &opt->value;
25456 selected_cpu = opt->value;
25457 strcpy (selected_cpu_name, opt->name);
25458 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25459 *input_line_pointer = saved_char;
25460 demand_empty_rest_of_line ();
25461 return;
25462 }
25463
25464 as_bad (_("unknown architecture `%s'\n"), name);
25465 *input_line_pointer = saved_char;
25466 ignore_rest_of_line ();
25467 }
25468
25469
25470 /* Parse a .object_arch directive. */
25471
25472 static void
25473 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
25474 {
25475 const struct arm_arch_option_table *opt;
25476 char saved_char;
25477 char *name;
25478
25479 name = input_line_pointer;
25480 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25481 input_line_pointer++;
25482 saved_char = *input_line_pointer;
25483 *input_line_pointer = 0;
25484
25485 /* Skip the first "all" entry. */
25486 for (opt = arm_archs + 1; opt->name != NULL; opt++)
25487 if (streq (opt->name, name))
25488 {
25489 object_arch = &opt->value;
25490 *input_line_pointer = saved_char;
25491 demand_empty_rest_of_line ();
25492 return;
25493 }
25494
25495 as_bad (_("unknown architecture `%s'\n"), name);
25496 *input_line_pointer = saved_char;
25497 ignore_rest_of_line ();
25498 }
25499
25500 /* Parse a .arch_extension directive. */
25501
25502 static void
25503 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
25504 {
25505 const struct arm_option_extension_value_table *opt;
25506 char saved_char;
25507 char *name;
25508 int adding_value = 1;
25509
25510 name = input_line_pointer;
25511 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25512 input_line_pointer++;
25513 saved_char = *input_line_pointer;
25514 *input_line_pointer = 0;
25515
25516 if (strlen (name) >= 2
25517 && strncmp (name, "no", 2) == 0)
25518 {
25519 adding_value = 0;
25520 name += 2;
25521 }
25522
25523 for (opt = arm_extensions; opt->name != NULL; opt++)
25524 if (streq (opt->name, name))
25525 {
25526 if (!ARM_CPU_HAS_FEATURE (*mcpu_cpu_opt, opt->allowed_archs))
25527 {
25528 as_bad (_("architectural extension `%s' is not allowed for the "
25529 "current base architecture"), name);
25530 break;
25531 }
25532
25533 if (adding_value)
25534 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_cpu,
25535 opt->merge_value);
25536 else
25537 ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, opt->clear_value);
25538
25539 mcpu_cpu_opt = &selected_cpu;
25540 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25541 *input_line_pointer = saved_char;
25542 demand_empty_rest_of_line ();
25543 return;
25544 }
25545
25546 if (opt->name == NULL)
25547 as_bad (_("unknown architecture extension `%s'\n"), name);
25548
25549 *input_line_pointer = saved_char;
25550 ignore_rest_of_line ();
25551 }
25552
25553 /* Parse a .fpu directive. */
25554
25555 static void
25556 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
25557 {
25558 const struct arm_option_fpu_value_table *opt;
25559 char saved_char;
25560 char *name;
25561
25562 name = input_line_pointer;
25563 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25564 input_line_pointer++;
25565 saved_char = *input_line_pointer;
25566 *input_line_pointer = 0;
25567
25568 for (opt = arm_fpus; opt->name != NULL; opt++)
25569 if (streq (opt->name, name))
25570 {
25571 mfpu_opt = &opt->value;
25572 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25573 *input_line_pointer = saved_char;
25574 demand_empty_rest_of_line ();
25575 return;
25576 }
25577
25578 as_bad (_("unknown floating point format `%s'\n"), name);
25579 *input_line_pointer = saved_char;
25580 ignore_rest_of_line ();
25581 }
25582
/* Copy symbol information.  Called when one symbol must inherit the
   ARM-specific per-symbol state of another (e.g. via symbol aliasing).
   ARM_GET_FLAG yields an lvalue, so plain assignment copies the whole
   flag word — presumably including the ARM/Thumb state; confirm
   against the ARM_GET_FLAG definition in tc-arm.h.  */

void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
25590
25591 #ifdef OBJ_ELF
25592 /* Given a symbolic attribute NAME, return the proper integer value.
25593 Returns -1 if the attribute is not known. */
25594
25595 int
25596 arm_convert_symbolic_attribute (const char *name)
25597 {
25598 static const struct
25599 {
25600 const char * name;
25601 const int tag;
25602 }
25603 attribute_table[] =
25604 {
25605 /* When you modify this table you should
25606 also modify the list in doc/c-arm.texi. */
25607 #define T(tag) {#tag, tag}
25608 T (Tag_CPU_raw_name),
25609 T (Tag_CPU_name),
25610 T (Tag_CPU_arch),
25611 T (Tag_CPU_arch_profile),
25612 T (Tag_ARM_ISA_use),
25613 T (Tag_THUMB_ISA_use),
25614 T (Tag_FP_arch),
25615 T (Tag_VFP_arch),
25616 T (Tag_WMMX_arch),
25617 T (Tag_Advanced_SIMD_arch),
25618 T (Tag_PCS_config),
25619 T (Tag_ABI_PCS_R9_use),
25620 T (Tag_ABI_PCS_RW_data),
25621 T (Tag_ABI_PCS_RO_data),
25622 T (Tag_ABI_PCS_GOT_use),
25623 T (Tag_ABI_PCS_wchar_t),
25624 T (Tag_ABI_FP_rounding),
25625 T (Tag_ABI_FP_denormal),
25626 T (Tag_ABI_FP_exceptions),
25627 T (Tag_ABI_FP_user_exceptions),
25628 T (Tag_ABI_FP_number_model),
25629 T (Tag_ABI_align_needed),
25630 T (Tag_ABI_align8_needed),
25631 T (Tag_ABI_align_preserved),
25632 T (Tag_ABI_align8_preserved),
25633 T (Tag_ABI_enum_size),
25634 T (Tag_ABI_HardFP_use),
25635 T (Tag_ABI_VFP_args),
25636 T (Tag_ABI_WMMX_args),
25637 T (Tag_ABI_optimization_goals),
25638 T (Tag_ABI_FP_optimization_goals),
25639 T (Tag_compatibility),
25640 T (Tag_CPU_unaligned_access),
25641 T (Tag_FP_HP_extension),
25642 T (Tag_VFP_HP_extension),
25643 T (Tag_ABI_FP_16bit_format),
25644 T (Tag_MPextension_use),
25645 T (Tag_DIV_use),
25646 T (Tag_nodefaults),
25647 T (Tag_also_compatible_with),
25648 T (Tag_conformance),
25649 T (Tag_T2EE_use),
25650 T (Tag_Virtualization_use),
25651 /* We deliberately do not include Tag_MPextension_use_legacy. */
25652 #undef T
25653 };
25654 unsigned int i;
25655
25656 if (name == NULL)
25657 return -1;
25658
25659 for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
25660 if (streq (name, attribute_table[i].name))
25661 return attribute_table[i].tag;
25662
25663 return -1;
25664 }
25665
25666
/* Apply sym value for relocations only in the case that they are for
   local symbols in the same segment as the fixup and you have the
   respective architectural feature for blx and simple switches.

   Return 1 when the symbol value should be folded into the fixup now,
   0 when it must be left for the relocation to resolve.  Applying the
   value when a reloc will also be emitted would double-count the
   offset (see PR 17444 below).  */
int
arm_apply_sym_value (struct fix * fixP, segT this_seg)
{
  /* All of the following must hold before we consider resolving here:
     an attached symbol, v5T in the selected CPU (so BLX-style
     interworking branches exist), same segment as the fixup, and no
     forced relocation for the symbol.  */
  if (fixP->fx_addsy
      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
      /* PR 17444: If the local symbol is in a different section then a reloc
	 will always be generated for it, so applying the symbol value now
	 will result in a double offset being stored in the relocation.  */
      && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
      && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
    {
      switch (fixP->fx_r_type)
	{
	/* NOTE(review): the pairing below is asymmetric — BLX/BRANCH23
	   relocs are gated on ARM_IS_FUNC while CALL/Thumb-BLX relocs
	   are gated on THUMB_IS_FUNC.  This appears tied to which
	   branches change instruction-set state; confirm the intent
	   against the reloc semantics before modifying.  */
	case BFD_RELOC_ARM_PCREL_BLX:
	case BFD_RELOC_THUMB_PCREL_BRANCH23:
	  if (ARM_IS_FUNC (fixP->fx_addsy))
	    return 1;
	  break;

	case BFD_RELOC_ARM_PCREL_CALL:
	case BFD_RELOC_THUMB_PCREL_BLX:
	  if (THUMB_IS_FUNC (fixP->fx_addsy))
	    return 1;
	  break;

	default:
	  break;
	}

    }
  /* Not eligible: leave the value for the relocation.  */
  return 0;
}
25702 #endif /* OBJ_ELF */