* config/tc-arm.c (md_apply_fix): Do not clear the write_back bit.
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
3 2004, 2005, 2006
4 Free Software Foundation, Inc.
5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 Modified by David Taylor (dtaylor@armltd.co.uk)
7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
10
11 This file is part of GAS, the GNU Assembler.
12
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
16 any later version.
17
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
26 02110-1301, USA. */
27
28 #include <limits.h>
29 #include <stdarg.h>
30 #define NO_RELOC 0
31 #include "as.h"
32 #include "safe-ctype.h"
33 #include "subsegs.h"
34 #include "obstack.h"
35
36 #include "opcode/arm.h"
37
38 #ifdef OBJ_ELF
39 #include "elf/arm.h"
40 #include "dw2gencfi.h"
41 #endif
42
43 #include "dwarf2dbg.h"
44
45 #define WARN_DEPRECATED 1
46
47 #ifdef OBJ_ELF
48 /* Must be at least the size of the largest unwind opcode (currently two). */
49 #define ARM_OPCODE_CHUNK_SIZE 8
50
51 /* This structure holds the unwinding state. */
52
53 static struct
54 {
55 symbolS * proc_start;
56 symbolS * table_entry;
57 symbolS * personality_routine;
58 int personality_index;
59 /* The segment containing the function. */
60 segT saved_seg;
61 subsegT saved_subseg;
62 /* Opcodes generated from this function. */
63 unsigned char * opcodes;
64 int opcode_count;
65 int opcode_alloc;
66 /* The number of bytes pushed to the stack. */
67 offsetT frame_size;
68 /* We don't add stack adjustment opcodes immediately so that we can merge
69 multiple adjustments. We can also omit the final adjustment
70 when using a frame pointer. */
71 offsetT pending_offset;
72 /* These two fields are set by both unwind_movsp and unwind_setfp. They
73 hold the reg+offset to use when restoring sp from a frame pointer. */
74 offsetT fp_offset;
75 int fp_reg;
76 /* Nonzero if an unwind_setfp directive has been seen. */
77 unsigned fp_used:1;
78 /* Nonzero if the last opcode restores sp from fp_reg. */
79 unsigned sp_restored:1;
80 } unwind;
81
 82 /* Bit N, if set, indicates that an R_ARM_NONE relocation has already been
 83 output for __aeabi_unwind_cpp_prN. This enables the dependency to be
 84 emitted only once per section, to save unnecessary bloat. */
85 static unsigned int marked_pr_dependency = 0;
86
87 #endif /* OBJ_ELF */
88
89 /* Results from operand parsing worker functions. */
90
91 typedef enum
92 {
93 PARSE_OPERAND_SUCCESS,
94 PARSE_OPERAND_FAIL,
95 PARSE_OPERAND_FAIL_NO_BACKTRACK
96 } parse_operand_result;
97
98 enum arm_float_abi
99 {
100 ARM_FLOAT_ABI_HARD,
101 ARM_FLOAT_ABI_SOFTFP,
102 ARM_FLOAT_ABI_SOFT
103 };
104
105 /* Types of processor to assemble for. */
106 #ifndef CPU_DEFAULT
107 #if defined __XSCALE__
108 #define CPU_DEFAULT ARM_ARCH_XSCALE
109 #else
110 #if defined __thumb__
111 #define CPU_DEFAULT ARM_ARCH_V5T
112 #endif
113 #endif
114 #endif
115
116 #ifndef FPU_DEFAULT
117 # ifdef TE_LINUX
118 # define FPU_DEFAULT FPU_ARCH_FPA
119 # elif defined (TE_NetBSD)
120 # ifdef OBJ_ELF
121 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
122 # else
123 /* Legacy a.out format. */
124 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
125 # endif
126 # elif defined (TE_VXWORKS)
127 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
128 # else
129 /* For backwards compatibility, default to FPA. */
130 # define FPU_DEFAULT FPU_ARCH_FPA
131 # endif
132 #endif /* ifndef FPU_DEFAULT */
133
134 #define streq(a, b) (strcmp (a, b) == 0)
135
136 static arm_feature_set cpu_variant;
137 static arm_feature_set arm_arch_used;
138 static arm_feature_set thumb_arch_used;
139
140 /* Flags stored in private area of BFD structure. */
141 static int uses_apcs_26 = FALSE;
142 static int atpcs = FALSE;
143 static int support_interwork = FALSE;
144 static int uses_apcs_float = FALSE;
145 static int pic_code = FALSE;
146
147 /* Variables that we set while parsing command-line options. Once all
148 options have been read we re-process these values to set the real
149 assembly flags. */
150 static const arm_feature_set *legacy_cpu = NULL;
151 static const arm_feature_set *legacy_fpu = NULL;
152
153 static const arm_feature_set *mcpu_cpu_opt = NULL;
154 static const arm_feature_set *mcpu_fpu_opt = NULL;
155 static const arm_feature_set *march_cpu_opt = NULL;
156 static const arm_feature_set *march_fpu_opt = NULL;
157 static const arm_feature_set *mfpu_opt = NULL;
158
159 /* Constants for known architecture features. */
160 static const arm_feature_set fpu_default = FPU_DEFAULT;
161 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
162 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
163 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
164 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
165 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
166 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
167 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
168 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
169
170 #ifdef CPU_DEFAULT
171 static const arm_feature_set cpu_default = CPU_DEFAULT;
172 #endif
173
174 static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
 175 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V2, 0);
176 static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
177 static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
178 static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
179 static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
180 static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
181 static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
182 static const arm_feature_set arm_ext_v4t_5 =
183 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
184 static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
185 static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
186 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
187 static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
188 static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
189 static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
190 static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0);
191 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
192 static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
193 static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
194 static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
195 static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
196 static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
197 static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);
198
199 static const arm_feature_set arm_arch_any = ARM_ANY;
200 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
201 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
202 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
203
204 static const arm_feature_set arm_cext_iwmmxt2 =
205 ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
206 static const arm_feature_set arm_cext_iwmmxt =
207 ARM_FEATURE (0, ARM_CEXT_IWMMXT);
208 static const arm_feature_set arm_cext_xscale =
209 ARM_FEATURE (0, ARM_CEXT_XSCALE);
210 static const arm_feature_set arm_cext_maverick =
211 ARM_FEATURE (0, ARM_CEXT_MAVERICK);
212 static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
213 static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
214 static const arm_feature_set fpu_vfp_ext_v1xd =
215 ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
216 static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
217 static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
218 static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
219 static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
220 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
221 ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
222
223 static int mfloat_abi_opt = -1;
224 /* Record user cpu selection for object attributes. */
225 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
226 /* Must be long enough to hold any of the names in arm_cpus. */
227 static char selected_cpu_name[16];
228 #ifdef OBJ_ELF
229 # ifdef EABI_DEFAULT
230 static int meabi_flags = EABI_DEFAULT;
231 # else
232 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
233 # endif
234 #endif
235
236 #ifdef OBJ_ELF
237 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
238 symbolS * GOT_symbol;
239 #endif
240
241 /* 0: assemble for ARM,
242 1: assemble for Thumb,
243 2: assemble for Thumb even though target CPU does not support thumb
244 instructions. */
245 static int thumb_mode = 0;
246
247 /* If unified_syntax is true, we are processing the new unified
248 ARM/Thumb syntax. Important differences from the old ARM mode:
249
250 - Immediate operands do not require a # prefix.
251 - Conditional affixes always appear at the end of the
252 instruction. (For backward compatibility, those instructions
 253 that formerly had them in the middle continue to accept them
254 there.)
255 - The IT instruction may appear, and if it does is validated
256 against subsequent conditional affixes. It does not generate
257 machine code.
258
259 Important differences from the old Thumb mode:
260
261 - Immediate operands do not require a # prefix.
262 - Most of the V6T2 instructions are only available in unified mode.
263 - The .N and .W suffixes are recognized and honored (it is an error
264 if they cannot be honored).
265 - All instructions set the flags if and only if they have an 's' affix.
266 - Conditional affixes may be used. They are validated against
267 preceding IT instructions. Unlike ARM mode, you cannot use a
268 conditional affix except in the scope of an IT instruction. */
269
270 static bfd_boolean unified_syntax = FALSE;
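
/* A couple of illustrative examples of the differences listed above
   (not exhaustive):

       old Thumb syntax:      add  r0, r1        @ sets the flags implicitly
       unified Thumb syntax:  adds r0, r0, r1    @ 's' affix required
       unified syntax:        mov  r0, 42        @ '#' prefix is optional  */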
271
272 enum neon_el_type
273 {
274 NT_invtype,
275 NT_untyped,
276 NT_integer,
277 NT_float,
278 NT_poly,
279 NT_signed,
280 NT_unsigned
281 };
282
283 struct neon_type_el
284 {
285 enum neon_el_type type;
286 unsigned size;
287 };
288
289 #define NEON_MAX_TYPE_ELS 4
290
291 struct neon_type
292 {
293 struct neon_type_el el[NEON_MAX_TYPE_ELS];
294 unsigned elems;
295 };
296
297 struct arm_it
298 {
299 const char * error;
300 unsigned long instruction;
301 int size;
302 int size_req;
303 int cond;
304 /* "uncond_value" is set to the value in place of the conditional field in
305 unconditional versions of the instruction, or -1 if nothing is
306 appropriate. */
307 int uncond_value;
308 struct neon_type vectype;
309 /* Set to the opcode if the instruction needs relaxation.
310 Zero if the instruction is not relaxed. */
311 unsigned long relax;
312 struct
313 {
314 bfd_reloc_code_real_type type;
315 expressionS exp;
316 int pc_rel;
317 } reloc;
318
319 struct
320 {
321 unsigned reg;
322 signed int imm;
323 struct neon_type_el vectype;
324 unsigned present : 1; /* Operand present. */
325 unsigned isreg : 1; /* Operand was a register. */
326 unsigned immisreg : 1; /* .imm field is a second register. */
327 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
328 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
329 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
330 instructions. This allows us to disambiguate ARM <-> vector insns. */
331 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
332 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
333 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
334 unsigned issingle : 1; /* Operand is VFP single-precision register. */
335 unsigned hasreloc : 1; /* Operand has relocation suffix. */
336 unsigned writeback : 1; /* Operand has trailing ! */
337 unsigned preind : 1; /* Preindexed address. */
338 unsigned postind : 1; /* Postindexed address. */
339 unsigned negative : 1; /* Index register was negated. */
340 unsigned shifted : 1; /* Shift applied to operation. */
341 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
342 } operands[6];
343 };
344
345 static struct arm_it inst;
346
347 #define NUM_FLOAT_VALS 8
348
349 const char * fp_const[] =
350 {
351 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
352 };
353
354 /* Number of littlenums required to hold an extended precision number. */
355 #define MAX_LITTLENUMS 6
356
357 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
358
359 #define FAIL (-1)
360 #define SUCCESS (0)
361
362 #define SUFF_S 1
363 #define SUFF_D 2
364 #define SUFF_E 3
365 #define SUFF_P 4
366
367 #define CP_T_X 0x00008000
368 #define CP_T_Y 0x00400000
369
370 #define CONDS_BIT 0x00100000
371 #define LOAD_BIT 0x00100000
372
373 #define DOUBLE_LOAD_FLAG 0x00000001
374
375 struct asm_cond
376 {
377 const char * template;
378 unsigned long value;
379 };
380
381 #define COND_ALWAYS 0xE
382
383 struct asm_psr
384 {
385 const char *template;
386 unsigned long field;
387 };
388
389 struct asm_barrier_opt
390 {
391 const char *template;
392 unsigned long value;
393 };
394
395 /* The bit that distinguishes CPSR and SPSR. */
396 #define SPSR_BIT (1 << 22)
397
398 /* The individual PSR flag bits. */
399 #define PSR_c (1 << 16)
400 #define PSR_x (1 << 17)
401 #define PSR_s (1 << 18)
402 #define PSR_f (1 << 19)
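
/* For example (illustrative): an MSR field suffix such as "_fc" on CPSR
   selects PSR_f | PSR_c, and the same fields on SPSR additionally set
   SPSR_BIT.  */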
403
404 struct reloc_entry
405 {
406 char *name;
407 bfd_reloc_code_real_type reloc;
408 };
409
410 enum vfp_reg_pos
411 {
412 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
413 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
414 };
415
416 enum vfp_ldstm_type
417 {
418 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
419 };
420
421 /* Bits for DEFINED field in neon_typed_alias. */
422 #define NTA_HASTYPE 1
423 #define NTA_HASINDEX 2
424
425 struct neon_typed_alias
426 {
427 unsigned char defined;
428 unsigned char index;
429 struct neon_type_el eltype;
430 };
431
432 /* ARM register categories. This includes coprocessor numbers and various
433 architecture extensions' registers. */
434 enum arm_reg_type
435 {
436 REG_TYPE_RN,
437 REG_TYPE_CP,
438 REG_TYPE_CN,
439 REG_TYPE_FN,
440 REG_TYPE_VFS,
441 REG_TYPE_VFD,
442 REG_TYPE_NQ,
443 REG_TYPE_VFSD,
444 REG_TYPE_NDQ,
445 REG_TYPE_NSDQ,
446 REG_TYPE_VFC,
447 REG_TYPE_MVF,
448 REG_TYPE_MVD,
449 REG_TYPE_MVFX,
450 REG_TYPE_MVDX,
451 REG_TYPE_MVAX,
452 REG_TYPE_DSPSC,
453 REG_TYPE_MMXWR,
454 REG_TYPE_MMXWC,
455 REG_TYPE_MMXWCG,
456 REG_TYPE_XSCALE,
457 };
458
459 /* Structure for a hash table entry for a register.
460 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
461 information which states whether a vector type or index is specified (for a
462 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
463 struct reg_entry
464 {
465 const char *name;
466 unsigned char number;
467 unsigned char type;
468 unsigned char builtin;
469 struct neon_typed_alias *neon;
470 };
471
472 /* Diagnostics used when we don't get a register of the expected type. */
473 const char *const reg_expected_msgs[] =
474 {
475 N_("ARM register expected"),
476 N_("bad or missing co-processor number"),
477 N_("co-processor register expected"),
478 N_("FPA register expected"),
479 N_("VFP single precision register expected"),
480 N_("VFP/Neon double precision register expected"),
481 N_("Neon quad precision register expected"),
482 N_("VFP single or double precision register expected"),
483 N_("Neon double or quad precision register expected"),
484 N_("VFP single, double or Neon quad precision register expected"),
485 N_("VFP system register expected"),
486 N_("Maverick MVF register expected"),
487 N_("Maverick MVD register expected"),
488 N_("Maverick MVFX register expected"),
489 N_("Maverick MVDX register expected"),
490 N_("Maverick MVAX register expected"),
491 N_("Maverick DSPSC register expected"),
492 N_("iWMMXt data register expected"),
493 N_("iWMMXt control register expected"),
494 N_("iWMMXt scalar register expected"),
495 N_("XScale accumulator register expected"),
496 };
497
498 /* Some well known registers that we refer to directly elsewhere. */
499 #define REG_SP 13
500 #define REG_LR 14
501 #define REG_PC 15
502
 503 /* ARM instructions take 4 bytes in the object file, Thumb instructions
504 take 2: */
505 #define INSN_SIZE 4
506
507 struct asm_opcode
508 {
509 /* Basic string to match. */
510 const char *template;
511
512 /* Parameters to instruction. */
513 unsigned char operands[8];
514
515 /* Conditional tag - see opcode_lookup. */
516 unsigned int tag : 4;
517
518 /* Basic instruction code. */
519 unsigned int avalue : 28;
520
521 /* Thumb-format instruction code. */
522 unsigned int tvalue;
523
524 /* Which architecture variant provides this instruction. */
525 const arm_feature_set *avariant;
526 const arm_feature_set *tvariant;
527
528 /* Function to call to encode instruction in ARM format. */
529 void (* aencode) (void);
530
531 /* Function to call to encode instruction in Thumb format. */
532 void (* tencode) (void);
533 };
534
535 /* Defines for various bits that we will want to toggle. */
536 #define INST_IMMEDIATE 0x02000000
537 #define OFFSET_REG 0x02000000
538 #define HWOFFSET_IMM 0x00400000
539 #define SHIFT_BY_REG 0x00000010
540 #define PRE_INDEX 0x01000000
541 #define INDEX_UP 0x00800000
542 #define WRITE_BACK 0x00200000
543 #define LDM_TYPE_2_OR_3 0x00400000
544
545 #define LITERAL_MASK 0xf000f000
546 #define OPCODE_MASK 0xfe1fffff
547 #define V4_STR_BIT 0x00000020
548
549 #define DATA_OP_SHIFT 21
550
551 #define T2_OPCODE_MASK 0xfe1fffff
552 #define T2_DATA_OP_SHIFT 21
553
554 /* Codes to distinguish the arithmetic instructions. */
555 #define OPCODE_AND 0
556 #define OPCODE_EOR 1
557 #define OPCODE_SUB 2
558 #define OPCODE_RSB 3
559 #define OPCODE_ADD 4
560 #define OPCODE_ADC 5
561 #define OPCODE_SBC 6
562 #define OPCODE_RSC 7
563 #define OPCODE_TST 8
564 #define OPCODE_TEQ 9
565 #define OPCODE_CMP 10
566 #define OPCODE_CMN 11
567 #define OPCODE_ORR 12
568 #define OPCODE_MOV 13
569 #define OPCODE_BIC 14
570 #define OPCODE_MVN 15
571
572 #define T2_OPCODE_AND 0
573 #define T2_OPCODE_BIC 1
574 #define T2_OPCODE_ORR 2
575 #define T2_OPCODE_ORN 3
576 #define T2_OPCODE_EOR 4
577 #define T2_OPCODE_ADD 8
578 #define T2_OPCODE_ADC 10
579 #define T2_OPCODE_SBC 11
580 #define T2_OPCODE_SUB 13
581 #define T2_OPCODE_RSB 14
582
583 #define T_OPCODE_MUL 0x4340
584 #define T_OPCODE_TST 0x4200
585 #define T_OPCODE_CMN 0x42c0
586 #define T_OPCODE_NEG 0x4240
587 #define T_OPCODE_MVN 0x43c0
588
589 #define T_OPCODE_ADD_R3 0x1800
590 #define T_OPCODE_SUB_R3 0x1a00
591 #define T_OPCODE_ADD_HI 0x4400
592 #define T_OPCODE_ADD_ST 0xb000
593 #define T_OPCODE_SUB_ST 0xb080
594 #define T_OPCODE_ADD_SP 0xa800
595 #define T_OPCODE_ADD_PC 0xa000
596 #define T_OPCODE_ADD_I8 0x3000
597 #define T_OPCODE_SUB_I8 0x3800
598 #define T_OPCODE_ADD_I3 0x1c00
599 #define T_OPCODE_SUB_I3 0x1e00
600
601 #define T_OPCODE_ASR_R 0x4100
602 #define T_OPCODE_LSL_R 0x4080
603 #define T_OPCODE_LSR_R 0x40c0
604 #define T_OPCODE_ROR_R 0x41c0
605 #define T_OPCODE_ASR_I 0x1000
606 #define T_OPCODE_LSL_I 0x0000
607 #define T_OPCODE_LSR_I 0x0800
608
609 #define T_OPCODE_MOV_I8 0x2000
610 #define T_OPCODE_CMP_I8 0x2800
611 #define T_OPCODE_CMP_LR 0x4280
612 #define T_OPCODE_MOV_HR 0x4600
613 #define T_OPCODE_CMP_HR 0x4500
614
615 #define T_OPCODE_LDR_PC 0x4800
616 #define T_OPCODE_LDR_SP 0x9800
617 #define T_OPCODE_STR_SP 0x9000
618 #define T_OPCODE_LDR_IW 0x6800
619 #define T_OPCODE_STR_IW 0x6000
620 #define T_OPCODE_LDR_IH 0x8800
621 #define T_OPCODE_STR_IH 0x8000
622 #define T_OPCODE_LDR_IB 0x7800
623 #define T_OPCODE_STR_IB 0x7000
624 #define T_OPCODE_LDR_RW 0x5800
625 #define T_OPCODE_STR_RW 0x5000
626 #define T_OPCODE_LDR_RH 0x5a00
627 #define T_OPCODE_STR_RH 0x5200
628 #define T_OPCODE_LDR_RB 0x5c00
629 #define T_OPCODE_STR_RB 0x5400
630
631 #define T_OPCODE_PUSH 0xb400
632 #define T_OPCODE_POP 0xbc00
633
634 #define T_OPCODE_BRANCH 0xe000
635
636 #define THUMB_SIZE 2 /* Size of thumb instruction. */
637 #define THUMB_PP_PC_LR 0x0100
638 #define THUMB_LOAD_BIT 0x0800
639 #define THUMB2_LOAD_BIT 0x00100000
640
641 #define BAD_ARGS _("bad arguments to instruction")
642 #define BAD_PC _("r15 not allowed here")
643 #define BAD_COND _("instruction cannot be conditional")
644 #define BAD_OVERLAP _("registers may not be the same")
645 #define BAD_HIREG _("lo register required")
646 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
 647 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
648 #define BAD_BRANCH _("branch must be last instruction in IT block")
649 #define BAD_NOT_IT _("instruction not allowed in IT block")
650 #define BAD_FPU _("selected FPU does not support instruction")
651
652 static struct hash_control *arm_ops_hsh;
653 static struct hash_control *arm_cond_hsh;
654 static struct hash_control *arm_shift_hsh;
655 static struct hash_control *arm_psr_hsh;
656 static struct hash_control *arm_v7m_psr_hsh;
657 static struct hash_control *arm_reg_hsh;
658 static struct hash_control *arm_reloc_hsh;
659 static struct hash_control *arm_barrier_opt_hsh;
660
661 /* Stuff needed to resolve the label ambiguity
662 As:
663 ...
664 label: <insn>
665 may differ from:
666 ...
667 label:
668 <insn>
669 */
670
671 symbolS * last_label_seen;
672 static int label_is_thumb_function_name = FALSE;
673 \f
674 /* Literal pool structure. Held on a per-section
675 and per-sub-section basis. */
676
677 #define MAX_LITERAL_POOL_SIZE 1024
678 typedef struct literal_pool
679 {
680 expressionS literals [MAX_LITERAL_POOL_SIZE];
681 unsigned int next_free_entry;
682 unsigned int id;
683 symbolS * symbol;
684 segT section;
685 subsegT sub_section;
686 struct literal_pool * next;
687 } literal_pool;
688
689 /* Pointer to a linked list of literal pools. */
690 literal_pool * list_of_pools = NULL;
691
692 /* State variables for IT block handling. */
693 static bfd_boolean current_it_mask = 0;
694 static int current_cc;
695
696 \f
697 /* Pure syntax. */
698
699 /* This array holds the chars that always start a comment. If the
700 pre-processor is disabled, these aren't very useful. */
701 const char comment_chars[] = "@";
702
703 /* This array holds the chars that only start a comment at the beginning of
704 a line. If the line seems to have the form '# 123 filename'
705 .line and .file directives will appear in the pre-processed output. */
706 /* Note that input_file.c hand checks for '#' at the beginning of the
707 first line of the input file. This is because the compiler outputs
708 #NO_APP at the beginning of its output. */
709 /* Also note that comments like this one will always work. */
710 const char line_comment_chars[] = "#";
711
712 const char line_separator_chars[] = ";";
713
714 /* Chars that can be used to separate mant
715 from exp in floating point numbers. */
716 const char EXP_CHARS[] = "eE";
717
718 /* Chars that mean this number is a floating point constant. */
719 /* As in 0f12.456 */
720 /* or 0d1.2345e12 */
721
722 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
723
724 /* Prefix characters that indicate the start of an immediate
725 value. */
726 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
727
728 /* Separator character handling. */
729
730 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
731
732 static inline int
733 skip_past_char (char ** str, char c)
734 {
735 if (**str == c)
736 {
737 (*str)++;
738 return SUCCESS;
739 }
740 else
741 return FAIL;
742 }
743 #define skip_past_comma(str) skip_past_char (str, ',')
744
745 /* Arithmetic expressions (possibly involving symbols). */
746
747 /* Return TRUE if anything in the expression is a bignum. */
748
749 static int
750 walk_no_bignums (symbolS * sp)
751 {
752 if (symbol_get_value_expression (sp)->X_op == O_big)
753 return 1;
754
755 if (symbol_get_value_expression (sp)->X_add_symbol)
756 {
757 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
758 || (symbol_get_value_expression (sp)->X_op_symbol
759 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
760 }
761
762 return 0;
763 }
764
765 static int in_my_get_expression = 0;
766
767 /* Third argument to my_get_expression. */
768 #define GE_NO_PREFIX 0
769 #define GE_IMM_PREFIX 1
770 #define GE_OPT_PREFIX 2
771 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
772 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
773 #define GE_OPT_PREFIX_BIG 3
774
775 static int
776 my_get_expression (expressionS * ep, char ** str, int prefix_mode)
777 {
778 char * save_in;
779 segT seg;
780
781 /* In unified syntax, all prefixes are optional. */
782 if (unified_syntax)
783 prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
784 : GE_OPT_PREFIX;
785
786 switch (prefix_mode)
787 {
788 case GE_NO_PREFIX: break;
789 case GE_IMM_PREFIX:
790 if (!is_immediate_prefix (**str))
791 {
792 inst.error = _("immediate expression requires a # prefix");
793 return FAIL;
794 }
795 (*str)++;
796 break;
797 case GE_OPT_PREFIX:
798 case GE_OPT_PREFIX_BIG:
799 if (is_immediate_prefix (**str))
800 (*str)++;
801 break;
802 default: abort ();
803 }
804
805 memset (ep, 0, sizeof (expressionS));
806
807 save_in = input_line_pointer;
808 input_line_pointer = *str;
809 in_my_get_expression = 1;
810 seg = expression (ep);
811 in_my_get_expression = 0;
812
813 if (ep->X_op == O_illegal)
814 {
815 /* We found a bad expression in md_operand(). */
816 *str = input_line_pointer;
817 input_line_pointer = save_in;
818 if (inst.error == NULL)
819 inst.error = _("bad expression");
820 return 1;
821 }
822
823 #ifdef OBJ_AOUT
824 if (seg != absolute_section
825 && seg != text_section
826 && seg != data_section
827 && seg != bss_section
828 && seg != undefined_section)
829 {
830 inst.error = _("bad segment");
831 *str = input_line_pointer;
832 input_line_pointer = save_in;
833 return 1;
834 }
835 #endif
836
837 /* Get rid of any bignums now, so that we don't generate an error for which
838 we can't establish a line number later on. Big numbers are never valid
839 in instructions, which is where this routine is always called. */
840 if (prefix_mode != GE_OPT_PREFIX_BIG
841 && (ep->X_op == O_big
842 || (ep->X_add_symbol
843 && (walk_no_bignums (ep->X_add_symbol)
844 || (ep->X_op_symbol
845 && walk_no_bignums (ep->X_op_symbol))))))
846 {
847 inst.error = _("invalid constant");
848 *str = input_line_pointer;
849 input_line_pointer = save_in;
850 return 1;
851 }
852
853 *str = input_line_pointer;
854 input_line_pointer = save_in;
855 return 0;
856 }
857
858 /* Turn a string in input_line_pointer into a floating point constant
859 of type TYPE, and store the appropriate bytes in *LITP. The number
860 of LITTLENUMS emitted is stored in *SIZEP. An error message is
861 returned, or NULL on OK.
862
 863 Note that fp constants aren't represented in the normal way on the ARM.
864 In big endian mode, things are as expected. However, in little endian
865 mode fp constants are big-endian word-wise, and little-endian byte-wise
866 within the words. For example, (double) 1.1 in big endian mode is
867 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
868 the byte sequence 99 99 f1 3f 9a 99 99 99.
869
870 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
871
872 char *
873 md_atof (int type, char * litP, int * sizeP)
874 {
875 int prec;
876 LITTLENUM_TYPE words[MAX_LITTLENUMS];
877 char *t;
878 int i;
879
880 switch (type)
881 {
882 case 'f':
883 case 'F':
884 case 's':
885 case 'S':
886 prec = 2;
887 break;
888
889 case 'd':
890 case 'D':
891 case 'r':
892 case 'R':
893 prec = 4;
894 break;
895
896 case 'x':
897 case 'X':
898 prec = 6;
899 break;
900
901 case 'p':
902 case 'P':
903 prec = 6;
904 break;
905
906 default:
907 *sizeP = 0;
908 return _("bad call to MD_ATOF()");
909 }
910
911 t = atof_ieee (input_line_pointer, type, words);
912 if (t)
913 input_line_pointer = t;
914 *sizeP = prec * 2;
915
916 if (target_big_endian)
917 {
918 for (i = 0; i < prec; i++)
919 {
920 md_number_to_chars (litP, (valueT) words[i], 2);
921 litP += 2;
922 }
923 }
924 else
925 {
926 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
927 for (i = prec - 1; i >= 0; i--)
928 {
929 md_number_to_chars (litP, (valueT) words[i], 2);
930 litP += 2;
931 }
932 else
933 /* For a 4 byte float the order of elements in `words' is 1 0.
934 For an 8 byte float the order is 1 0 3 2. */
935 for (i = 0; i < prec; i += 2)
936 {
937 md_number_to_chars (litP, (valueT) words[i + 1], 2);
938 md_number_to_chars (litP + 2, (valueT) words[i], 2);
939 litP += 4;
940 }
941 }
942
943 return 0;
944 }
945
946 /* We handle all bad expressions here, so that we can report the faulty
947 instruction in the error message. */
948 void
949 md_operand (expressionS * expr)
950 {
951 if (in_my_get_expression)
952 expr->X_op = O_illegal;
953 }
954
955 /* Immediate values. */
956
957 /* Generic immediate-value read function for use in directives.
958 Accepts anything that 'expression' can fold to a constant.
959 *val receives the number. */
960 #ifdef OBJ_ELF
961 static int
962 immediate_for_directive (int *val)
963 {
964 expressionS exp;
965 exp.X_op = O_illegal;
966
967 if (is_immediate_prefix (*input_line_pointer))
968 {
969 input_line_pointer++;
970 expression (&exp);
971 }
972
973 if (exp.X_op != O_constant)
974 {
975 as_bad (_("expected #constant"));
976 ignore_rest_of_line ();
977 return FAIL;
978 }
979 *val = exp.X_add_number;
980 return SUCCESS;
981 }
982 #endif
983
984 /* Register parsing. */
985
986 /* Generic register parser. CCP points to what should be the
987 beginning of a register name. If it is indeed a valid register
988 name, advance CCP over it and return the reg_entry structure;
989 otherwise return NULL. Does not issue diagnostics. */
990
991 static struct reg_entry *
992 arm_reg_parse_multi (char **ccp)
993 {
994 char *start = *ccp;
995 char *p;
996 struct reg_entry *reg;
997
998 #ifdef REGISTER_PREFIX
999 if (*start != REGISTER_PREFIX)
1000 return NULL;
1001 start++;
1002 #endif
1003 #ifdef OPTIONAL_REGISTER_PREFIX
1004 if (*start == OPTIONAL_REGISTER_PREFIX)
1005 start++;
1006 #endif
1007
1008 p = start;
1009 if (!ISALPHA (*p) || !is_name_beginner (*p))
1010 return NULL;
1011
1012 do
1013 p++;
1014 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1015
1016 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1017
1018 if (!reg)
1019 return NULL;
1020
1021 *ccp = p;
1022 return reg;
1023 }
1024
1025 static int
1026 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1027 enum arm_reg_type type)
1028 {
1029 /* Alternative syntaxes are accepted for a few register classes. */
1030 switch (type)
1031 {
1032 case REG_TYPE_MVF:
1033 case REG_TYPE_MVD:
1034 case REG_TYPE_MVFX:
1035 case REG_TYPE_MVDX:
1036 /* Generic coprocessor register names are allowed for these. */
1037 if (reg && reg->type == REG_TYPE_CN)
1038 return reg->number;
1039 break;
1040
1041 case REG_TYPE_CP:
1042 /* For backward compatibility, a bare number is valid here. */
1043 {
1044 unsigned long processor = strtoul (start, ccp, 10);
1045 if (*ccp != start && processor <= 15)
1046 return processor;
1047 }
1048
1049 case REG_TYPE_MMXWC:
1050 /* WC includes WCG. ??? I'm not sure this is true for all
1051 instructions that take WC registers. */
1052 if (reg && reg->type == REG_TYPE_MMXWCG)
1053 return reg->number;
1054 break;
1055
1056 default:
1057 break;
1058 }
1059
1060 return FAIL;
1061 }
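
/* For example (illustrative): with TYPE == REG_TYPE_CP a bare "10" is
   accepted as coprocessor number 10, and for the Maverick register types a
   generic coprocessor register name such as "c3" resolves to number 3.  */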
1062
1063 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1064 return value is the register number or FAIL. */
1065
1066 static int
1067 arm_reg_parse (char **ccp, enum arm_reg_type type)
1068 {
1069 char *start = *ccp;
1070 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1071 int ret;
1072
1073 /* Do not allow a scalar (reg+index) to parse as a register. */
1074 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1075 return FAIL;
1076
1077 if (reg && reg->type == type)
1078 return reg->number;
1079
1080 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1081 return ret;
1082
1083 *ccp = start;
1084 return FAIL;
1085 }
1086
1087 /* Parse a Neon type specifier. *STR should point at the leading '.'
1088 character. Does no verification at this stage that the type fits the opcode
1089 properly. E.g.,
1090
1091 .i32.i32.s16
1092 .s32.f32
1093 .u16
1094
1095 Can all be legally parsed by this function.
1096
1097 Fills in neon_type struct pointer with parsed information, and updates STR
1098 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1099 type, FAIL if not. */
1100
1101 static int
1102 parse_neon_type (struct neon_type *type, char **str)
1103 {
1104 char *ptr = *str;
1105
1106 if (type)
1107 type->elems = 0;
1108
1109 while (type->elems < NEON_MAX_TYPE_ELS)
1110 {
1111 enum neon_el_type thistype = NT_untyped;
1112 unsigned thissize = -1u;
1113
1114 if (*ptr != '.')
1115 break;
1116
1117 ptr++;
1118
1119 /* Just a size without an explicit type. */
1120 if (ISDIGIT (*ptr))
1121 goto parsesize;
1122
1123 switch (TOLOWER (*ptr))
1124 {
1125 case 'i': thistype = NT_integer; break;
1126 case 'f': thistype = NT_float; break;
1127 case 'p': thistype = NT_poly; break;
1128 case 's': thistype = NT_signed; break;
1129 case 'u': thistype = NT_unsigned; break;
1130 case 'd':
1131 thistype = NT_float;
1132 thissize = 64;
1133 ptr++;
1134 goto done;
1135 default:
1136 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1137 return FAIL;
1138 }
1139
1140 ptr++;
1141
1142 /* .f is an abbreviation for .f32. */
1143 if (thistype == NT_float && !ISDIGIT (*ptr))
1144 thissize = 32;
1145 else
1146 {
1147 parsesize:
1148 thissize = strtoul (ptr, &ptr, 10);
1149
1150 if (thissize != 8 && thissize != 16 && thissize != 32
1151 && thissize != 64)
1152 {
1153 as_bad (_("bad size %d in type specifier"), thissize);
1154 return FAIL;
1155 }
1156 }
1157
1158 done:
1159 if (type)
1160 {
1161 type->el[type->elems].type = thistype;
1162 type->el[type->elems].size = thissize;
1163 type->elems++;
1164 }
1165 }
1166
1167 /* Empty/missing type is not a successful parse. */
1168 if (type->elems == 0)
1169 return FAIL;
1170
1171 *str = ptr;
1172
1173 return SUCCESS;
1174 }
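
/* Worked example (illustrative): parsing ".i32.i32.s16" fills in three
   elements -- { NT_integer, 32 }, { NT_integer, 32 } and { NT_signed, 16 } --
   and leaves *STR pointing just past the final "16".  */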
1175
1176 /* Errors may be set multiple times during parsing or bit encoding
1177 (particularly in the Neon bits), but usually the earliest error which is set
1178 will be the most meaningful. Avoid overwriting it with later (cascading)
1179 errors by calling this function. */
1180
1181 static void
1182 first_error (const char *err)
1183 {
1184 if (!inst.error)
1185 inst.error = err;
1186 }
1187
1188 /* Parse a single type, e.g. ".s32", leading period included. */
1189 static int
1190 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1191 {
1192 char *str = *ccp;
1193 struct neon_type optype;
1194
1195 if (*str == '.')
1196 {
1197 if (parse_neon_type (&optype, &str) == SUCCESS)
1198 {
1199 if (optype.elems == 1)
1200 *vectype = optype.el[0];
1201 else
1202 {
1203 first_error (_("only one type should be specified for operand"));
1204 return FAIL;
1205 }
1206 }
1207 else
1208 {
1209 first_error (_("vector type expected"));
1210 return FAIL;
1211 }
1212 }
1213 else
1214 return FAIL;
1215
1216 *ccp = str;
1217
1218 return SUCCESS;
1219 }
1220
1221 /* Special meanings for indices (which have a range of 0-7), which will fit into
1222 a 4-bit integer. */
1223
1224 #define NEON_ALL_LANES 15
1225 #define NEON_INTERLEAVE_LANES 14
1226
1227 /* Parse either a register or a scalar, with an optional type. Return the
1228 register number, and optionally fill in the actual type of the register
 1229 when multiple alternatives were given (REG_TYPE_NDQ) in *RTYPE, and
1230 type/index information in *TYPEINFO. */
1231
1232 static int
1233 parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
1234 enum arm_reg_type *rtype,
1235 struct neon_typed_alias *typeinfo)
1236 {
1237 char *str = *ccp;
1238 struct reg_entry *reg = arm_reg_parse_multi (&str);
1239 struct neon_typed_alias atype;
1240 struct neon_type_el parsetype;
1241
1242 atype.defined = 0;
1243 atype.index = -1;
1244 atype.eltype.type = NT_invtype;
1245 atype.eltype.size = -1;
1246
1247 /* Try alternate syntax for some types of register. Note these are mutually
1248 exclusive with the Neon syntax extensions. */
1249 if (reg == NULL)
1250 {
1251 int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
1252 if (altreg != FAIL)
1253 *ccp = str;
1254 if (typeinfo)
1255 *typeinfo = atype;
1256 return altreg;
1257 }
1258
1259 /* Undo polymorphism when a set of register types may be accepted. */
1260 if ((type == REG_TYPE_NDQ
1261 && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
1262 || (type == REG_TYPE_VFSD
1263 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
1264 || (type == REG_TYPE_NSDQ
1265 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
1266 || reg->type == REG_TYPE_NQ))
1267 || (type == REG_TYPE_MMXWC
1268 && (reg->type == REG_TYPE_MMXWCG)))
1269 type = reg->type;
1270
1271 if (type != reg->type)
1272 return FAIL;
1273
1274 if (reg->neon)
1275 atype = *reg->neon;
1276
1277 if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
1278 {
1279 if ((atype.defined & NTA_HASTYPE) != 0)
1280 {
1281 first_error (_("can't redefine type for operand"));
1282 return FAIL;
1283 }
1284 atype.defined |= NTA_HASTYPE;
1285 atype.eltype = parsetype;
1286 }
1287
1288 if (skip_past_char (&str, '[') == SUCCESS)
1289 {
1290 if (type != REG_TYPE_VFD)
1291 {
1292 first_error (_("only D registers may be indexed"));
1293 return FAIL;
1294 }
1295
1296 if ((atype.defined & NTA_HASINDEX) != 0)
1297 {
1298 first_error (_("can't change index for operand"));
1299 return FAIL;
1300 }
1301
1302 atype.defined |= NTA_HASINDEX;
1303
1304 if (skip_past_char (&str, ']') == SUCCESS)
1305 atype.index = NEON_ALL_LANES;
1306 else
1307 {
1308 expressionS exp;
1309
1310 my_get_expression (&exp, &str, GE_NO_PREFIX);
1311
1312 if (exp.X_op != O_constant)
1313 {
1314 first_error (_("constant expression required"));
1315 return FAIL;
1316 }
1317
1318 if (skip_past_char (&str, ']') == FAIL)
1319 return FAIL;
1320
1321 atype.index = exp.X_add_number;
1322 }
1323 }
1324
1325 if (typeinfo)
1326 *typeinfo = atype;
1327
1328 if (rtype)
1329 *rtype = type;
1330
1331 *ccp = str;
1332
1333 return reg->number;
1334 }
1335
 1336 /* Like arm_reg_parse, but allow the following extra features:
1337 - If RTYPE is non-zero, return the (possibly restricted) type of the
1338 register (e.g. Neon double or quad reg when either has been requested).
1339 - If this is a Neon vector type with additional type information, fill
1340 in the struct pointed to by VECTYPE (if non-NULL).
1341 This function will fault on encountering a scalar.
1342 */
1343
1344 static int
1345 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1346 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1347 {
1348 struct neon_typed_alias atype;
1349 char *str = *ccp;
1350 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1351
1352 if (reg == FAIL)
1353 return FAIL;
1354
1355 /* Do not allow a scalar (reg+index) to parse as a register. */
1356 if ((atype.defined & NTA_HASINDEX) != 0)
1357 {
1358 first_error (_("register operand expected, but got scalar"));
1359 return FAIL;
1360 }
1361
1362 if (vectype)
1363 *vectype = atype.eltype;
1364
1365 *ccp = str;
1366
1367 return reg;
1368 }
1369
1370 #define NEON_SCALAR_REG(X) ((X) >> 4)
1371 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1372
1373 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1374 have enough information to be able to do a good job bounds-checking. So, we
1375 just do easy checks here, and do further checks later. */
1376
1377 static int
1378 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1379 {
1380 int reg;
1381 char *str = *ccp;
1382 struct neon_typed_alias atype;
1383
1384 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1385
1386 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1387 return FAIL;
1388
1389 if (atype.index == NEON_ALL_LANES)
1390 {
1391 first_error (_("scalar must have an index"));
1392 return FAIL;
1393 }
1394 else if (atype.index >= 64 / elsize)
1395 {
1396 first_error (_("scalar index out of range"));
1397 return FAIL;
1398 }
1399
1400 if (type)
1401 *type = atype.eltype;
1402
1403 *ccp = str;
1404
1405 return reg * 16 + atype.index;
1406 }
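
/* Worked example (illustrative): for "d5[2]" with ELSIZE of 16, this
   returns 5 * 16 + 2 == 0x52, so NEON_SCALAR_REG (0x52) is 5 and
   NEON_SCALAR_INDEX (0x52) is 2.  */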
1407
1408 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1409 static long
1410 parse_reg_list (char ** strp)
1411 {
1412 char * str = * strp;
1413 long range = 0;
1414 int another_range;
1415
1416 /* We come back here if we get ranges concatenated by '+' or '|'. */
1417 do
1418 {
1419 another_range = 0;
1420
1421 if (*str == '{')
1422 {
1423 int in_range = 0;
1424 int cur_reg = -1;
1425
1426 str++;
1427 do
1428 {
1429 int reg;
1430
1431 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1432 {
1433 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1434 return FAIL;
1435 }
1436
1437 if (in_range)
1438 {
1439 int i;
1440
1441 if (reg <= cur_reg)
1442 {
1443 first_error (_("bad range in register list"));
1444 return FAIL;
1445 }
1446
1447 for (i = cur_reg + 1; i < reg; i++)
1448 {
1449 if (range & (1 << i))
1450 as_tsktsk
1451 (_("Warning: duplicated register (r%d) in register list"),
1452 i);
1453 else
1454 range |= 1 << i;
1455 }
1456 in_range = 0;
1457 }
1458
1459 if (range & (1 << reg))
1460 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1461 reg);
1462 else if (reg <= cur_reg)
1463 as_tsktsk (_("Warning: register range not in ascending order"));
1464
1465 range |= 1 << reg;
1466 cur_reg = reg;
1467 }
1468 while (skip_past_comma (&str) != FAIL
1469 || (in_range = 1, *str++ == '-'));
1470 str--;
1471
1472 if (*str++ != '}')
1473 {
1474 first_error (_("missing `}'"));
1475 return FAIL;
1476 }
1477 }
1478 else
1479 {
1480 expressionS expr;
1481
1482 if (my_get_expression (&expr, &str, GE_NO_PREFIX))
1483 return FAIL;
1484
1485 if (expr.X_op == O_constant)
1486 {
1487 if (expr.X_add_number
1488 != (expr.X_add_number & 0x0000ffff))
1489 {
1490 inst.error = _("invalid register mask");
1491 return FAIL;
1492 }
1493
1494 if ((range & expr.X_add_number) != 0)
1495 {
1496 int regno = range & expr.X_add_number;
1497
1498 regno &= -regno;
1499 regno = (1 << regno) - 1;
1500 as_tsktsk
1501 (_("Warning: duplicated register (r%d) in register list"),
1502 regno);
1503 }
1504
1505 range |= expr.X_add_number;
1506 }
1507 else
1508 {
1509 if (inst.reloc.type != 0)
1510 {
1511 inst.error = _("expression too complex");
1512 return FAIL;
1513 }
1514
1515 memcpy (&inst.reloc.exp, &expr, sizeof (expressionS));
1516 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1517 inst.reloc.pc_rel = 0;
1518 }
1519 }
1520
1521 if (*str == '|' || *str == '+')
1522 {
1523 str++;
1524 another_range = 1;
1525 }
1526 }
1527 while (another_range);
1528
1529 *strp = str;
1530 return range;
1531 }
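
/* Worked example (illustrative, following the bitmask logic above):
   parsing "{r0, r2, r4-r6, lr}" returns
   (1 << 0) | (1 << 2) | (1 << 4) | (1 << 5) | (1 << 6) | (1 << 14) = 0x4075.  */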
1532
1533 /* Types of registers in a list. */
1534
1535 enum reg_list_els
1536 {
1537 REGLIST_VFP_S,
1538 REGLIST_VFP_D,
1539 REGLIST_NEON_D
1540 };
1541
1542 /* Parse a VFP register list. If the string is invalid return FAIL.
1543 Otherwise return the number of registers, and set PBASE to the first
1544 register. Parses registers of type ETYPE.
1545 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1546 - Q registers can be used to specify pairs of D registers
1547 - { } can be omitted from around a singleton register list
1548 FIXME: This is not implemented, as it would require backtracking in
1549 some cases, e.g.:
1550 vtbl.8 d3,d4,d5
1551 This could be done (the meaning isn't really ambiguous), but doesn't
1552 fit in well with the current parsing framework.
1553 - 32 D registers may be used (also true for VFPv3).
1554 FIXME: Types are ignored in these register lists, which is probably a
1555 bug. */
1556
1557 static int
1558 parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
1559 {
1560 char *str = *ccp;
1561 int base_reg;
1562 int new_base;
1563 enum arm_reg_type regtype = 0;
1564 int max_regs = 0;
1565 int count = 0;
1566 int warned = 0;
1567 unsigned long mask = 0;
1568 int i;
1569
1570 if (*str != '{')
1571 {
1572 inst.error = _("expecting {");
1573 return FAIL;
1574 }
1575
1576 str++;
1577
1578 switch (etype)
1579 {
1580 case REGLIST_VFP_S:
1581 regtype = REG_TYPE_VFS;
1582 max_regs = 32;
1583 break;
1584
1585 case REGLIST_VFP_D:
1586 regtype = REG_TYPE_VFD;
1587 break;
1588
1589 case REGLIST_NEON_D:
1590 regtype = REG_TYPE_NDQ;
1591 break;
1592 }
1593
1594 if (etype != REGLIST_VFP_S)
1595 {
1596 /* VFPv3 allows 32 D registers. */
1597 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
1598 {
1599 max_regs = 32;
1600 if (thumb_mode)
1601 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1602 fpu_vfp_ext_v3);
1603 else
1604 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1605 fpu_vfp_ext_v3);
1606 }
1607 else
1608 max_regs = 16;
1609 }
1610
1611 base_reg = max_regs;
1612
1613 do
1614 {
1615 int setmask = 1, addregs = 1;
1616
1617 new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);
1618
1619 if (new_base == FAIL)
1620 {
1621 first_error (_(reg_expected_msgs[regtype]));
1622 return FAIL;
1623 }
1624
1625 if (new_base >= max_regs)
1626 {
1627 first_error (_("register out of range in list"));
1628 return FAIL;
1629 }
1630
1631 /* Note: a value of 2 * n is returned for the register Q<n>. */
1632 if (regtype == REG_TYPE_NQ)
1633 {
1634 setmask = 3;
1635 addregs = 2;
1636 }
1637
1638 if (new_base < base_reg)
1639 base_reg = new_base;
1640
1641 if (mask & (setmask << new_base))
1642 {
1643 first_error (_("invalid register list"));
1644 return FAIL;
1645 }
1646
1647 if ((mask >> new_base) != 0 && ! warned)
1648 {
1649 as_tsktsk (_("register list not in ascending order"));
1650 warned = 1;
1651 }
1652
1653 mask |= setmask << new_base;
1654 count += addregs;
1655
1656 if (*str == '-') /* We have the start of a range expression */
1657 {
1658 int high_range;
1659
1660 str++;
1661
1662 if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
1663 == FAIL)
1664 {
1665 inst.error = gettext (reg_expected_msgs[regtype]);
1666 return FAIL;
1667 }
1668
1669 if (high_range >= max_regs)
1670 {
1671 first_error (_("register out of range in list"));
1672 return FAIL;
1673 }
1674
1675 if (regtype == REG_TYPE_NQ)
1676 high_range = high_range + 1;
1677
1678 if (high_range <= new_base)
1679 {
1680 inst.error = _("register range not in ascending order");
1681 return FAIL;
1682 }
1683
1684 for (new_base += addregs; new_base <= high_range; new_base += addregs)
1685 {
1686 if (mask & (setmask << new_base))
1687 {
1688 inst.error = _("invalid register list");
1689 return FAIL;
1690 }
1691
1692 mask |= setmask << new_base;
1693 count += addregs;
1694 }
1695 }
1696 }
1697 while (skip_past_comma (&str) != FAIL);
1698
1699 str++;
1700
1701 /* Sanity check -- should have raised a parse error above. */
1702 if (count == 0 || count > max_regs)
1703 abort ();
1704
1705 *pbase = base_reg;
1706
1707 /* Final test -- the registers must be consecutive. */
1708 mask >>= base_reg;
1709 for (i = 0; i < count; i++)
1710 {
1711 if ((mask & (1u << i)) == 0)
1712 {
1713 inst.error = _("non-contiguous register range");
1714 return FAIL;
1715 }
1716 }
1717
1718 *ccp = str;
1719
1720 return count;
1721 }
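
/* Worked example (illustrative): "{d4-d7}" parsed with REGLIST_VFP_D
   returns a count of 4 with *PBASE set to 4, while "{s0, s2}" with
   REGLIST_VFP_S is rejected by the final contiguity check.  */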
1722
1723 /* True if two alias types are the same. */
1724
1725 static int
1726 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1727 {
1728 if (!a && !b)
1729 return 1;
1730
1731 if (!a || !b)
1732 return 0;
1733
1734 if (a->defined != b->defined)
1735 return 0;
1736
1737 if ((a->defined & NTA_HASTYPE) != 0
1738 && (a->eltype.type != b->eltype.type
1739 || a->eltype.size != b->eltype.size))
1740 return 0;
1741
1742 if ((a->defined & NTA_HASINDEX) != 0
1743 && (a->index != b->index))
1744 return 0;
1745
1746 return 1;
1747 }
1748
1749 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1750 The base register is put in *PBASE.
1751 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1752 the return value.
1753 The register stride (minus one) is put in bit 4 of the return value.
1754 Bits [6:5] encode the list length (minus one).
1755 The type of the list elements is put in *ELTYPE, if non-NULL. */
1756
1757 #define NEON_LANE(X) ((X) & 0xf)
1758 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1759 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
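
/* Worked example (illustrative): a list such as "{d0[1], d2[1]}" gives
   base register 0, lane 1, stride 2 and length 2, i.e. a return value of
   1 | ((2 - 1) << 4) | ((2 - 1) << 5) == 0x31, from which NEON_LANE,
   NEON_REG_STRIDE and NEON_REGLIST_LENGTH recover 1, 2 and 2.  */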
1760
1761 static int
1762 parse_neon_el_struct_list (char **str, unsigned *pbase,
1763 struct neon_type_el *eltype)
1764 {
1765 char *ptr = *str;
1766 int base_reg = -1;
1767 int reg_incr = -1;
1768 int count = 0;
1769 int lane = -1;
1770 int leading_brace = 0;
1771 enum arm_reg_type rtype = REG_TYPE_NDQ;
1772 int addregs = 1;
1773 const char *const incr_error = "register stride must be 1 or 2";
1774 const char *const type_error = "mismatched element/structure types in list";
1775 struct neon_typed_alias firsttype;
1776
1777 if (skip_past_char (&ptr, '{') == SUCCESS)
1778 leading_brace = 1;
1779
1780 do
1781 {
1782 struct neon_typed_alias atype;
1783 int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);
1784
1785 if (getreg == FAIL)
1786 {
1787 first_error (_(reg_expected_msgs[rtype]));
1788 return FAIL;
1789 }
1790
1791 if (base_reg == -1)
1792 {
1793 base_reg = getreg;
1794 if (rtype == REG_TYPE_NQ)
1795 {
1796 reg_incr = 1;
1797 addregs = 2;
1798 }
1799 firsttype = atype;
1800 }
1801 else if (reg_incr == -1)
1802 {
1803 reg_incr = getreg - base_reg;
1804 if (reg_incr < 1 || reg_incr > 2)
1805 {
1806 first_error (_(incr_error));
1807 return FAIL;
1808 }
1809 }
1810 else if (getreg != base_reg + reg_incr * count)
1811 {
1812 first_error (_(incr_error));
1813 return FAIL;
1814 }
1815
1816 if (!neon_alias_types_same (&atype, &firsttype))
1817 {
1818 first_error (_(type_error));
1819 return FAIL;
1820 }
1821
1822 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
1823 modes. */
1824 if (ptr[0] == '-')
1825 {
1826 struct neon_typed_alias htype;
1827 int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
1828 if (lane == -1)
1829 lane = NEON_INTERLEAVE_LANES;
1830 else if (lane != NEON_INTERLEAVE_LANES)
1831 {
1832 first_error (_(type_error));
1833 return FAIL;
1834 }
1835 if (reg_incr == -1)
1836 reg_incr = 1;
1837 else if (reg_incr != 1)
1838 {
1839 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
1840 return FAIL;
1841 }
1842 ptr++;
1843 hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
1844 if (hireg == FAIL)
1845 {
1846 first_error (_(reg_expected_msgs[rtype]));
1847 return FAIL;
1848 }
1849 if (!neon_alias_types_same (&htype, &firsttype))
1850 {
1851 first_error (_(type_error));
1852 return FAIL;
1853 }
1854 count += hireg + dregs - getreg;
1855 continue;
1856 }
1857
1858 /* If we're using Q registers, we can't use [] or [n] syntax. */
1859 if (rtype == REG_TYPE_NQ)
1860 {
1861 count += 2;
1862 continue;
1863 }
1864
1865 if ((atype.defined & NTA_HASINDEX) != 0)
1866 {
1867 if (lane == -1)
1868 lane = atype.index;
1869 else if (lane != atype.index)
1870 {
1871 first_error (_(type_error));
1872 return FAIL;
1873 }
1874 }
1875 else if (lane == -1)
1876 lane = NEON_INTERLEAVE_LANES;
1877 else if (lane != NEON_INTERLEAVE_LANES)
1878 {
1879 first_error (_(type_error));
1880 return FAIL;
1881 }
1882 count++;
1883 }
1884 while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);
1885
1886 /* No lane set by [x]. We must be interleaving structures. */
1887 if (lane == -1)
1888 lane = NEON_INTERLEAVE_LANES;
1889
1890 /* Sanity check. */
1891 if (lane == -1 || base_reg == -1 || count < 1 || count > 4
1892 || (count > 1 && reg_incr == -1))
1893 {
1894 first_error (_("error parsing element/structure list"));
1895 return FAIL;
1896 }
1897
1898 if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
1899 {
1900 first_error (_("expected }"));
1901 return FAIL;
1902 }
1903
1904 if (reg_incr == -1)
1905 reg_incr = 1;
1906
1907 if (eltype)
1908 *eltype = firsttype.eltype;
1909
1910 *pbase = base_reg;
1911 *str = ptr;
1912
1913 return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
1914 }
1915
1916 /* Parse an explicit relocation suffix on an expression. This is
1917 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
1918 arm_reloc_hsh contains no entries, so this function can only
1919 succeed if there is no () after the word. Returns -1 on error,
1920 BFD_RELOC_UNUSED if there wasn't any suffix. */
1921 static int
1922 parse_reloc (char **str)
1923 {
1924 struct reloc_entry *r;
1925 char *p, *q;
1926
1927 if (**str != '(')
1928 return BFD_RELOC_UNUSED;
1929
1930 p = *str + 1;
1931 q = p;
1932
1933 while (*q && *q != ')' && *q != ',')
1934 q++;
1935 if (*q != ')')
1936 return -1;
1937
1938 if ((r = hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
1939 return -1;
1940
1941 *str = q + 1;
1942 return r->reloc;
1943 }
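
/* For example (illustrative): on ELF targets the table built elsewhere in
   this file contains entries such as "got" and "gotoff", so an operand
   written as "sym(GOTOFF)" has its suffix consumed here and the matching
   BFD reloc code returned; anything else yields BFD_RELOC_UNUSED or -1.  */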
1944
1945 /* Directives: register aliases. */
1946
1947 static struct reg_entry *
1948 insert_reg_alias (char *str, int number, int type)
1949 {
1950 struct reg_entry *new;
1951 const char *name;
1952
1953 if ((new = hash_find (arm_reg_hsh, str)) != 0)
1954 {
1955 if (new->builtin)
1956 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
1957
1958 /* Only warn about a redefinition if it's not defined as the
1959 same register. */
1960 else if (new->number != number || new->type != type)
1961 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1962
1963 return 0;
1964 }
1965
1966 name = xstrdup (str);
1967 new = xmalloc (sizeof (struct reg_entry));
1968
1969 new->name = name;
1970 new->number = number;
1971 new->type = type;
1972 new->builtin = FALSE;
1973 new->neon = NULL;
1974
1975 if (hash_insert (arm_reg_hsh, name, (PTR) new))
1976 abort ();
1977
1978 return new;
1979 }
1980
1981 static void
1982 insert_neon_reg_alias (char *str, int number, int type,
1983 struct neon_typed_alias *atype)
1984 {
1985 struct reg_entry *reg = insert_reg_alias (str, number, type);
1986
1987 if (!reg)
1988 {
1989 first_error (_("attempt to redefine typed alias"));
1990 return;
1991 }
1992
1993 if (atype)
1994 {
1995 reg->neon = xmalloc (sizeof (struct neon_typed_alias));
1996 *reg->neon = *atype;
1997 }
1998 }
1999
2000 /* Look for the .req directive. This is of the form:
2001
2002 new_register_name .req existing_register_name
2003
2004 If we find one, or if it looks sufficiently like one that we want to
2005 handle any error here, return non-zero. Otherwise return zero. */
2006
2007 static int
2008 create_register_alias (char * newname, char *p)
2009 {
2010 struct reg_entry *old;
2011 char *oldname, *nbuf;
2012 size_t nlen;
2013
2014 /* The input scrubber ensures that whitespace after the mnemonic is
2015 collapsed to single spaces. */
2016 oldname = p;
2017 if (strncmp (oldname, " .req ", 6) != 0)
2018 return 0;
2019
2020 oldname += 6;
2021 if (*oldname == '\0')
2022 return 0;
2023
2024 old = hash_find (arm_reg_hsh, oldname);
2025 if (!old)
2026 {
2027 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2028 return 1;
2029 }
2030
2031 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2032 the desired alias name, and p points to its end. If not, then
2033 the desired alias name is in the global original_case_string. */
2034 #ifdef TC_CASE_SENSITIVE
2035 nlen = p - newname;
2036 #else
2037 newname = original_case_string;
2038 nlen = strlen (newname);
2039 #endif
2040
2041 nbuf = alloca (nlen + 1);
2042 memcpy (nbuf, newname, nlen);
2043 nbuf[nlen] = '\0';
2044
2045 /* Create aliases under the new name as stated; an all-lowercase
2046 version of the new name; and an all-uppercase version of the new
2047 name. */
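/* For instance (illustrative), "Foo .req r0" also makes "FOO" and "foo"
usable as aliases for r0.  */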
2048 insert_reg_alias (nbuf, old->number, old->type);
2049
2050 for (p = nbuf; *p; p++)
2051 *p = TOUPPER (*p);
2052
2053 if (strncmp (nbuf, newname, nlen))
2054 insert_reg_alias (nbuf, old->number, old->type);
2055
2056 for (p = nbuf; *p; p++)
2057 *p = TOLOWER (*p);
2058
2059 if (strncmp (nbuf, newname, nlen))
2060 insert_reg_alias (nbuf, old->number, old->type);
2061
2062 return 1;
2063 }
2064
2065 /* Create a Neon typed/indexed register alias using directives, e.g.:
2066 X .dn d5.s32[1]
2067 Y .qn 6.s16
2068 Z .dn d7
2069 T .dn Z[0]
2070 These typed registers can be used instead of the types specified after the
2071 Neon mnemonic, so long as all operands given have types. Types can also be
2072 specified directly, e.g.:
2073 vadd d0.s32, d1.s32, d2.s32
2074 */
2075
2076 static int
2077 create_neon_reg_alias (char *newname, char *p)
2078 {
2079 enum arm_reg_type basetype;
2080 struct reg_entry *basereg;
2081 struct reg_entry mybasereg;
2082 struct neon_type ntype;
2083 struct neon_typed_alias typeinfo;
2084 char *namebuf, *nameend;
2085 int namelen;
2086
2087 typeinfo.defined = 0;
2088 typeinfo.eltype.type = NT_invtype;
2089 typeinfo.eltype.size = -1;
2090 typeinfo.index = -1;
2091
2092 nameend = p;
2093
2094 if (strncmp (p, " .dn ", 5) == 0)
2095 basetype = REG_TYPE_VFD;
2096 else if (strncmp (p, " .qn ", 5) == 0)
2097 basetype = REG_TYPE_NQ;
2098 else
2099 return 0;
2100
2101 p += 5;
2102
2103 if (*p == '\0')
2104 return 0;
2105
2106 basereg = arm_reg_parse_multi (&p);
2107
2108 if (basereg && basereg->type != basetype)
2109 {
2110 as_bad (_("bad type for register"));
2111 return 0;
2112 }
2113
2114 if (basereg == NULL)
2115 {
2116 expressionS exp;
2117 /* Try parsing as an integer. */
2118 my_get_expression (&exp, &p, GE_NO_PREFIX);
2119 if (exp.X_op != O_constant)
2120 {
2121 as_bad (_("expression must be constant"));
2122 return 0;
2123 }
2124 basereg = &mybasereg;
2125 basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2126 : exp.X_add_number;
2127 basereg->neon = 0;
2128 }
2129
2130 if (basereg->neon)
2131 typeinfo = *basereg->neon;
2132
2133 if (parse_neon_type (&ntype, &p) == SUCCESS)
2134 {
2135 /* We got a type. */
2136 if (typeinfo.defined & NTA_HASTYPE)
2137 {
2138 as_bad (_("can't redefine the type of a register alias"));
2139 return 0;
2140 }
2141
2142 typeinfo.defined |= NTA_HASTYPE;
2143 if (ntype.elems != 1)
2144 {
2145 as_bad (_("you must specify a single type only"));
2146 return 0;
2147 }
2148 typeinfo.eltype = ntype.el[0];
2149 }
2150
2151 if (skip_past_char (&p, '[') == SUCCESS)
2152 {
2153 expressionS exp;
2154 /* We got a scalar index. */
2155
2156 if (typeinfo.defined & NTA_HASINDEX)
2157 {
2158 as_bad (_("can't redefine the index of a scalar alias"));
2159 return 0;
2160 }
2161
2162 my_get_expression (&exp, &p, GE_NO_PREFIX);
2163
2164 if (exp.X_op != O_constant)
2165 {
2166 as_bad (_("scalar index must be constant"));
2167 return 0;
2168 }
2169
2170 typeinfo.defined |= NTA_HASINDEX;
2171 typeinfo.index = exp.X_add_number;
2172
2173 if (skip_past_char (&p, ']') == FAIL)
2174 {
2175 as_bad (_("expecting ]"));
2176 return 0;
2177 }
2178 }
2179
2180 namelen = nameend - newname;
2181 namebuf = alloca (namelen + 1);
2182 strncpy (namebuf, newname, namelen);
2183 namebuf[namelen] = '\0';
2184
2185 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2186 typeinfo.defined != 0 ? &typeinfo : NULL);
2187
2188 /* Insert name in all uppercase. */
2189 for (p = namebuf; *p; p++)
2190 *p = TOUPPER (*p);
2191
2192 if (strncmp (namebuf, newname, namelen))
2193 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2194 typeinfo.defined != 0 ? &typeinfo : NULL);
2195
2196 /* Insert name in all lowercase. */
2197 for (p = namebuf; *p; p++)
2198 *p = TOLOWER (*p);
2199
2200 if (strncmp (namebuf, newname, namelen))
2201 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2202 typeinfo.defined != 0 ? &typeinfo : NULL);
2203
2204 return 1;
2205 }
2206
2207 /* Should never be called, as .req goes between the alias and the
2208 register name, not at the beginning of the line. */
2209 static void
2210 s_req (int a ATTRIBUTE_UNUSED)
2211 {
2212 as_bad (_("invalid syntax for .req directive"));
2213 }
2214
2215 static void
2216 s_dn (int a ATTRIBUTE_UNUSED)
2217 {
2218 as_bad (_("invalid syntax for .dn directive"));
2219 }
2220
2221 static void
2222 s_qn (int a ATTRIBUTE_UNUSED)
2223 {
2224 as_bad (_("invalid syntax for .qn directive"));
2225 }
2226
2227 /* The .unreq directive deletes an alias which was previously defined
2228 by .req. For example:
2229
2230 my_alias .req r11
2231 .unreq my_alias */
2232
2233 static void
2234 s_unreq (int a ATTRIBUTE_UNUSED)
2235 {
2236 char * name;
2237 char saved_char;
2238
2239 name = input_line_pointer;
2240
2241 while (*input_line_pointer != 0
2242 && *input_line_pointer != ' '
2243 && *input_line_pointer != '\n')
2244 ++input_line_pointer;
2245
2246 saved_char = *input_line_pointer;
2247 *input_line_pointer = 0;
2248
2249 if (!*name)
2250 as_bad (_("invalid syntax for .unreq directive"));
2251 else
2252 {
2253 struct reg_entry *reg = hash_find (arm_reg_hsh, name);
2254
2255 if (!reg)
2256 as_bad (_("unknown register alias '%s'"), name);
2257 else if (reg->builtin)
2258 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
2259 name);
2260 else
2261 {
2262 hash_delete (arm_reg_hsh, name);
2263 free ((char *) reg->name);
2264 if (reg->neon)
2265 free (reg->neon);
2266 free (reg);
2267 }
2268 }
2269
2270 *input_line_pointer = saved_char;
2271 demand_empty_rest_of_line ();
2272 }
2273
2274 /* Directives: Instruction set selection. */
2275
2276 #ifdef OBJ_ELF
2277 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2278 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2279 Note that previously, $a and $t had type STT_FUNC (BSF_FUNCTION flag),
2280 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
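/* As an illustration: a .word emitted in the middle of ARM code is
preceded by a $d mapping symbol, and the next instruction assembled
after it gets a fresh $a (or $t in Thumb mode).  */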
2281
2282 static enum mstate mapstate = MAP_UNDEFINED;
2283
2284 static void
2285 mapping_state (enum mstate state)
2286 {
2287 symbolS * symbolP;
2288 const char * symname;
2289 int type;
2290
2291 if (mapstate == state)
2292 /* The mapping symbol has already been emitted.
2293 There is nothing else to do. */
2294 return;
2295
2296 mapstate = state;
2297
2298 switch (state)
2299 {
2300 case MAP_DATA:
2301 symname = "$d";
2302 type = BSF_NO_FLAGS;
2303 break;
2304 case MAP_ARM:
2305 symname = "$a";
2306 type = BSF_NO_FLAGS;
2307 break;
2308 case MAP_THUMB:
2309 symname = "$t";
2310 type = BSF_NO_FLAGS;
2311 break;
2312 case MAP_UNDEFINED:
2313 return;
2314 default:
2315 abort ();
2316 }
2317
2318 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2319
2320 symbolP = symbol_new (symname, now_seg, (valueT) frag_now_fix (), frag_now);
2321 symbol_table_insert (symbolP);
2322 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
2323
2324 switch (state)
2325 {
2326 case MAP_ARM:
2327 THUMB_SET_FUNC (symbolP, 0);
2328 ARM_SET_THUMB (symbolP, 0);
2329 ARM_SET_INTERWORK (symbolP, support_interwork);
2330 break;
2331
2332 case MAP_THUMB:
2333 THUMB_SET_FUNC (symbolP, 1);
2334 ARM_SET_THUMB (symbolP, 1);
2335 ARM_SET_INTERWORK (symbolP, support_interwork);
2336 break;
2337
2338 case MAP_DATA:
2339 default:
2340 return;
2341 }
2342 }
2343 #else
2344 #define mapping_state(x) /* nothing */
2345 #endif
2346
2347 /* Find the real, Thumb encoded start of a Thumb function. */
2348
2349 static symbolS *
2350 find_real_start (symbolS * symbolP)
2351 {
2352 char * real_start;
2353 const char * name = S_GET_NAME (symbolP);
2354 symbolS * new_target;
2355
2356 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2357 #define STUB_NAME ".real_start_of"
2358
2359 if (name == NULL)
2360 abort ();
2361
2362 /* The compiler may generate BL instructions to local labels because
2363 it needs to perform a branch to a far away location. These labels
2364 do not have a corresponding ".real_start_of" label. We check
2365 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2366 the ".real_start_of" convention for nonlocal branches. */
2367 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2368 return symbolP;
2369
2370 real_start = ACONCAT ((STUB_NAME, name, NULL));
2371 new_target = symbol_find (real_start);
2372
2373 if (new_target == NULL)
2374 {
2375 as_warn (_("Failed to find real start of function: %s\n"), name);
2376 new_target = symbolP;
2377 }
2378
2379 return new_target;
2380 }
2381
2382 static void
2383 opcode_select (int width)
2384 {
2385 switch (width)
2386 {
2387 case 16:
2388 if (! thumb_mode)
2389 {
2390 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2391 as_bad (_("selected processor does not support THUMB opcodes"));
2392
2393 thumb_mode = 1;
2394 /* No need to force the alignment, since we will have been
2395 coming from ARM mode, which is word-aligned. */
2396 record_alignment (now_seg, 1);
2397 }
2398 mapping_state (MAP_THUMB);
2399 break;
2400
2401 case 32:
2402 if (thumb_mode)
2403 {
2404 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2405 as_bad (_("selected processor does not support ARM opcodes"));
2406
2407 thumb_mode = 0;
2408
2409 if (!need_pass_2)
2410 frag_align (2, 0, 0);
2411
2412 record_alignment (now_seg, 1);
2413 }
2414 mapping_state (MAP_ARM);
2415 break;
2416
2417 default:
2418 as_bad (_("invalid instruction size selected (%d)"), width);
2419 }
2420 }
2421
2422 static void
2423 s_arm (int ignore ATTRIBUTE_UNUSED)
2424 {
2425 opcode_select (32);
2426 demand_empty_rest_of_line ();
2427 }
2428
2429 static void
2430 s_thumb (int ignore ATTRIBUTE_UNUSED)
2431 {
2432 opcode_select (16);
2433 demand_empty_rest_of_line ();
2434 }
2435
2436 static void
2437 s_code (int unused ATTRIBUTE_UNUSED)
2438 {
2439 int temp;
2440
2441 temp = get_absolute_expression ();
2442 switch (temp)
2443 {
2444 case 16:
2445 case 32:
2446 opcode_select (temp);
2447 break;
2448
2449 default:
2450 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2451 }
2452 }
2453
2454 static void
2455 s_force_thumb (int ignore ATTRIBUTE_UNUSED)
2456 {
2457 /* If we are not already in thumb mode go into it, EVEN if
2458 the target processor does not support thumb instructions.
2459 This is used by gcc/config/arm/lib1funcs.asm for example
2460 to compile interworking support functions even if the
2461 target processor should not support interworking. */
2462 if (! thumb_mode)
2463 {
2464 thumb_mode = 2;
2465 record_alignment (now_seg, 1);
2466 }
2467
2468 demand_empty_rest_of_line ();
2469 }
2470
2471 static void
2472 s_thumb_func (int ignore ATTRIBUTE_UNUSED)
2473 {
2474 s_thumb (0);
2475
2476 /* The following label is the name/address of the start of a Thumb function.
2477 We need to know this for the interworking support. */
2478 label_is_thumb_function_name = TRUE;
2479 }
2480
2481 /* Perform a .set directive, but also mark the alias as
2482 being a thumb function. */
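/* Illustrative use (the symbol names are arbitrary):
.thumb_set foo, bar @ foo aliases bar and is marked as a Thumb function  */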
2483
2484 static void
2485 s_thumb_set (int equiv)
2486 {
2487 /* XXX the following is a duplicate of the code for s_set() in read.c
2488 We cannot just call that code as we need to get at the symbol that
2489 is created. */
2490 char * name;
2491 char delim;
2492 char * end_name;
2493 symbolS * symbolP;
2494
2495 /* Especial apologies for the random logic:
2496 This just grew, and could be parsed much more simply!
2497 Dean - in haste. */
2498 name = input_line_pointer;
2499 delim = get_symbol_end ();
2500 end_name = input_line_pointer;
2501 *end_name = delim;
2502
2503 if (*input_line_pointer != ',')
2504 {
2505 *end_name = 0;
2506 as_bad (_("expected comma after name \"%s\""), name);
2507 *end_name = delim;
2508 ignore_rest_of_line ();
2509 return;
2510 }
2511
2512 input_line_pointer++;
2513 *end_name = 0;
2514
2515 if (name[0] == '.' && name[1] == '\0')
2516 {
2517 /* XXX - this should not happen to .thumb_set. */
2518 abort ();
2519 }
2520
2521 if ((symbolP = symbol_find (name)) == NULL
2522 && (symbolP = md_undefined_symbol (name)) == NULL)
2523 {
2524 #ifndef NO_LISTING
2525 /* When doing symbol listings, play games with dummy fragments living
2526 outside the normal fragment chain to record the file and line info
2527 for this symbol. */
2528 if (listing & LISTING_SYMBOLS)
2529 {
2530 extern struct list_info_struct * listing_tail;
2531 fragS * dummy_frag = xmalloc (sizeof (fragS));
2532
2533 memset (dummy_frag, 0, sizeof (fragS));
2534 dummy_frag->fr_type = rs_fill;
2535 dummy_frag->line = listing_tail;
2536 symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
2537 dummy_frag->fr_symbol = symbolP;
2538 }
2539 else
2540 #endif
2541 symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
2542
2543 #ifdef OBJ_COFF
2544 /* "set" symbols are local unless otherwise specified. */
2545 SF_SET_LOCAL (symbolP);
2546 #endif /* OBJ_COFF */
2547 } /* Make a new symbol. */
2548
2549 symbol_table_insert (symbolP);
2550
2551 * end_name = delim;
2552
2553 if (equiv
2554 && S_IS_DEFINED (symbolP)
2555 && S_GET_SEGMENT (symbolP) != reg_section)
2556 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
2557
2558 pseudo_set (symbolP);
2559
2560 demand_empty_rest_of_line ();
2561
2562 /* XXX Now we come to the Thumb specific bit of code. */
2563
2564 THUMB_SET_FUNC (symbolP, 1);
2565 ARM_SET_THUMB (symbolP, 1);
2566 #if defined OBJ_ELF || defined OBJ_COFF
2567 ARM_SET_INTERWORK (symbolP, support_interwork);
2568 #endif
2569 }
2570
2571 /* Directives: Mode selection. */
2572
2573 /* .syntax [unified|divided] - choose the new unified syntax
2574 (same for Arm and Thumb encoding, modulo slight differences in what
2575 can be represented) or the old divergent syntax for each mode. */
2576 static void
2577 s_syntax (int unused ATTRIBUTE_UNUSED)
2578 {
2579 char *name, delim;
2580
2581 name = input_line_pointer;
2582 delim = get_symbol_end ();
2583
2584 if (!strcasecmp (name, "unified"))
2585 unified_syntax = TRUE;
2586 else if (!strcasecmp (name, "divided"))
2587 unified_syntax = FALSE;
2588 else
2589 {
2590 as_bad (_("unrecognized syntax mode \"%s\""), name);
2591 return;
2592 }
2593 *input_line_pointer = delim;
2594 demand_empty_rest_of_line ();
2595 }
2596
2597 /* Directives: sectioning and alignment. */
2598
2599 /* Same as s_align_ptwo but align 0 => align 2. */
2600
2601 static void
2602 s_align (int unused ATTRIBUTE_UNUSED)
2603 {
2604 int temp;
2605 long temp_fill;
2606 long max_alignment = 15;
2607
2608 temp = get_absolute_expression ();
2609 if (temp > max_alignment)
2610 as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2611 else if (temp < 0)
2612 {
2613 as_bad (_("alignment negative. 0 assumed."));
2614 temp = 0;
2615 }
2616
2617 if (*input_line_pointer == ',')
2618 {
2619 input_line_pointer++;
2620 temp_fill = get_absolute_expression ();
2621 }
2622 else
2623 temp_fill = 0;
2624
2625 if (!temp)
2626 temp = 2;
2627
2628 /* Only make a frag if we HAVE to. */
2629 if (temp && !need_pass_2)
2630 frag_align (temp, (int) temp_fill, 0);
2631 demand_empty_rest_of_line ();
2632
2633 record_alignment (now_seg, temp);
2634 }
2635
2636 static void
2637 s_bss (int ignore ATTRIBUTE_UNUSED)
2638 {
2639 /* We don't support putting frags in the BSS segment, we fake it by
2640 marking in_bss, then looking at s_skip for clues. */
2641 subseg_set (bss_section, 0);
2642 demand_empty_rest_of_line ();
2643 mapping_state (MAP_DATA);
2644 }
2645
2646 static void
2647 s_even (int ignore ATTRIBUTE_UNUSED)
2648 {
2649 /* Never make a frag if we expect an extra pass. */
2650 if (!need_pass_2)
2651 frag_align (1, 0, 0);
2652
2653 record_alignment (now_seg, 1);
2654
2655 demand_empty_rest_of_line ();
2656 }
2657
2658 /* Directives: Literal pools. */
2659
2660 static literal_pool *
2661 find_literal_pool (void)
2662 {
2663 literal_pool * pool;
2664
2665 for (pool = list_of_pools; pool != NULL; pool = pool->next)
2666 {
2667 if (pool->section == now_seg
2668 && pool->sub_section == now_subseg)
2669 break;
2670 }
2671
2672 return pool;
2673 }
2674
2675 static literal_pool *
2676 find_or_make_literal_pool (void)
2677 {
2678 /* Next literal pool ID number. */
2679 static unsigned int latest_pool_num = 1;
2680 literal_pool * pool;
2681
2682 pool = find_literal_pool ();
2683
2684 if (pool == NULL)
2685 {
2686 /* Create a new pool. */
2687 pool = xmalloc (sizeof (* pool));
2688 if (! pool)
2689 return NULL;
2690
2691 pool->next_free_entry = 0;
2692 pool->section = now_seg;
2693 pool->sub_section = now_subseg;
2694 pool->next = list_of_pools;
2695 pool->symbol = NULL;
2696
2697 /* Add it to the list. */
2698 list_of_pools = pool;
2699 }
2700
2701 /* New pools, and emptied pools, will have a NULL symbol. */
2702 if (pool->symbol == NULL)
2703 {
2704 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
2705 (valueT) 0, &zero_address_frag);
2706 pool->id = latest_pool_num ++;
2707 }
2708
2709 /* Done. */
2710 return pool;
2711 }
2712
2713 /* Add the literal in the global 'inst'
2714 structure to the relevant literal pool. */
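/* For example (illustrative), "ldr r0, =0x12345678" cannot encode its
constant as an immediate, so the value is added to the current pool here
and the instruction becomes a PC-relative load from that pool entry.  */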
2715
2716 static int
2717 add_to_lit_pool (void)
2718 {
2719 literal_pool * pool;
2720 unsigned int entry;
2721
2722 pool = find_or_make_literal_pool ();
2723
2724 /* Check if this literal value is already in the pool. */
2725 for (entry = 0; entry < pool->next_free_entry; entry ++)
2726 {
2727 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2728 && (inst.reloc.exp.X_op == O_constant)
2729 && (pool->literals[entry].X_add_number
2730 == inst.reloc.exp.X_add_number)
2731 && (pool->literals[entry].X_unsigned
2732 == inst.reloc.exp.X_unsigned))
2733 break;
2734
2735 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2736 && (inst.reloc.exp.X_op == O_symbol)
2737 && (pool->literals[entry].X_add_number
2738 == inst.reloc.exp.X_add_number)
2739 && (pool->literals[entry].X_add_symbol
2740 == inst.reloc.exp.X_add_symbol)
2741 && (pool->literals[entry].X_op_symbol
2742 == inst.reloc.exp.X_op_symbol))
2743 break;
2744 }
2745
2746 /* Do we need to create a new entry? */
2747 if (entry == pool->next_free_entry)
2748 {
2749 if (entry >= MAX_LITERAL_POOL_SIZE)
2750 {
2751 inst.error = _("literal pool overflow");
2752 return FAIL;
2753 }
2754
2755 pool->literals[entry] = inst.reloc.exp;
2756 pool->next_free_entry += 1;
2757 }
2758
2759 inst.reloc.exp.X_op = O_symbol;
2760 inst.reloc.exp.X_add_number = ((int) entry) * 4;
2761 inst.reloc.exp.X_add_symbol = pool->symbol;
2762
2763 return SUCCESS;
2764 }
2765
2766 /* Can't use symbol_new here, so have to create a symbol and then at
2767 a later date assign it a value. That's what these functions do. */
2768
2769 static void
2770 symbol_locate (symbolS * symbolP,
2771 const char * name, /* It is copied, the caller can modify. */
2772 segT segment, /* Segment identifier (SEG_<something>). */
2773 valueT valu, /* Symbol value. */
2774 fragS * frag) /* Associated fragment. */
2775 {
2776 unsigned int name_length;
2777 char * preserved_copy_of_name;
2778
2779 name_length = strlen (name) + 1; /* +1 for \0. */
2780 obstack_grow (&notes, name, name_length);
2781 preserved_copy_of_name = obstack_finish (&notes);
2782
2783 #ifdef tc_canonicalize_symbol_name
2784 preserved_copy_of_name =
2785 tc_canonicalize_symbol_name (preserved_copy_of_name);
2786 #endif
2787
2788 S_SET_NAME (symbolP, preserved_copy_of_name);
2789
2790 S_SET_SEGMENT (symbolP, segment);
2791 S_SET_VALUE (symbolP, valu);
2792 symbol_clear_list_pointers (symbolP);
2793
2794 symbol_set_frag (symbolP, frag);
2795
2796 /* Link to end of symbol chain. */
2797 {
2798 extern int symbol_table_frozen;
2799
2800 if (symbol_table_frozen)
2801 abort ();
2802 }
2803
2804 symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
2805
2806 obj_symbol_new_hook (symbolP);
2807
2808 #ifdef tc_symbol_new_hook
2809 tc_symbol_new_hook (symbolP);
2810 #endif
2811
2812 #ifdef DEBUG_SYMS
2813 verify_symbol_chain (symbol_rootP, symbol_lastP);
2814 #endif /* DEBUG_SYMS */
2815 }
2816
2817
2818 static void
2819 s_ltorg (int ignored ATTRIBUTE_UNUSED)
2820 {
2821 unsigned int entry;
2822 literal_pool * pool;
2823 char sym_name[20];
2824
2825 pool = find_literal_pool ();
2826 if (pool == NULL
2827 || pool->symbol == NULL
2828 || pool->next_free_entry == 0)
2829 return;
2830
2831 mapping_state (MAP_DATA);
2832
2833 /* Align the pool, since we will be making word accesses to it.
2834 Only make a frag if we have to. */
2835 if (!need_pass_2)
2836 frag_align (2, 0, 0);
2837
2838 record_alignment (now_seg, 2);
2839
2840 sprintf (sym_name, "$$lit_\002%x", pool->id);
2841
2842 symbol_locate (pool->symbol, sym_name, now_seg,
2843 (valueT) frag_now_fix (), frag_now);
2844 symbol_table_insert (pool->symbol);
2845
2846 ARM_SET_THUMB (pool->symbol, thumb_mode);
2847
2848 #if defined OBJ_COFF || defined OBJ_ELF
2849 ARM_SET_INTERWORK (pool->symbol, support_interwork);
2850 #endif
2851
2852 for (entry = 0; entry < pool->next_free_entry; entry ++)
2853 /* First output the expression in the instruction to the pool. */
2854 emit_expr (&(pool->literals[entry]), 4); /* .word */
2855
2856 /* Mark the pool as empty. */
2857 pool->next_free_entry = 0;
2858 pool->symbol = NULL;
2859 }
2860
2861 #ifdef OBJ_ELF
2862 /* Forward declarations for functions below, in the MD interface
2863 section. */
2864 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
2865 static valueT create_unwind_entry (int);
2866 static void start_unwind_section (const segT, int);
2867 static void add_unwind_opcode (valueT, int);
2868 static void flush_pending_unwind (void);
2869
2870 /* Directives: Data. */
2871
2872 static void
2873 s_arm_elf_cons (int nbytes)
2874 {
2875 expressionS exp;
2876
2877 #ifdef md_flush_pending_output
2878 md_flush_pending_output ();
2879 #endif
2880
2881 if (is_it_end_of_statement ())
2882 {
2883 demand_empty_rest_of_line ();
2884 return;
2885 }
2886
2887 #ifdef md_cons_align
2888 md_cons_align (nbytes);
2889 #endif
2890
2891 mapping_state (MAP_DATA);
2892 do
2893 {
2894 int reloc;
2895 char *base = input_line_pointer;
2896
2897 expression (& exp);
2898
2899 if (exp.X_op != O_symbol)
2900 emit_expr (&exp, (unsigned int) nbytes);
2901 else
2902 {
2903 char *before_reloc = input_line_pointer;
2904 reloc = parse_reloc (&input_line_pointer);
2905 if (reloc == -1)
2906 {
2907 as_bad (_("unrecognized relocation suffix"));
2908 ignore_rest_of_line ();
2909 return;
2910 }
2911 else if (reloc == BFD_RELOC_UNUSED)
2912 emit_expr (&exp, (unsigned int) nbytes);
2913 else
2914 {
2915 reloc_howto_type *howto = bfd_reloc_type_lookup (stdoutput, reloc);
2916 int size = bfd_get_reloc_size (howto);
2917
2918 if (reloc == BFD_RELOC_ARM_PLT32)
2919 {
2920 as_bad (_("(plt) is only valid on branch targets"));
2921 reloc = BFD_RELOC_UNUSED;
2922 size = 0;
2923 }
2924
2925 if (size > nbytes)
2926 as_bad (_("%s relocations do not fit in %d bytes"),
2927 howto->name, nbytes);
2928 else
2929 {
2930 /* We've parsed an expression stopping at O_symbol.
2931 But there may be more expression left now that we
2932 have parsed the relocation marker. Parse it again.
2933 XXX Surely there is a cleaner way to do this. */
2934 char *p = input_line_pointer;
2935 int offset;
2936 char *save_buf = alloca (input_line_pointer - base);
2937 memcpy (save_buf, base, input_line_pointer - base);
2938 memmove (base + (input_line_pointer - before_reloc),
2939 base, before_reloc - base);
2940
2941 input_line_pointer = base + (input_line_pointer-before_reloc);
2942 expression (&exp);
2943 memcpy (base, save_buf, p - base);
2944
2945 offset = nbytes - size;
2946 p = frag_more ((int) nbytes);
2947 fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
2948 size, &exp, 0, reloc);
2949 }
2950 }
2951 }
2952 }
2953 while (*input_line_pointer++ == ',');
2954
2955 /* Put terminator back into stream. */
2956 input_line_pointer --;
2957 demand_empty_rest_of_line ();
2958 }
2959
2960
2961 /* Parse a .rel31 directive. */
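/* The accepted form is a single 0/1 flag for the top bit followed by an
expression, e.g. (illustrative):
.rel31 1, some_label @ 31-bit self-relative offset with bit 31 set  */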
2962
2963 static void
2964 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
2965 {
2966 expressionS exp;
2967 char *p;
2968 valueT highbit;
2969
2970 highbit = 0;
2971 if (*input_line_pointer == '1')
2972 highbit = 0x80000000;
2973 else if (*input_line_pointer != '0')
2974 as_bad (_("expected 0 or 1"));
2975
2976 input_line_pointer++;
2977 if (*input_line_pointer != ',')
2978 as_bad (_("missing comma"));
2979 input_line_pointer++;
2980
2981 #ifdef md_flush_pending_output
2982 md_flush_pending_output ();
2983 #endif
2984
2985 #ifdef md_cons_align
2986 md_cons_align (4);
2987 #endif
2988
2989 mapping_state (MAP_DATA);
2990
2991 expression (&exp);
2992
2993 p = frag_more (4);
2994 md_number_to_chars (p, highbit, 4);
2995 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
2996 BFD_RELOC_ARM_PREL31);
2997
2998 demand_empty_rest_of_line ();
2999 }
3000
3001 /* Directives: AEABI stack-unwind tables. */
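/* A typical (purely illustrative) annotated prologue using these
directives might look like:

.fnstart
push {r4, r5, fp, lr}
.save {r4, r5, fp, lr}
add fp, sp, #8
.setfp fp, sp, #8
sub sp, sp, #16
.pad #16
...
.fnend

.vsave is used instead of .save for VFP D registers, .handlerdata
introduces language-specific unwinding data, and .cantunwind marks a
frame that must not be unwound through.  */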
3002
3003 /* Parse an unwind_fnstart directive. Simply records the current location. */
3004
3005 static void
3006 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3007 {
3008 demand_empty_rest_of_line ();
3009 /* Mark the start of the function. */
3010 unwind.proc_start = expr_build_dot ();
3011
3012 /* Reset the rest of the unwind info. */
3013 unwind.opcode_count = 0;
3014 unwind.table_entry = NULL;
3015 unwind.personality_routine = NULL;
3016 unwind.personality_index = -1;
3017 unwind.frame_size = 0;
3018 unwind.fp_offset = 0;
3019 unwind.fp_reg = 13;
3020 unwind.fp_used = 0;
3021 unwind.sp_restored = 0;
3022 }
3023
3024
3025 /* Parse a handlerdata directive. Creates the exception handling table entry
3026 for the function. */
3027
3028 static void
3029 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3030 {
3031 demand_empty_rest_of_line ();
3032 if (unwind.table_entry)
3033 as_bad (_("duplicate .handlerdata directive"));
3034
3035 create_unwind_entry (1);
3036 }
3037
3038 /* Parse an unwind_fnend directive. Generates the index table entry. */
3039
3040 static void
3041 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
3042 {
3043 long where;
3044 char *ptr;
3045 valueT val;
3046
3047 demand_empty_rest_of_line ();
3048
3049 /* Add eh table entry. */
3050 if (unwind.table_entry == NULL)
3051 val = create_unwind_entry (0);
3052 else
3053 val = 0;
3054
3055 /* Add index table entry. This is two words. */
3056 start_unwind_section (unwind.saved_seg, 1);
3057 frag_align (2, 0, 0);
3058 record_alignment (now_seg, 2);
3059
3060 ptr = frag_more (8);
3061 where = frag_now_fix () - 8;
3062
3063 /* Self relative offset of the function start. */
3064 fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
3065 BFD_RELOC_ARM_PREL31);
3066
3067 /* Indicate dependency on EHABI-defined personality routines to the
3068 linker, if it hasn't been done already. */
3069 if (unwind.personality_index >= 0 && unwind.personality_index < 3
3070 && !(marked_pr_dependency & (1 << unwind.personality_index)))
3071 {
3072 static const char *const name[] = {
3073 "__aeabi_unwind_cpp_pr0",
3074 "__aeabi_unwind_cpp_pr1",
3075 "__aeabi_unwind_cpp_pr2"
3076 };
3077 symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
3078 fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
3079 marked_pr_dependency |= 1 << unwind.personality_index;
3080 seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
3081 = marked_pr_dependency;
3082 }
3083
3084 if (val)
3085 /* Inline exception table entry. */
3086 md_number_to_chars (ptr + 4, val, 4);
3087 else
3088 /* Self relative offset of the table entry. */
3089 fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
3090 BFD_RELOC_ARM_PREL31);
3091
3092 /* Restore the original section. */
3093 subseg_set (unwind.saved_seg, unwind.saved_subseg);
3094 }
3095
3096
3097 /* Parse an unwind_cantunwind directive. */
3098
3099 static void
3100 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3101 {
3102 demand_empty_rest_of_line ();
3103 if (unwind.personality_routine || unwind.personality_index != -1)
3104 as_bad (_("personality routine specified for cantunwind frame"));
3105
3106 unwind.personality_index = -2;
3107 }
3108
3109
3110 /* Parse a personalityindex directive. */
3111
3112 static void
3113 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3114 {
3115 expressionS exp;
3116
3117 if (unwind.personality_routine || unwind.personality_index != -1)
3118 as_bad (_("duplicate .personalityindex directive"));
3119
3120 expression (&exp);
3121
3122 if (exp.X_op != O_constant
3123 || exp.X_add_number < 0 || exp.X_add_number > 15)
3124 {
3125 as_bad (_("bad personality routine number"));
3126 ignore_rest_of_line ();
3127 return;
3128 }
3129
3130 unwind.personality_index = exp.X_add_number;
3131
3132 demand_empty_rest_of_line ();
3133 }
3134
3135
3136 /* Parse a personality directive. */
3137
3138 static void
3139 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3140 {
3141 char *name, *p, c;
3142
3143 if (unwind.personality_routine || unwind.personality_index != -1)
3144 as_bad (_("duplicate .personality directive"));
3145
3146 name = input_line_pointer;
3147 c = get_symbol_end ();
3148 p = input_line_pointer;
3149 unwind.personality_routine = symbol_find_or_make (name);
3150 *p = c;
3151 demand_empty_rest_of_line ();
3152 }
3153
3154
3155 /* Parse a directive saving core registers. */
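/* As a worked example (illustrative), ".save {r4-r7, lr}" passes the
short-form test below: four consecutive registers starting at r4 plus
r14, so the single opcode byte 0xab (0xa8 | 3) is emitted and 20 bytes
are added to the recorded frame size.  */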
3156
3157 static void
3158 s_arm_unwind_save_core (void)
3159 {
3160 valueT op;
3161 long range;
3162 int n;
3163
3164 range = parse_reg_list (&input_line_pointer);
3165 if (range == FAIL)
3166 {
3167 as_bad (_("expected register list"));
3168 ignore_rest_of_line ();
3169 return;
3170 }
3171
3172 demand_empty_rest_of_line ();
3173
3174 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3175 into .unwind_save {..., sp...}. We aren't bothered about the value of
3176 ip because it is clobbered by calls. */
3177 if (unwind.sp_restored && unwind.fp_reg == 12
3178 && (range & 0x3000) == 0x1000)
3179 {
3180 unwind.opcode_count--;
3181 unwind.sp_restored = 0;
3182 range = (range | 0x2000) & ~0x1000;
3183 unwind.pending_offset = 0;
3184 }
3185
3186 /* Pop r4-r15. */
3187 if (range & 0xfff0)
3188 {
3189 /* See if we can use the short opcodes. These pop a block of up to 8
3190 registers starting with r4, plus maybe r14. */
3191 for (n = 0; n < 8; n++)
3192 {
3193 /* Break at the first non-saved register. */
3194 if ((range & (1 << (n + 4))) == 0)
3195 break;
3196 }
3197 /* See if there are any other bits set. */
3198 if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
3199 {
3200 /* Use the long form. */
3201 op = 0x8000 | ((range >> 4) & 0xfff);
3202 add_unwind_opcode (op, 2);
3203 }
3204 else
3205 {
3206 /* Use the short form. */
3207 if (range & 0x4000)
3208 op = 0xa8; /* Pop r14. */
3209 else
3210 op = 0xa0; /* Do not pop r14. */
3211 op |= (n - 1);
3212 add_unwind_opcode (op, 1);
3213 }
3214 }
3215
3216 /* Pop r0-r3. */
3217 if (range & 0xf)
3218 {
3219 op = 0xb100 | (range & 0xf);
3220 add_unwind_opcode (op, 2);
3221 }
3222
3223 /* Record the number of bytes pushed. */
3224 for (n = 0; n < 16; n++)
3225 {
3226 if (range & (1 << n))
3227 unwind.frame_size += 4;
3228 }
3229 }
3230
3231
3232 /* Parse a directive saving FPA registers. */
3233
3234 static void
3235 s_arm_unwind_save_fpa (int reg)
3236 {
3237 expressionS exp;
3238 int num_regs;
3239 valueT op;
3240
3241 /* Get Number of registers to transfer. */
3242 if (skip_past_comma (&input_line_pointer) != FAIL)
3243 expression (&exp);
3244 else
3245 exp.X_op = O_illegal;
3246
3247 if (exp.X_op != O_constant)
3248 {
3249 as_bad (_("expected , <constant>"));
3250 ignore_rest_of_line ();
3251 return;
3252 }
3253
3254 num_regs = exp.X_add_number;
3255
3256 if (num_regs < 1 || num_regs > 4)
3257 {
3258 as_bad (_("number of registers must be in the range [1:4]"));
3259 ignore_rest_of_line ();
3260 return;
3261 }
3262
3263 demand_empty_rest_of_line ();
3264
3265 if (reg == 4)
3266 {
3267 /* Short form. */
3268 op = 0xb4 | (num_regs - 1);
3269 add_unwind_opcode (op, 1);
3270 }
3271 else
3272 {
3273 /* Long form. */
3274 op = 0xc800 | (reg << 4) | (num_regs - 1);
3275 add_unwind_opcode (op, 2);
3276 }
3277 unwind.frame_size += num_regs * 12;
3278 }
3279
3280
3281 /* Parse a directive saving VFP registers for ARMv6 and above. */
3282
3283 static void
3284 s_arm_unwind_save_vfp_armv6 (void)
3285 {
3286 int count;
3287 unsigned int start;
3288 valueT op;
3289 int num_vfpv3_regs = 0;
3290 int num_regs_below_16;
3291
3292 count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
3293 if (count == FAIL)
3294 {
3295 as_bad (_("expected register list"));
3296 ignore_rest_of_line ();
3297 return;
3298 }
3299
3300 demand_empty_rest_of_line ();
3301
3302 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
3303 than FSTMX/FLDMX-style ones). */
3304
3305 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
3306 if (start >= 16)
3307 num_vfpv3_regs = count;
3308 else if (start + count > 16)
3309 num_vfpv3_regs = start + count - 16;
3310
3311 if (num_vfpv3_regs > 0)
3312 {
3313 int start_offset = start > 16 ? start - 16 : 0;
3314 op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
3315 add_unwind_opcode (op, 2);
3316 }
3317
3318 /* Generate opcode for registers numbered in the range 0 .. 15. */
3319 num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
3320 assert (num_regs_below_16 + num_vfpv3_regs == count);
3321 if (num_regs_below_16 > 0)
3322 {
3323 op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
3324 add_unwind_opcode (op, 2);
3325 }
3326
3327 unwind.frame_size += count * 8;
3328 }
3329
3330
3331 /* Parse a directive saving VFP registers for pre-ARMv6. */
3332
3333 static void
3334 s_arm_unwind_save_vfp (void)
3335 {
3336 int count;
3337 unsigned int reg;
3338 valueT op;
3339
3340 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
3341 if (count == FAIL)
3342 {
3343 as_bad (_("expected register list"));
3344 ignore_rest_of_line ();
3345 return;
3346 }
3347
3348 demand_empty_rest_of_line ();
3349
3350 if (reg == 8)
3351 {
3352 /* Short form. */
3353 op = 0xb8 | (count - 1);
3354 add_unwind_opcode (op, 1);
3355 }
3356 else
3357 {
3358 /* Long form. */
3359 op = 0xb300 | (reg << 4) | (count - 1);
3360 add_unwind_opcode (op, 2);
3361 }
3362 unwind.frame_size += count * 8 + 4;
3363 }
3364
3365
3366 /* Parse a directive saving iWMMXt data registers. */
3367
3368 static void
3369 s_arm_unwind_save_mmxwr (void)
3370 {
3371 int reg;
3372 int hi_reg;
3373 int i;
3374 unsigned mask = 0;
3375 valueT op;
3376
3377 if (*input_line_pointer == '{')
3378 input_line_pointer++;
3379
3380 do
3381 {
3382 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3383
3384 if (reg == FAIL)
3385 {
3386 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
3387 goto error;
3388 }
3389
3390 if (mask >> reg)
3391 as_tsktsk (_("register list not in ascending order"));
3392 mask |= 1 << reg;
3393
3394 if (*input_line_pointer == '-')
3395 {
3396 input_line_pointer++;
3397 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3398 if (hi_reg == FAIL)
3399 {
3400 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
3401 goto error;
3402 }
3403 else if (reg >= hi_reg)
3404 {
3405 as_bad (_("bad register range"));
3406 goto error;
3407 }
3408 for (; reg < hi_reg; reg++)
3409 mask |= 1 << reg;
3410 }
3411 }
3412 while (skip_past_comma (&input_line_pointer) != FAIL);
3413
3414 if (*input_line_pointer == '}')
3415 input_line_pointer++;
3416
3417 demand_empty_rest_of_line ();
3418
3419 /* Generate any deferred opcodes because we're going to be looking at
3420 the list. */
3421 flush_pending_unwind ();
3422
3423 for (i = 0; i < 16; i++)
3424 {
3425 if (mask & (1 << i))
3426 unwind.frame_size += 8;
3427 }
3428
3429 /* Attempt to combine with a previous opcode. We do this because gcc
3430 likes to output separate unwind directives for a single block of
3431 registers. */
3432 if (unwind.opcode_count > 0)
3433 {
3434 i = unwind.opcodes[unwind.opcode_count - 1];
3435 if ((i & 0xf8) == 0xc0)
3436 {
3437 i &= 7;
3438 /* Only merge if the blocks are contiguous. */
3439 if (i < 6)
3440 {
3441 if ((mask & 0xfe00) == (1 << 9))
3442 {
3443 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
3444 unwind.opcode_count--;
3445 }
3446 }
3447 else if (i == 6 && unwind.opcode_count >= 2)
3448 {
3449 i = unwind.opcodes[unwind.opcode_count - 2];
3450 reg = i >> 4;
3451 i &= 0xf;
3452
3453 op = 0xffff << (reg - 1);
3454 if (reg > 0
3455 && ((mask & op) == (1u << (reg - 1))))
3456 {
3457 op = (1 << (reg + i + 1)) - 1;
3458 op &= ~((1 << reg) - 1);
3459 mask |= op;
3460 unwind.opcode_count -= 2;
3461 }
3462 }
3463 }
3464 }
3465
3466 hi_reg = 15;
3467 /* We want to generate opcodes in the order the registers have been
3468 saved, ie. descending order. */
3469 for (reg = 15; reg >= -1; reg--)
3470 {
3471 /* Save registers in blocks. */
3472 if (reg < 0
3473 || !(mask & (1 << reg)))
3474 {
3475 /* We found an unsaved reg. Generate opcodes to save the
3476 preceding block. */
3477 if (reg != hi_reg)
3478 {
3479 if (reg == 9)
3480 {
3481 /* Short form. */
3482 op = 0xc0 | (hi_reg - 10);
3483 add_unwind_opcode (op, 1);
3484 }
3485 else
3486 {
3487 /* Long form. */
3488 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
3489 add_unwind_opcode (op, 2);
3490 }
3491 }
3492 hi_reg = reg - 1;
3493 }
3494 }
3495
3496 return;
3497 error:
3498 ignore_rest_of_line ();
3499 }
3500
3501 static void
3502 s_arm_unwind_save_mmxwcg (void)
3503 {
3504 int reg;
3505 int hi_reg;
3506 unsigned mask = 0;
3507 valueT op;
3508
3509 if (*input_line_pointer == '{')
3510 input_line_pointer++;
3511
3512 do
3513 {
3514 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3515
3516 if (reg == FAIL)
3517 {
3518 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG]));
3519 goto error;
3520 }
3521
3522 reg -= 8;
3523 if (mask >> reg)
3524 as_tsktsk (_("register list not in ascending order"));
3525 mask |= 1 << reg;
3526
3527 if (*input_line_pointer == '-')
3528 {
3529 input_line_pointer++;
3530 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3531 if (hi_reg == FAIL)
3532 {
3533 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG]));
3534 goto error;
3535 }
3536 else if (reg >= hi_reg)
3537 {
3538 as_bad (_("bad register range"));
3539 goto error;
3540 }
3541 for (; reg < hi_reg; reg++)
3542 mask |= 1 << reg;
3543 }
3544 }
3545 while (skip_past_comma (&input_line_pointer) != FAIL);
3546
3547 if (*input_line_pointer == '}')
3548 input_line_pointer++;
3549
3550 demand_empty_rest_of_line ();
3551
3552 /* Generate any deferred opcodes because we're going to be looking at
3553 the list. */
3554 flush_pending_unwind ();
3555
3556 for (reg = 0; reg < 16; reg++)
3557 {
3558 if (mask & (1 << reg))
3559 unwind.frame_size += 4;
3560 }
3561 op = 0xc700 | mask;
3562 add_unwind_opcode (op, 2);
3563 return;
3564 error:
3565 ignore_rest_of_line ();
3566 }
3567
3568
3569 /* Parse an unwind_save directive.
3570 If the argument is non-zero, this is a .vsave directive. */
3571
3572 static void
3573 s_arm_unwind_save (int arch_v6)
3574 {
3575 char *peek;
3576 struct reg_entry *reg;
3577 bfd_boolean had_brace = FALSE;
3578
3579 /* Figure out what sort of save we have. */
3580 peek = input_line_pointer;
3581
3582 if (*peek == '{')
3583 {
3584 had_brace = TRUE;
3585 peek++;
3586 }
3587
3588 reg = arm_reg_parse_multi (&peek);
3589
3590 if (!reg)
3591 {
3592 as_bad (_("register expected"));
3593 ignore_rest_of_line ();
3594 return;
3595 }
3596
3597 switch (reg->type)
3598 {
3599 case REG_TYPE_FN:
3600 if (had_brace)
3601 {
3602 as_bad (_("FPA .unwind_save does not take a register list"));
3603 ignore_rest_of_line ();
3604 return;
3605 }
3606 s_arm_unwind_save_fpa (reg->number);
3607 return;
3608
3609 case REG_TYPE_RN: s_arm_unwind_save_core (); return;
3610 case REG_TYPE_VFD:
3611 if (arch_v6)
3612 s_arm_unwind_save_vfp_armv6 ();
3613 else
3614 s_arm_unwind_save_vfp ();
3615 return;
3616 case REG_TYPE_MMXWR: s_arm_unwind_save_mmxwr (); return;
3617 case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;
3618
3619 default:
3620 as_bad (_(".unwind_save does not support this kind of register"));
3621 ignore_rest_of_line ();
3622 }
3623 }
3624
3625
3626 /* Parse an unwind_movsp directive. */
3627
3628 static void
3629 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
3630 {
3631 int reg;
3632 valueT op;
3633 int offset;
3634
3635 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3636 if (reg == FAIL)
3637 {
3638 as_bad (_(reg_expected_msgs[REG_TYPE_RN]));
3639 ignore_rest_of_line ();
3640 return;
3641 }
3642
3643 /* Optional constant. */
3644 if (skip_past_comma (&input_line_pointer) != FAIL)
3645 {
3646 if (immediate_for_directive (&offset) == FAIL)
3647 return;
3648 }
3649 else
3650 offset = 0;
3651
3652 demand_empty_rest_of_line ();
3653
3654 if (reg == REG_SP || reg == REG_PC)
3655 {
3656 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
3657 return;
3658 }
3659
3660 if (unwind.fp_reg != REG_SP)
3661 as_bad (_("unexpected .unwind_movsp directive"));
3662
3663 /* Generate opcode to restore the value. */
3664 op = 0x90 | reg;
3665 add_unwind_opcode (op, 1);
3666
3667 /* Record the information for later. */
3668 unwind.fp_reg = reg;
3669 unwind.fp_offset = unwind.frame_size - offset;
3670 unwind.sp_restored = 1;
3671 }
3672
3673 /* Parse an unwind_pad directive. */
3674
3675 static void
3676 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
3677 {
3678 int offset;
3679
3680 if (immediate_for_directive (&offset) == FAIL)
3681 return;
3682
3683 if (offset & 3)
3684 {
3685 as_bad (_("stack increment must be multiple of 4"));
3686 ignore_rest_of_line ();
3687 return;
3688 }
3689
3690 /* Don't generate any opcodes, just record the details for later. */
3691 unwind.frame_size += offset;
3692 unwind.pending_offset += offset;
3693
3694 demand_empty_rest_of_line ();
3695 }
3696
3697 /* Parse an unwind_setfp directive. */
3698
3699 static void
3700 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
3701 {
3702 int sp_reg;
3703 int fp_reg;
3704 int offset;
3705
3706 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3707 if (skip_past_comma (&input_line_pointer) == FAIL)
3708 sp_reg = FAIL;
3709 else
3710 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3711
3712 if (fp_reg == FAIL || sp_reg == FAIL)
3713 {
3714 as_bad (_("expected <reg>, <reg>"));
3715 ignore_rest_of_line ();
3716 return;
3717 }
3718
3719 /* Optional constant. */
3720 if (skip_past_comma (&input_line_pointer) != FAIL)
3721 {
3722 if (immediate_for_directive (&offset) == FAIL)
3723 return;
3724 }
3725 else
3726 offset = 0;
3727
3728 demand_empty_rest_of_line ();
3729
3730 if (sp_reg != 13 && sp_reg != unwind.fp_reg)
3731 {
3732 as_bad (_("register must be either sp or set by a previous "
3733 "unwind_movsp directive"));
3734 return;
3735 }
3736
3737 /* Don't generate any opcodes, just record the information for later. */
3738 unwind.fp_reg = fp_reg;
3739 unwind.fp_used = 1;
3740 if (sp_reg == 13)
3741 unwind.fp_offset = unwind.frame_size - offset;
3742 else
3743 unwind.fp_offset -= offset;
3744 }
3745
3746 /* Parse an unwind_raw directive. */
3747
3748 static void
3749 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
3750 {
3751 expressionS exp;
3752 /* This is an arbitrary limit. */
3753 unsigned char op[16];
3754 int count;
3755
3756 expression (&exp);
3757 if (exp.X_op == O_constant
3758 && skip_past_comma (&input_line_pointer) != FAIL)
3759 {
3760 unwind.frame_size += exp.X_add_number;
3761 expression (&exp);
3762 }
3763 else
3764 exp.X_op = O_illegal;
3765
3766 if (exp.X_op != O_constant)
3767 {
3768 as_bad (_("expected <offset>, <opcode>"));
3769 ignore_rest_of_line ();
3770 return;
3771 }
3772
3773 count = 0;
3774
3775 /* Parse the opcode. */
3776 for (;;)
3777 {
3778 if (count >= 16)
3779 {
3780 as_bad (_("unwind opcode too long"));
3781 ignore_rest_of_line ();
return;
3782 }
3783 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
3784 {
3785 as_bad (_("invalid unwind opcode"));
3786 ignore_rest_of_line ();
3787 return;
3788 }
3789 op[count++] = exp.X_add_number;
3790
3791 /* Parse the next byte. */
3792 if (skip_past_comma (&input_line_pointer) == FAIL)
3793 break;
3794
3795 expression (&exp);
3796 }
3797
3798 /* Add the opcode bytes in reverse order. */
3799 while (count--)
3800 add_unwind_opcode (op[count], 1);
3801
3802 demand_empty_rest_of_line ();
3803 }
3804
3805
3806 /* Parse a .eabi_attribute directive. */
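/* The accepted form is ".eabi_attribute <tag>, <value>". Tags 4 and 5
(and odd tags above 32) take a string, Tag_compatibility (32) takes a
number followed by a string, and other tags take a numeric constant,
e.g. (tag and value purely illustrative):
.eabi_attribute 6, 10  */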
3807
3808 static void
3809 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
3810 {
3811 expressionS exp;
3812 bfd_boolean is_string;
3813 int tag;
3814 unsigned int i = 0;
3815 char *s = NULL;
3816 char saved_char;
3817
3818 expression (& exp);
3819 if (exp.X_op != O_constant)
3820 goto bad;
3821
3822 tag = exp.X_add_number;
3823 if (tag == 4 || tag == 5 || tag == 32 || (tag > 32 && (tag & 1) != 0))
3824 is_string = 1;
3825 else
3826 is_string = 0;
3827
3828 if (skip_past_comma (&input_line_pointer) == FAIL)
3829 goto bad;
3830 if (tag == 32 || !is_string)
3831 {
3832 expression (& exp);
3833 if (exp.X_op != O_constant)
3834 {
3835 as_bad (_("expected numeric constant"));
3836 ignore_rest_of_line ();
3837 return;
3838 }
3839 i = exp.X_add_number;
3840 }
3841 if (tag == Tag_compatibility
3842 && skip_past_comma (&input_line_pointer) == FAIL)
3843 {
3844 as_bad (_("expected comma"));
3845 ignore_rest_of_line ();
3846 return;
3847 }
3848 if (is_string)
3849 {
3850 skip_whitespace (input_line_pointer);
3851 if (*input_line_pointer != '"')
3852 goto bad_string;
3853 input_line_pointer++;
3854 s = input_line_pointer;
3855 while (*input_line_pointer && *input_line_pointer != '"')
3856 input_line_pointer++;
3857 if (*input_line_pointer != '"')
3858 goto bad_string;
3859 saved_char = *input_line_pointer;
3860 *input_line_pointer = 0;
3861 }
3862 else
3863 {
3864 s = NULL;
3865 saved_char = 0;
3866 }
3867
3868 if (tag == Tag_compatibility)
3869 elf32_arm_add_eabi_attr_compat (stdoutput, i, s);
3870 else if (is_string)
3871 elf32_arm_add_eabi_attr_string (stdoutput, tag, s);
3872 else
3873 elf32_arm_add_eabi_attr_int (stdoutput, tag, i);
3874
3875 if (s)
3876 {
3877 *input_line_pointer = saved_char;
3878 input_line_pointer++;
3879 }
3880 demand_empty_rest_of_line ();
3881 return;
3882 bad_string:
3883 as_bad (_("bad string constant"));
3884 ignore_rest_of_line ();
3885 return;
3886 bad:
3887 as_bad (_("expected <tag> , <value>"));
3888 ignore_rest_of_line ();
3889 }
3890 #endif /* OBJ_ELF */
3891
3892 static void s_arm_arch (int);
3893 static void s_arm_cpu (int);
3894 static void s_arm_fpu (int);
3895
3896 #ifdef TE_PE
3897
3898 static void
3899 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
3900 {
3901 expressionS exp;
3902
3903 do
3904 {
3905 expression (&exp);
3906 if (exp.X_op == O_symbol)
3907 exp.X_op = O_secrel;
3908
3909 emit_expr (&exp, 4);
3910 }
3911 while (*input_line_pointer++ == ',');
3912
3913 input_line_pointer--;
3914 demand_empty_rest_of_line ();
3915 }
3916 #endif /* TE_PE */
3917
3918 /* This table describes all the machine specific pseudo-ops the assembler
3919 has to support. The fields are:
3920 pseudo-op name without dot
3921 function to call to execute this pseudo-op
3922 Integer arg to pass to the function. */
3923
3924 const pseudo_typeS md_pseudo_table[] =
3925 {
3926 /* Never called because '.req' does not start a line. */
3927 { "req", s_req, 0 },
3928 /* Following two are likewise never called. */
3929 { "dn", s_dn, 0 },
3930 { "qn", s_qn, 0 },
3931 { "unreq", s_unreq, 0 },
3932 { "bss", s_bss, 0 },
3933 { "align", s_align, 0 },
3934 { "arm", s_arm, 0 },
3935 { "thumb", s_thumb, 0 },
3936 { "code", s_code, 0 },
3937 { "force_thumb", s_force_thumb, 0 },
3938 { "thumb_func", s_thumb_func, 0 },
3939 { "thumb_set", s_thumb_set, 0 },
3940 { "even", s_even, 0 },
3941 { "ltorg", s_ltorg, 0 },
3942 { "pool", s_ltorg, 0 },
3943 { "syntax", s_syntax, 0 },
3944 { "cpu", s_arm_cpu, 0 },
3945 { "arch", s_arm_arch, 0 },
3946 { "fpu", s_arm_fpu, 0 },
3947 #ifdef OBJ_ELF
3948 { "word", s_arm_elf_cons, 4 },
3949 { "long", s_arm_elf_cons, 4 },
3950 { "rel31", s_arm_rel31, 0 },
3951 { "fnstart", s_arm_unwind_fnstart, 0 },
3952 { "fnend", s_arm_unwind_fnend, 0 },
3953 { "cantunwind", s_arm_unwind_cantunwind, 0 },
3954 { "personality", s_arm_unwind_personality, 0 },
3955 { "personalityindex", s_arm_unwind_personalityindex, 0 },
3956 { "handlerdata", s_arm_unwind_handlerdata, 0 },
3957 { "save", s_arm_unwind_save, 0 },
3958 { "vsave", s_arm_unwind_save, 1 },
3959 { "movsp", s_arm_unwind_movsp, 0 },
3960 { "pad", s_arm_unwind_pad, 0 },
3961 { "setfp", s_arm_unwind_setfp, 0 },
3962 { "unwind_raw", s_arm_unwind_raw, 0 },
3963 { "eabi_attribute", s_arm_eabi_attribute, 0 },
3964 #else
3965 { "word", cons, 4},
3966
3967 /* These are used for dwarf. */
3968 {"2byte", cons, 2},
3969 {"4byte", cons, 4},
3970 {"8byte", cons, 8},
3971 /* These are used for dwarf2. */
3972 { "file", (void (*) (int)) dwarf2_directive_file, 0 },
3973 { "loc", dwarf2_directive_loc, 0 },
3974 { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
3975 #endif
3976 { "extend", float_cons, 'x' },
3977 { "ldouble", float_cons, 'x' },
3978 { "packed", float_cons, 'p' },
3979 #ifdef TE_PE
3980 {"secrel32", pe_directive_secrel, 0},
3981 #endif
3982 { 0, 0, 0 }
3983 };
3984 \f
3985 /* Parser functions used exclusively in instruction operands. */
3986
3987 /* Generic immediate-value read function for use in insn parsing.
3988 STR points to the beginning of the immediate (the leading #);
3989 VAL receives the value; if the value is outside [MIN, MAX]
3990 issue an error. PREFIX_OPT is true if the immediate prefix is
3991 optional. */
3992
3993 static int
3994 parse_immediate (char **str, int *val, int min, int max,
3995 bfd_boolean prefix_opt)
3996 {
3997 expressionS exp;
3998 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
3999 if (exp.X_op != O_constant)
4000 {
4001 inst.error = _("constant expression required");
4002 return FAIL;
4003 }
4004
4005 if (exp.X_add_number < min || exp.X_add_number > max)
4006 {
4007 inst.error = _("immediate value out of range");
4008 return FAIL;
4009 }
4010
4011 *val = exp.X_add_number;
4012 return SUCCESS;
4013 }
4014
4015 /* Less-generic immediate-value read function with the possibility of loading a
4016 big (64-bit) immediate, as required by Neon VMOV and VMVN immediate
4017 instructions. Puts the result directly in inst.operands[i]. */
4018
4019 static int
4020 parse_big_immediate (char **str, int i)
4021 {
4022 expressionS exp;
4023 char *ptr = *str;
4024
4025 my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);
4026
4027 if (exp.X_op == O_constant)
4028 inst.operands[i].imm = exp.X_add_number;
4029 else if (exp.X_op == O_big
4030 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32
4031 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number <= 64)
4032 {
4033 unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
4034 /* Bignums have their least significant bits in
4035 generic_bignum[0]. Make sure we put 32 bits in imm and
4036 32 bits in reg, in a (hopefully) portable way. */
4037 assert (parts != 0);
4038 inst.operands[i].imm = 0;
4039 for (j = 0; j < parts; j++, idx++)
4040 inst.operands[i].imm |= generic_bignum[idx]
4041 << (LITTLENUM_NUMBER_OF_BITS * j);
4042 inst.operands[i].reg = 0;
4043 for (j = 0; j < parts; j++, idx++)
4044 inst.operands[i].reg |= generic_bignum[idx]
4045 << (LITTLENUM_NUMBER_OF_BITS * j);
4046 inst.operands[i].regisimm = 1;
4047 }
4048 else
4049 return FAIL;
4050
4051 *str = ptr;
4052
4053 return SUCCESS;
4054 }
4055
4056 /* Returns the pseudo-register number of an FPA immediate constant,
4057 or FAIL if there isn't a valid constant here. */
4058
4059 static int
4060 parse_fpa_immediate (char ** str)
4061 {
4062 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4063 char * save_in;
4064 expressionS exp;
4065 int i;
4066 int j;
4067
4068 /* First try and match exact strings, this is to guarantee
4069 that some formats will work even for cross assembly. */
4070
4071 for (i = 0; fp_const[i]; i++)
4072 {
4073 if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
4074 {
4075 char *start = *str;
4076
4077 *str += strlen (fp_const[i]);
4078 if (is_end_of_line[(unsigned char) **str])
4079 return i + 8;
4080 *str = start;
4081 }
4082 }
4083
4084 /* Just because we didn't get a match doesn't mean that the constant
4085 isn't valid, just that it is in a format that we don't
4086 automatically recognize. Try parsing it with the standard
4087 expression routines. */
4088
4089 memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
4090
4091 /* Look for a raw floating point number. */
4092 if ((save_in = atof_ieee (*str, 'x', words)) != NULL
4093 && is_end_of_line[(unsigned char) *save_in])
4094 {
4095 for (i = 0; i < NUM_FLOAT_VALS; i++)
4096 {
4097 for (j = 0; j < MAX_LITTLENUMS; j++)
4098 {
4099 if (words[j] != fp_values[i][j])
4100 break;
4101 }
4102
4103 if (j == MAX_LITTLENUMS)
4104 {
4105 *str = save_in;
4106 return i + 8;
4107 }
4108 }
4109 }
4110
4111 /* Try and parse a more complex expression, this will probably fail
4112 unless the code uses a floating point prefix (eg "0f"). */
4113 save_in = input_line_pointer;
4114 input_line_pointer = *str;
4115 if (expression (&exp) == absolute_section
4116 && exp.X_op == O_big
4117 && exp.X_add_number < 0)
4118 {
4119 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4120 Ditto for 15. */
4121 if (gen_to_words (words, 5, (long) 15) == 0)
4122 {
4123 for (i = 0; i < NUM_FLOAT_VALS; i++)
4124 {
4125 for (j = 0; j < MAX_LITTLENUMS; j++)
4126 {
4127 if (words[j] != fp_values[i][j])
4128 break;
4129 }
4130
4131 if (j == MAX_LITTLENUMS)
4132 {
4133 *str = input_line_pointer;
4134 input_line_pointer = save_in;
4135 return i + 8;
4136 }
4137 }
4138 }
4139 }
4140
4141 *str = input_line_pointer;
4142 input_line_pointer = save_in;
4143 inst.error = _("invalid FPA immediate expression");
4144 return FAIL;
4145 }
4146
4147 /* Returns 1 if a number has "quarter-precision" float format
4148 0baBbbbbbc defgh000 00000000 00000000. */
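/* For example, 1.0 in single precision is 0x3f800000: the low 19 bits
are zero and the exponent field has the required form, so it is
accepted; most other values cannot be represented this way.  */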
4149
4150 static int
4151 is_quarter_float (unsigned imm)
4152 {
4153 int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
4154 return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
4155 }
4156
4157 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4158 0baBbbbbbc defgh000 00000000 00000000.
4159 The minus-zero case needs special handling, since it can't be encoded in the
4160 "quarter-precision" float format, but can nonetheless be loaded as an integer
4161 constant. */
4162
4163 static unsigned
4164 parse_qfloat_immediate (char **ccp, int *immed)
4165 {
4166 char *str = *ccp;
4167 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4168
4169 skip_past_char (&str, '#');
4170
4171 if ((str = atof_ieee (str, 's', words)) != NULL)
4172 {
4173 unsigned fpword = 0;
4174 int i;
4175
4176 /* Our FP word must be 32 bits (single-precision FP). */
4177 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
4178 {
4179 fpword <<= LITTLENUM_NUMBER_OF_BITS;
4180 fpword |= words[i];
4181 }
4182
4183 if (is_quarter_float (fpword) || fpword == 0x80000000)
4184 *immed = fpword;
4185 else
4186 return FAIL;
4187
4188 *ccp = str;
4189
4190 return SUCCESS;
4191 }
4192
4193 return FAIL;
4194 }
4195
4196 /* Shift operands. */
4197 enum shift_kind
4198 {
4199 SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
4200 };
4201
4202 struct asm_shift_name
4203 {
4204 const char *name;
4205 enum shift_kind kind;
4206 };
4207
4208 /* Third argument to parse_shift. */
4209 enum parse_shift_mode
4210 {
4211 NO_SHIFT_RESTRICT, /* Any kind of shift is accepted. */
4212 SHIFT_IMMEDIATE, /* Shift operand must be an immediate. */
4213 SHIFT_LSL_OR_ASR_IMMEDIATE, /* Shift must be LSL or ASR immediate. */
4214 SHIFT_ASR_IMMEDIATE, /* Shift must be ASR immediate. */
4215 SHIFT_LSL_IMMEDIATE, /* Shift must be LSL immediate. */
4216 };
4217
4218 /* Parse a <shift> specifier on an ARM data processing instruction.
4219 This has three forms:
4220
4221 (LSL|LSR|ASL|ASR|ROR) Rs
4222 (LSL|LSR|ASL|ASR|ROR) #imm
4223 RRX
4224
4225 Note that ASL is assimilated to LSL in the instruction encoding, and
4226 RRX to ROR #0 (which cannot be written as such). */
4227
4228 static int
4229 parse_shift (char **str, int i, enum parse_shift_mode mode)
4230 {
4231 const struct asm_shift_name *shift_name;
4232 enum shift_kind shift;
4233 char *s = *str;
4234 char *p = s;
4235 int reg;
4236
4237 for (p = *str; ISALPHA (*p); p++)
4238 ;
4239
4240 if (p == *str)
4241 {
4242 inst.error = _("shift expression expected");
4243 return FAIL;
4244 }
4245
4246 shift_name = hash_find_n (arm_shift_hsh, *str, p - *str);
4247
4248 if (shift_name == NULL)
4249 {
4250 inst.error = _("shift expression expected");
4251 return FAIL;
4252 }
4253
4254 shift = shift_name->kind;
4255
4256 switch (mode)
4257 {
4258 case NO_SHIFT_RESTRICT:
4259 case SHIFT_IMMEDIATE: break;
4260
4261 case SHIFT_LSL_OR_ASR_IMMEDIATE:
4262 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
4263 {
4264 inst.error = _("'LSL' or 'ASR' required");
4265 return FAIL;
4266 }
4267 break;
4268
4269 case SHIFT_LSL_IMMEDIATE:
4270 if (shift != SHIFT_LSL)
4271 {
4272 inst.error = _("'LSL' required");
4273 return FAIL;
4274 }
4275 break;
4276
4277 case SHIFT_ASR_IMMEDIATE:
4278 if (shift != SHIFT_ASR)
4279 {
4280 inst.error = _("'ASR' required");
4281 return FAIL;
4282 }
4283 break;
4284
4285 default: abort ();
4286 }
4287
4288 if (shift != SHIFT_RRX)
4289 {
4290 /* Whitespace can appear here if the next thing is a bare digit. */
4291 skip_whitespace (p);
4292
4293 if (mode == NO_SHIFT_RESTRICT
4294 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4295 {
4296 inst.operands[i].imm = reg;
4297 inst.operands[i].immisreg = 1;
4298 }
4299 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4300 return FAIL;
4301 }
4302 inst.operands[i].shift_kind = shift;
4303 inst.operands[i].shifted = 1;
4304 *str = p;
4305 return SUCCESS;
4306 }
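
/* For example, for the specifier "lsl #3" the code above records
   shift_kind = SHIFT_LSL and leaves the constant 3 in inst.reloc.exp for
   md_apply_fix to encode; for "lsl r3" (accepted only in NO_SHIFT_RESTRICT
   mode) it sets immisreg and stores the register number of r3 in the
   operand's imm field instead.  */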
4307
4308 /* Parse a <shifter_operand> for an ARM data processing instruction:
4309
4310 #<immediate>
4311 #<immediate>, <rotate>
4312 <Rm>
4313 <Rm>, <shift>
4314
4315 where <shift> is defined by parse_shift above, and <rotate> is a
4316 multiple of 2 between 0 and 30. Validation of immediate operands
4317 is deferred to md_apply_fix. */
4318
4319 static int
4320 parse_shifter_operand (char **str, int i)
4321 {
4322 int value;
4323 expressionS expr;
4324
4325 if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
4326 {
4327 inst.operands[i].reg = value;
4328 inst.operands[i].isreg = 1;
4329
4330 /* parse_shift will override this if appropriate */
4331 inst.reloc.exp.X_op = O_constant;
4332 inst.reloc.exp.X_add_number = 0;
4333
4334 if (skip_past_comma (str) == FAIL)
4335 return SUCCESS;
4336
4337 /* Shift operation on register. */
4338 return parse_shift (str, i, NO_SHIFT_RESTRICT);
4339 }
4340
4341 if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
4342 return FAIL;
4343
4344 if (skip_past_comma (str) == SUCCESS)
4345 {
4346 /* #x, y -- ie explicit rotation by Y. */
4347 if (my_get_expression (&expr, str, GE_NO_PREFIX))
4348 return FAIL;
4349
4350 if (expr.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
4351 {
4352 inst.error = _("constant expression expected");
4353 return FAIL;
4354 }
4355
4356 value = expr.X_add_number;
4357 if (value < 0 || value > 30 || value % 2 != 0)
4358 {
4359 inst.error = _("invalid rotation");
4360 return FAIL;
4361 }
4362 if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
4363 {
4364 inst.error = _("invalid constant");
4365 return FAIL;
4366 }
4367
4368 /* Convert to decoded value. md_apply_fix will put it back. */
4369 inst.reloc.exp.X_add_number
4370 = (((inst.reloc.exp.X_add_number << (32 - value))
4371 | (inst.reloc.exp.X_add_number >> value)) & 0xffffffff);
4372 }
4373
4374 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
4375 inst.reloc.pc_rel = 0;
4376 return SUCCESS;
4377 }
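
/* Worked example of the pre-decoding above: "#4, 30" (the explicit
   rotated-immediate form) stores 4 ROR 30 = 16 in inst.reloc.exp, i.e. the
   constant the instruction will actually produce; md_apply_fix later
   re-derives an 8-bit value and an even rotation when it encodes the
   BFD_RELOC_ARM_IMMEDIATE fixup.  */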
4378
4379 /* Group relocation information.  Each entry in the table contains the
4380    textual name of the relocation, as it may appear in assembler source,
4381    where it must be followed by a colon.
4382    Along with this textual name are the relocation codes to be used if
4383    the corresponding instruction is an ALU instruction (ADD or SUB only),
4384    an LDR, an LDRS, or an LDC.  */
4385
4386 struct group_reloc_table_entry
4387 {
4388 const char *name;
4389 int alu_code;
4390 int ldr_code;
4391 int ldrs_code;
4392 int ldc_code;
4393 };
4394
4395 typedef enum
4396 {
4397 /* Varieties of non-ALU group relocation. */
4398
4399 GROUP_LDR,
4400 GROUP_LDRS,
4401 GROUP_LDC
4402 } group_reloc_type;
4403
4404 static struct group_reloc_table_entry group_reloc_table[] =
4405 { /* Program counter relative: */
4406 { "pc_g0_nc",
4407 BFD_RELOC_ARM_ALU_PC_G0_NC, /* ALU */
4408 0, /* LDR */
4409 0, /* LDRS */
4410 0 }, /* LDC */
4411 { "pc_g0",
4412 BFD_RELOC_ARM_ALU_PC_G0, /* ALU */
4413 BFD_RELOC_ARM_LDR_PC_G0, /* LDR */
4414 BFD_RELOC_ARM_LDRS_PC_G0, /* LDRS */
4415 BFD_RELOC_ARM_LDC_PC_G0 }, /* LDC */
4416 { "pc_g1_nc",
4417 BFD_RELOC_ARM_ALU_PC_G1_NC, /* ALU */
4418 0, /* LDR */
4419 0, /* LDRS */
4420 0 }, /* LDC */
4421 { "pc_g1",
4422 BFD_RELOC_ARM_ALU_PC_G1, /* ALU */
4423 BFD_RELOC_ARM_LDR_PC_G1, /* LDR */
4424 BFD_RELOC_ARM_LDRS_PC_G1, /* LDRS */
4425 BFD_RELOC_ARM_LDC_PC_G1 }, /* LDC */
4426 { "pc_g2",
4427 BFD_RELOC_ARM_ALU_PC_G2, /* ALU */
4428 BFD_RELOC_ARM_LDR_PC_G2, /* LDR */
4429 BFD_RELOC_ARM_LDRS_PC_G2, /* LDRS */
4430 BFD_RELOC_ARM_LDC_PC_G2 }, /* LDC */
4431 /* Section base relative */
4432 { "sb_g0_nc",
4433 BFD_RELOC_ARM_ALU_SB_G0_NC, /* ALU */
4434 0, /* LDR */
4435 0, /* LDRS */
4436 0 }, /* LDC */
4437 { "sb_g0",
4438 BFD_RELOC_ARM_ALU_SB_G0, /* ALU */
4439 BFD_RELOC_ARM_LDR_SB_G0, /* LDR */
4440 BFD_RELOC_ARM_LDRS_SB_G0, /* LDRS */
4441 BFD_RELOC_ARM_LDC_SB_G0 }, /* LDC */
4442 { "sb_g1_nc",
4443 BFD_RELOC_ARM_ALU_SB_G1_NC, /* ALU */
4444 0, /* LDR */
4445 0, /* LDRS */
4446 0 }, /* LDC */
4447 { "sb_g1",
4448 BFD_RELOC_ARM_ALU_SB_G1, /* ALU */
4449 BFD_RELOC_ARM_LDR_SB_G1, /* LDR */
4450 BFD_RELOC_ARM_LDRS_SB_G1, /* LDRS */
4451 BFD_RELOC_ARM_LDC_SB_G1 }, /* LDC */
4452 { "sb_g2",
4453 BFD_RELOC_ARM_ALU_SB_G2, /* ALU */
4454 BFD_RELOC_ARM_LDR_SB_G2, /* LDR */
4455 BFD_RELOC_ARM_LDRS_SB_G2, /* LDRS */
4456 BFD_RELOC_ARM_LDC_SB_G2 } }; /* LDC */
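
/* In assembler source these names appear between colons after an optional
   '#', e.g. something like "add r0, r0, #:pc_g0_nc:(foo)" would select the
   alu_code entry for pc_g0_nc, while "ldr r1, [r0, #:pc_g1:(foo)]" would
   select the ldr_code entry for pc_g1 (see the parsing routines below).  */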
4457
4458 /* Given the address of a pointer pointing to the textual name of a group
4459 relocation as may appear in assembler source, attempt to find its details
4460 in group_reloc_table. The pointer will be updated to the character after
4461 the trailing colon. On failure, FAIL will be returned; SUCCESS
4462 otherwise. On success, *entry will be updated to point at the relevant
4463 group_reloc_table entry. */
4464
4465 static int
4466 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
4467 {
4468 unsigned int i;
4469 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
4470 {
4471 int length = strlen (group_reloc_table[i].name);
4472
4473 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0 &&
4474 (*str)[length] == ':')
4475 {
4476 *out = &group_reloc_table[i];
4477 *str += (length + 1);
4478 return SUCCESS;
4479 }
4480 }
4481
4482 return FAIL;
4483 }
4484
4485 /* Parse a <shifter_operand> for an ARM data processing instruction
4486 (as for parse_shifter_operand) where group relocations are allowed:
4487
4488 #<immediate>
4489 #<immediate>, <rotate>
4490 #:<group_reloc>:<expression>
4491 <Rm>
4492 <Rm>, <shift>
4493
4494 where <group_reloc> is one of the strings defined in group_reloc_table.
4495 The hashes are optional.
4496
4497 Everything else is as for parse_shifter_operand. */
4498
4499 static parse_operand_result
4500 parse_shifter_operand_group_reloc (char **str, int i)
4501 {
4502 /* Determine if we have the sequence of characters #: or just :
4503 coming next. If we do, then we check for a group relocation.
4504 If we don't, punt the whole lot to parse_shifter_operand. */
4505
4506 if (((*str)[0] == '#' && (*str)[1] == ':')
4507 || (*str)[0] == ':')
4508 {
4509 struct group_reloc_table_entry *entry;
4510
4511 if ((*str)[0] == '#')
4512 (*str) += 2;
4513 else
4514 (*str)++;
4515
4516 /* Try to parse a group relocation. Anything else is an error. */
4517 if (find_group_reloc_table_entry (str, &entry) == FAIL)
4518 {
4519 inst.error = _("unknown group relocation");
4520 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4521 }
4522
4523 /* We now have the group relocation table entry corresponding to
4524 the name in the assembler source. Next, we parse the expression. */
4525 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
4526 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4527
4528 /* Record the relocation type (always the ALU variant here). */
4529 inst.reloc.type = entry->alu_code;
4530 assert (inst.reloc.type != 0);
4531
4532 return PARSE_OPERAND_SUCCESS;
4533 }
4534 else
4535 return parse_shifter_operand (str, i) == SUCCESS
4536 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
4537
4538 /* Never reached. */
4539 }
4540
4541 /* Parse all forms of an ARM address expression. Information is written
4542 to inst.operands[i] and/or inst.reloc.
4543
4544 Preindexed addressing (.preind=1):
4545
4546 [Rn, #offset] .reg=Rn .reloc.exp=offset
4547 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4548 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4549 .shift_kind=shift .reloc.exp=shift_imm
4550
4551 These three may have a trailing ! which causes .writeback to be set also.
4552
4553 Postindexed addressing (.postind=1, .writeback=1):
4554
4555 [Rn], #offset .reg=Rn .reloc.exp=offset
4556 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4557 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4558 .shift_kind=shift .reloc.exp=shift_imm
4559
4560 Unindexed addressing (.preind=0, .postind=0):
4561
4562 [Rn], {option} .reg=Rn .imm=option .immisreg=0
4563
4564 Other:
4565
4566 [Rn]{!} shorthand for [Rn,#0]{!}
4567 =immediate .isreg=0 .reloc.exp=immediate
4568 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
4569
4570 It is the caller's responsibility to check for addressing modes not
4571 supported by the instruction, and to set inst.reloc.type. */
4572
4573 static parse_operand_result
4574 parse_address_main (char **str, int i, int group_relocations,
4575 group_reloc_type group_type)
4576 {
4577 char *p = *str;
4578 int reg;
4579
4580 if (skip_past_char (&p, '[') == FAIL)
4581 {
4582 if (skip_past_char (&p, '=') == FAIL)
4583 {
4584 /* bare address - translate to PC-relative offset */
4585 inst.reloc.pc_rel = 1;
4586 inst.operands[i].reg = REG_PC;
4587 inst.operands[i].isreg = 1;
4588 inst.operands[i].preind = 1;
4589 }
4590 /* else a load-constant pseudo op, no special treatment needed here */
4591
4592 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4593 return PARSE_OPERAND_FAIL;
4594
4595 *str = p;
4596 return PARSE_OPERAND_SUCCESS;
4597 }
4598
4599 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
4600 {
4601 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
4602 return PARSE_OPERAND_FAIL;
4603 }
4604 inst.operands[i].reg = reg;
4605 inst.operands[i].isreg = 1;
4606
4607 if (skip_past_comma (&p) == SUCCESS)
4608 {
4609 inst.operands[i].preind = 1;
4610
4611 if (*p == '+') p++;
4612 else if (*p == '-') p++, inst.operands[i].negative = 1;
4613
4614 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4615 {
4616 inst.operands[i].imm = reg;
4617 inst.operands[i].immisreg = 1;
4618
4619 if (skip_past_comma (&p) == SUCCESS)
4620 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
4621 return PARSE_OPERAND_FAIL;
4622 }
4623 else if (skip_past_char (&p, ':') == SUCCESS)
4624 {
4625 /* FIXME: '@' should be used here, but it's filtered out by generic
4626 code before we get to see it here. This may be subject to
4627 change. */
4628 expressionS exp;
4629 my_get_expression (&exp, &p, GE_NO_PREFIX);
4630 if (exp.X_op != O_constant)
4631 {
4632 inst.error = _("alignment must be constant");
4633 return PARSE_OPERAND_FAIL;
4634 }
4635 inst.operands[i].imm = exp.X_add_number << 8;
4636 inst.operands[i].immisalign = 1;
4637 /* Alignments are not pre-indexes. */
4638 inst.operands[i].preind = 0;
4639 }
4640 else
4641 {
4642 if (inst.operands[i].negative)
4643 {
4644 inst.operands[i].negative = 0;
4645 p--;
4646 }
4647
4648 	      if (group_relocations
4649 		  && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
4650 		{
4652 struct group_reloc_table_entry *entry;
4653
4654 /* Skip over the #: or : sequence. */
4655 if (*p == '#')
4656 p += 2;
4657 else
4658 p++;
4659
4660 /* Try to parse a group relocation. Anything else is an
4661 error. */
4662 if (find_group_reloc_table_entry (&p, &entry) == FAIL)
4663 {
4664 inst.error = _("unknown group relocation");
4665 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4666 }
4667
4668 /* We now have the group relocation table entry corresponding to
4669 the name in the assembler source. Next, we parse the
4670 expression. */
4671 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4672 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4673
4674 /* Record the relocation type. */
4675 switch (group_type)
4676 {
4677 case GROUP_LDR:
4678 inst.reloc.type = entry->ldr_code;
4679 break;
4680
4681 case GROUP_LDRS:
4682 inst.reloc.type = entry->ldrs_code;
4683 break;
4684
4685 case GROUP_LDC:
4686 inst.reloc.type = entry->ldc_code;
4687 break;
4688
4689 default:
4690 assert (0);
4691 }
4692
4693 if (inst.reloc.type == 0)
4694 {
4695 inst.error = _("this group relocation is not allowed on this instruction");
4696 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4697 }
4698 }
4699 else
4700 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4701 return PARSE_OPERAND_FAIL;
4702 }
4703 }
4704
4705 if (skip_past_char (&p, ']') == FAIL)
4706 {
4707 inst.error = _("']' expected");
4708 return PARSE_OPERAND_FAIL;
4709 }
4710
4711 if (skip_past_char (&p, '!') == SUCCESS)
4712 inst.operands[i].writeback = 1;
4713
4714 else if (skip_past_comma (&p) == SUCCESS)
4715 {
4716 if (skip_past_char (&p, '{') == SUCCESS)
4717 {
4718 /* [Rn], {expr} - unindexed, with option */
4719 if (parse_immediate (&p, &inst.operands[i].imm,
4720 0, 255, TRUE) == FAIL)
4721 return PARSE_OPERAND_FAIL;
4722
4723 if (skip_past_char (&p, '}') == FAIL)
4724 {
4725 inst.error = _("'}' expected at end of 'option' field");
4726 return PARSE_OPERAND_FAIL;
4727 }
4728 if (inst.operands[i].preind)
4729 {
4730 inst.error = _("cannot combine index with option");
4731 return PARSE_OPERAND_FAIL;
4732 }
4733 *str = p;
4734 return PARSE_OPERAND_SUCCESS;
4735 }
4736 else
4737 {
4738 inst.operands[i].postind = 1;
4739 inst.operands[i].writeback = 1;
4740
4741 if (inst.operands[i].preind)
4742 {
4743 inst.error = _("cannot combine pre- and post-indexing");
4744 return PARSE_OPERAND_FAIL;
4745 }
4746
4747 if (*p == '+') p++;
4748 else if (*p == '-') p++, inst.operands[i].negative = 1;
4749
4750 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4751 {
4752 /* We might be using the immediate for alignment already. If we
4753 are, OR the register number into the low-order bits. */
4754 if (inst.operands[i].immisalign)
4755 inst.operands[i].imm |= reg;
4756 else
4757 inst.operands[i].imm = reg;
4758 inst.operands[i].immisreg = 1;
4759
4760 if (skip_past_comma (&p) == SUCCESS)
4761 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
4762 return PARSE_OPERAND_FAIL;
4763 }
4764 else
4765 {
4766 if (inst.operands[i].negative)
4767 {
4768 inst.operands[i].negative = 0;
4769 p--;
4770 }
4771 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4772 return PARSE_OPERAND_FAIL;
4773 }
4774 }
4775 }
4776
4777 /* If at this point neither .preind nor .postind is set, we have a
4778 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
4779 if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
4780 {
4781 inst.operands[i].preind = 1;
4782 inst.reloc.exp.X_op = O_constant;
4783 inst.reloc.exp.X_add_number = 0;
4784 }
4785 *str = p;
4786 return PARSE_OPERAND_SUCCESS;
4787 }
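
/* A few concrete examples of the forms handled above:
     ldr r0, [r1, #4]!        - preind + writeback, offset in inst.reloc
     ldr r0, [r1], r2, lsl #2 - postind + writeback, immisreg + shift
     ldr r0, [r1]             - shorthand for [r1, #0]
     ldr r0, =0x12345678      - load-constant pseudo, handled via '='
     ldr r0, label            - PC-relative, reg = REG_PC, pc_rel = 1  */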
4788
4789 static int
4790 parse_address (char **str, int i)
4791 {
4792 return parse_address_main (str, i, 0, 0) == PARSE_OPERAND_SUCCESS
4793 ? SUCCESS : FAIL;
4794 }
4795
4796 static parse_operand_result
4797 parse_address_group_reloc (char **str, int i, group_reloc_type type)
4798 {
4799 return parse_address_main (str, i, 1, type);
4800 }
4801
4802 /* Parse an operand for a MOVW or MOVT instruction. */
4803 static int
4804 parse_half (char **str)
4805 {
4806 char * p;
4807
4808 p = *str;
4809 skip_past_char (&p, '#');
4810 if (strncasecmp (p, ":lower16:", 9) == 0)
4811 inst.reloc.type = BFD_RELOC_ARM_MOVW;
4812 else if (strncasecmp (p, ":upper16:", 9) == 0)
4813 inst.reloc.type = BFD_RELOC_ARM_MOVT;
4814
4815 if (inst.reloc.type != BFD_RELOC_UNUSED)
4816 {
4817 p += 9;
4818 skip_whitespace(p);
4819 }
4820
4821 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4822 return FAIL;
4823
4824 if (inst.reloc.type == BFD_RELOC_UNUSED)
4825 {
4826 if (inst.reloc.exp.X_op != O_constant)
4827 {
4828 inst.error = _("constant expression expected");
4829 return FAIL;
4830 }
4831 if (inst.reloc.exp.X_add_number < 0
4832 || inst.reloc.exp.X_add_number > 0xffff)
4833 {
4834 inst.error = _("immediate value out of range");
4835 return FAIL;
4836 }
4837 }
4838 *str = p;
4839 return SUCCESS;
4840 }
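
/* For example, "movw r0, #:lower16:foo" and "movt r0, #:upper16:foo" come
   through here and select BFD_RELOC_ARM_MOVW / BFD_RELOC_ARM_MOVT
   respectively, while a plain "movw r0, #0x1234" keeps BFD_RELOC_UNUSED
   and is range-checked against 0..0xffff on the spot.  */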
4841
4842 /* Miscellaneous. */
4843
4844 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
4845 or a bitmask suitable to be or-ed into the ARM msr instruction. */
4846 static int
4847 parse_psr (char **str)
4848 {
4849 char *p;
4850 unsigned long psr_field;
4851 const struct asm_psr *psr;
4852 char *start;
4853
4854 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
4855 feature for ease of use and backwards compatibility. */
4856 p = *str;
4857 if (strncasecmp (p, "SPSR", 4) == 0)
4858 psr_field = SPSR_BIT;
4859 else if (strncasecmp (p, "CPSR", 4) == 0)
4860 psr_field = 0;
4861 else
4862 {
4863 start = p;
4864 do
4865 p++;
4866 while (ISALNUM (*p) || *p == '_');
4867
4868 psr = hash_find_n (arm_v7m_psr_hsh, start, p - start);
4869 if (!psr)
4870 return FAIL;
4871
4872 *str = p;
4873 return psr->field;
4874 }
4875
4876 p += 4;
4877 if (*p == '_')
4878 {
4879 /* A suffix follows. */
4880 p++;
4881 start = p;
4882
4883 do
4884 p++;
4885 while (ISALNUM (*p) || *p == '_');
4886
4887 psr = hash_find_n (arm_psr_hsh, start, p - start);
4888 if (!psr)
4889 goto error;
4890
4891 psr_field |= psr->field;
4892 }
4893 else
4894 {
4895 if (ISALNUM (*p))
4896 goto error; /* Garbage after "[CS]PSR". */
4897
4898 psr_field |= (PSR_c | PSR_f);
4899 }
4900 *str = p;
4901 return psr_field;
4902
4903 error:
4904 inst.error = _("flag for {c}psr instruction expected");
4905 return FAIL;
4906 }
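
/* For example, "CPSR_c" yields just PSR_c, "SPSR_f" yields
   SPSR_BIT | PSR_f, and a bare "CPSR" or "SPSR" implies both the c and f
   fields, for backwards compatibility.  Names that do not start with
   CPSR/SPSR (the v7-M special registers, e.g. "apsr" or "primask") are
   looked up in arm_v7m_psr_hsh instead.  */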
4907
4908 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
4909 value suitable for splatting into the AIF field of the instruction. */
4910
4911 static int
4912 parse_cps_flags (char **str)
4913 {
4914 int val = 0;
4915 int saw_a_flag = 0;
4916 char *s = *str;
4917
4918 for (;;)
4919 switch (*s++)
4920 {
4921 case '\0': case ',':
4922 goto done;
4923
4924 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
4925 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
4926 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
4927
4928 default:
4929 inst.error = _("unrecognized CPS flag");
4930 return FAIL;
4931 }
4932
4933 done:
4934 if (saw_a_flag == 0)
4935 {
4936 inst.error = _("missing CPS flags");
4937 return FAIL;
4938 }
4939
4940 *str = s - 1;
4941 return val;
4942 }
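
/* For example, the flag string in "cpsid if" returns 0x2 | 0x1 = 3 (the I
   and F bits of the AIF field), while the one in "cpsie a" returns 0x4.  */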
4943
4944 /* Parse an endian specifier ("BE" or "LE", case insensitive);
4945    returns nonzero for "BE", zero for "LE", FAIL for an error.  */
4946
4947 static int
4948 parse_endian_specifier (char **str)
4949 {
4950 int little_endian;
4951 char *s = *str;
4952
4953 if (strncasecmp (s, "BE", 2))
4954 little_endian = 0;
4955 else if (strncasecmp (s, "LE", 2))
4956 little_endian = 1;
4957 else
4958 {
4959 inst.error = _("valid endian specifiers are be or le");
4960 return FAIL;
4961 }
4962
4963 if (ISALNUM (s[2]) || s[2] == '_')
4964 {
4965 inst.error = _("valid endian specifiers are be or le");
4966 return FAIL;
4967 }
4968
4969 *str = s + 2;
4970 return little_endian;
4971 }
4972
4973 /* Parse a rotation specifier: ROR #0, #8, #16, or #24.  Returns a
4974    value suitable for poking into the rotate field of an sxt or sxta
4975    instruction, or FAIL on error.  */
4976
4977 static int
4978 parse_ror (char **str)
4979 {
4980 int rot;
4981 char *s = *str;
4982
4983 if (strncasecmp (s, "ROR", 3) == 0)
4984 s += 3;
4985 else
4986 {
4987 inst.error = _("missing rotation field after comma");
4988 return FAIL;
4989 }
4990
4991 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
4992 return FAIL;
4993
4994 switch (rot)
4995 {
4996 case 0: *str = s; return 0x0;
4997 case 8: *str = s; return 0x1;
4998 case 16: *str = s; return 0x2;
4999 case 24: *str = s; return 0x3;
5000
5001 default:
5002 inst.error = _("rotation can only be 0, 8, 16, or 24");
5003 return FAIL;
5004 }
5005 }
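
/* For example, the specifier "ror #16" returns 0x2 here; OP_oROR stores
   that in the operand's imm field, from where the sxt/sxta encoders place
   it in the instruction's rotate field.  */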
5006
5007 /* Parse a conditional code (from conds[] below). The value returned is in the
5008 range 0 .. 14, or FAIL. */
5009 static int
5010 parse_cond (char **str)
5011 {
5012 char *p, *q;
5013 const struct asm_cond *c;
5014
5015 p = q = *str;
5016 while (ISALPHA (*q))
5017 q++;
5018
5019 c = hash_find_n (arm_cond_hsh, p, q - p);
5020 if (!c)
5021 {
5022 inst.error = _("condition required");
5023 return FAIL;
5024 }
5025
5026 *str = q;
5027 return c->value;
5028 }
5029
5030 /* Parse an option for a barrier instruction. Returns the encoding for the
5031 option, or FAIL. */
5032 static int
5033 parse_barrier (char **str)
5034 {
5035 char *p, *q;
5036 const struct asm_barrier_opt *o;
5037
5038 p = q = *str;
5039 while (ISALPHA (*q))
5040 q++;
5041
5042 o = hash_find_n (arm_barrier_opt_hsh, p, q - p);
5043 if (!o)
5044 return FAIL;
5045
5046 *str = q;
5047 return o->value;
5048 }
5049
5050 /* Parse the operands of a table branch instruction. Similar to a memory
5051 operand. */
5052 static int
5053 parse_tb (char **str)
5054 {
5055 char * p = *str;
5056 int reg;
5057
5058 if (skip_past_char (&p, '[') == FAIL)
5059 {
5060 inst.error = _("'[' expected");
5061 return FAIL;
5062 }
5063
5064 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5065 {
5066 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5067 return FAIL;
5068 }
5069 inst.operands[0].reg = reg;
5070
5071 if (skip_past_comma (&p) == FAIL)
5072 {
5073 inst.error = _("',' expected");
5074 return FAIL;
5075 }
5076
5077 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5078 {
5079 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5080 return FAIL;
5081 }
5082 inst.operands[0].imm = reg;
5083
5084 if (skip_past_comma (&p) == SUCCESS)
5085 {
5086 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
5087 return FAIL;
5088 if (inst.reloc.exp.X_add_number != 1)
5089 {
5090 inst.error = _("invalid shift");
5091 return FAIL;
5092 }
5093 inst.operands[0].shifted = 1;
5094 }
5095
5096 if (skip_past_char (&p, ']') == FAIL)
5097 {
5098 inst.error = _("']' expected");
5099 return FAIL;
5100 }
5101 *str = p;
5102 return SUCCESS;
5103 }
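
/* For example, "tbb [r0, r1]" parses with just the two registers, while
   "tbh [r0, r1, lsl #1]" additionally sets .shifted; any LSL amount other
   than 1 is rejected by the X_add_number check above.  */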
5104
5105 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
5106 information on the types the operands can take and how they are encoded.
5107 Up to four operands may be read; this function handles setting the
5108 ".present" field for each read operand itself.
5109 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
5110 else returns FAIL. */
5111
5112 static int
5113 parse_neon_mov (char **str, int *which_operand)
5114 {
5115 int i = *which_operand, val;
5116 enum arm_reg_type rtype;
5117 char *ptr = *str;
5118 struct neon_type_el optype;
5119
5120 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
5121 {
5122 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
5123 inst.operands[i].reg = val;
5124 inst.operands[i].isscalar = 1;
5125 inst.operands[i].vectype = optype;
5126 inst.operands[i++].present = 1;
5127
5128 if (skip_past_comma (&ptr) == FAIL)
5129 goto wanted_comma;
5130
5131 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5132 goto wanted_arm;
5133
5134 inst.operands[i].reg = val;
5135 inst.operands[i].isreg = 1;
5136 inst.operands[i].present = 1;
5137 }
5138 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
5139 != FAIL)
5140 {
5141 /* Cases 0, 1, 2, 3, 5 (D only). */
5142 if (skip_past_comma (&ptr) == FAIL)
5143 goto wanted_comma;
5144
5145 inst.operands[i].reg = val;
5146 inst.operands[i].isreg = 1;
5147 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
5148 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5149 inst.operands[i].isvec = 1;
5150 inst.operands[i].vectype = optype;
5151 inst.operands[i++].present = 1;
5152
5153 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5154 {
5155 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
5156 Case 13: VMOV <Sd>, <Rm> */
5157 inst.operands[i].reg = val;
5158 inst.operands[i].isreg = 1;
5159 inst.operands[i].present = 1;
5160
5161 if (rtype == REG_TYPE_NQ)
5162 {
5163 first_error (_("can't use Neon quad register here"));
5164 return FAIL;
5165 }
5166 else if (rtype != REG_TYPE_VFS)
5167 {
5168 i++;
5169 if (skip_past_comma (&ptr) == FAIL)
5170 goto wanted_comma;
5171 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5172 goto wanted_arm;
5173 inst.operands[i].reg = val;
5174 inst.operands[i].isreg = 1;
5175 inst.operands[i].present = 1;
5176 }
5177 }
5178 else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
5179 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
5180 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
5181 Case 10: VMOV.F32 <Sd>, #<imm>
5182 Case 11: VMOV.F64 <Dd>, #<imm> */
5183 ;
5184 else if (parse_big_immediate (&ptr, i) == SUCCESS)
5185 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
5186 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
5187 ;
5188 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
5189 &optype)) != FAIL)
5190 {
5191 /* Case 0: VMOV<c><q> <Qd>, <Qm>
5192 Case 1: VMOV<c><q> <Dd>, <Dm>
5193 Case 8: VMOV.F32 <Sd>, <Sm>
5194 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
5195
5196 inst.operands[i].reg = val;
5197 inst.operands[i].isreg = 1;
5198 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
5199 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5200 inst.operands[i].isvec = 1;
5201 inst.operands[i].vectype = optype;
5202 inst.operands[i].present = 1;
5203
5204 if (skip_past_comma (&ptr) == SUCCESS)
5205 {
5206 /* Case 15. */
5207 i++;
5208
5209 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5210 goto wanted_arm;
5211
5212 inst.operands[i].reg = val;
5213 inst.operands[i].isreg = 1;
5214 inst.operands[i++].present = 1;
5215
5216 if (skip_past_comma (&ptr) == FAIL)
5217 goto wanted_comma;
5218
5219 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5220 goto wanted_arm;
5221
5222 inst.operands[i].reg = val;
5223 inst.operands[i].isreg = 1;
5224 inst.operands[i++].present = 1;
5225 }
5226 }
5227 else
5228 {
5229 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
5230 return FAIL;
5231 }
5232 }
5233 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5234 {
5235 /* Cases 6, 7. */
5236 inst.operands[i].reg = val;
5237 inst.operands[i].isreg = 1;
5238 inst.operands[i++].present = 1;
5239
5240 if (skip_past_comma (&ptr) == FAIL)
5241 goto wanted_comma;
5242
5243 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
5244 {
5245 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
5246 inst.operands[i].reg = val;
5247 inst.operands[i].isscalar = 1;
5248 inst.operands[i].present = 1;
5249 inst.operands[i].vectype = optype;
5250 }
5251 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5252 {
5253 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
5254 inst.operands[i].reg = val;
5255 inst.operands[i].isreg = 1;
5256 inst.operands[i++].present = 1;
5257
5258 if (skip_past_comma (&ptr) == FAIL)
5259 goto wanted_comma;
5260
5261 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
5262 == FAIL)
5263 {
5264 first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
5265 return FAIL;
5266 }
5267
5268 inst.operands[i].reg = val;
5269 inst.operands[i].isreg = 1;
5270 inst.operands[i].isvec = 1;
5271 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5272 inst.operands[i].vectype = optype;
5273 inst.operands[i].present = 1;
5274
5275 if (rtype == REG_TYPE_VFS)
5276 {
5277 /* Case 14. */
5278 i++;
5279 if (skip_past_comma (&ptr) == FAIL)
5280 goto wanted_comma;
5281 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
5282 &optype)) == FAIL)
5283 {
5284 first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
5285 return FAIL;
5286 }
5287 inst.operands[i].reg = val;
5288 inst.operands[i].isreg = 1;
5289 inst.operands[i].isvec = 1;
5290 inst.operands[i].issingle = 1;
5291 inst.operands[i].vectype = optype;
5292 inst.operands[i].present = 1;
5293 }
5294 }
5295 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
5296 != FAIL)
5297 {
5298 /* Case 13. */
5299 inst.operands[i].reg = val;
5300 inst.operands[i].isreg = 1;
5301 inst.operands[i].isvec = 1;
5302 inst.operands[i].issingle = 1;
5303 inst.operands[i].vectype = optype;
5304 inst.operands[i++].present = 1;
5305 }
5306 }
5307 else
5308 {
5309 first_error (_("parse error"));
5310 return FAIL;
5311 }
5312
5313 /* Successfully parsed the operands. Update args. */
5314 *which_operand = i;
5315 *str = ptr;
5316 return SUCCESS;
5317
5318 wanted_comma:
5319 first_error (_("expected comma"));
5320 return FAIL;
5321
5322 wanted_arm:
5323 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
5324 return FAIL;
5325 }
5326
5327 /* Matcher codes for parse_operands. */
5328 enum operand_parse_code
5329 {
5330 OP_stop, /* end of line */
5331
5332 OP_RR, /* ARM register */
5333 OP_RRnpc, /* ARM register, not r15 */
5334 OP_RRnpcb, /* ARM register, not r15, in square brackets */
5335 OP_RRw, /* ARM register, not r15, optional trailing ! */
5336 OP_RCP, /* Coprocessor number */
5337 OP_RCN, /* Coprocessor register */
5338 OP_RF, /* FPA register */
5339 OP_RVS, /* VFP single precision register */
5340 OP_RVD, /* VFP double precision register (0..15) */
5341 OP_RND, /* Neon double precision register (0..31) */
5342 OP_RNQ, /* Neon quad precision register */
5343 OP_RVSD, /* VFP single or double precision register */
5344 OP_RNDQ, /* Neon double or quad precision register */
5345 OP_RNSDQ, /* Neon single, double or quad precision register */
5346 OP_RNSC, /* Neon scalar D[X] */
5347 OP_RVC, /* VFP control register */
5348 OP_RMF, /* Maverick F register */
5349 OP_RMD, /* Maverick D register */
5350 OP_RMFX, /* Maverick FX register */
5351 OP_RMDX, /* Maverick DX register */
5352 OP_RMAX, /* Maverick AX register */
5353 OP_RMDS, /* Maverick DSPSC register */
5354 OP_RIWR, /* iWMMXt wR register */
5355 OP_RIWC, /* iWMMXt wC register */
5356 OP_RIWG, /* iWMMXt wCG register */
5357 OP_RXA, /* XScale accumulator register */
5358
5359 OP_REGLST, /* ARM register list */
5360 OP_VRSLST, /* VFP single-precision register list */
5361 OP_VRDLST, /* VFP double-precision register list */
5362 OP_VRSDLST, /* VFP single or double-precision register list (& quad) */
5363 OP_NRDLST, /* Neon double-precision register list (d0-d31, qN aliases) */
5364 OP_NSTRLST, /* Neon element/structure list */
5365
5366 OP_NILO, /* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...) */
5367 OP_RNDQ_I0, /* Neon D or Q reg, or immediate zero. */
5368 OP_RVSD_I0, /* VFP S or D reg, or immediate zero. */
5369 OP_RR_RNSC, /* ARM reg or Neon scalar. */
5370 OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar. */
5371 OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar. */
5372 OP_RND_RNSC, /* Neon D reg, or Neon scalar. */
5373 OP_VMOV, /* Neon VMOV operands. */
5374 OP_RNDQ_IMVNb,/* Neon D or Q reg, or immediate good for VMVN. */
5375 OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift. */
5376 OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */
5377
5378 OP_I0, /* immediate zero */
5379 OP_I7, /* immediate value 0 .. 7 */
5380 OP_I15, /* 0 .. 15 */
5381 OP_I16, /* 1 .. 16 */
5382 OP_I16z, /* 0 .. 16 */
5383 OP_I31, /* 0 .. 31 */
5384 OP_I31w, /* 0 .. 31, optional trailing ! */
5385 OP_I32, /* 1 .. 32 */
5386 OP_I32z, /* 0 .. 32 */
5387 OP_I63, /* 0 .. 63 */
5388 OP_I63s, /* -64 .. 63 */
5389 OP_I64, /* 1 .. 64 */
5390 OP_I64z, /* 0 .. 64 */
5391 OP_I255, /* 0 .. 255 */
5392
5393 OP_I4b, /* immediate, prefix optional, 1 .. 4 */
5394 OP_I7b, /* 0 .. 7 */
5395 OP_I15b, /* 0 .. 15 */
5396 OP_I31b, /* 0 .. 31 */
5397
5398 OP_SH, /* shifter operand */
5399 OP_SHG, /* shifter operand with possible group relocation */
5400 OP_ADDR, /* Memory address expression (any mode) */
5401 OP_ADDRGLDR, /* Mem addr expr (any mode) with possible LDR group reloc */
5402 OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
5403 OP_ADDRGLDC, /* Mem addr expr (any mode) with possible LDC group reloc */
5404 OP_EXP, /* arbitrary expression */
5405 OP_EXPi, /* same, with optional immediate prefix */
5406 OP_EXPr, /* same, with optional relocation suffix */
5407 OP_HALF, /* 0 .. 65535 or low/high reloc. */
5408
5409 OP_CPSF, /* CPS flags */
5410 OP_ENDI, /* Endianness specifier */
5411 OP_PSR, /* CPSR/SPSR mask for msr */
5412 OP_COND, /* conditional code */
5413 OP_TB, /* Table branch. */
5414
5415 OP_RVC_PSR, /* CPSR/SPSR mask for msr, or VFP control register. */
5416 OP_APSR_RR, /* ARM register or "APSR_nzcv". */
5417
5418 OP_RRnpc_I0, /* ARM register or literal 0 */
5419 OP_RR_EXr, /* ARM register or expression with opt. reloc suff. */
5420 OP_RR_EXi, /* ARM register or expression with imm prefix */
5421 OP_RF_IF, /* FPA register or immediate */
5422 OP_RIWR_RIWC, /* iWMMXt R or C reg */
5423 OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */
5424
5425 /* Optional operands. */
5426 OP_oI7b, /* immediate, prefix optional, 0 .. 7 */
5427 OP_oI31b, /* 0 .. 31 */
5428 OP_oI32b, /* 1 .. 32 */
5429 OP_oIffffb, /* 0 .. 65535 */
5430 OP_oI255c, /* curly-brace enclosed, 0 .. 255 */
5431
5432 OP_oRR, /* ARM register */
5433 OP_oRRnpc, /* ARM register, not the PC */
5434 OP_oRND, /* Optional Neon double precision register */
5435 OP_oRNQ, /* Optional Neon quad precision register */
5436 OP_oRNDQ, /* Optional Neon double or quad precision register */
5437 OP_oRNSDQ, /* Optional single, double or quad precision vector register */
5438 OP_oSHll, /* LSL immediate */
5439 OP_oSHar, /* ASR immediate */
5440 OP_oSHllar, /* LSL or ASR immediate */
5441 OP_oROR, /* ROR 0/8/16/24 */
5442 OP_oBARRIER, /* Option argument for a barrier instruction. */
5443
5444 OP_FIRST_OPTIONAL = OP_oI7b
5445 };
5446
5447 /* Generic instruction operand parser. This does no encoding and no
5448 semantic validation; it merely squirrels values away in the inst
5449 structure. Returns SUCCESS or FAIL depending on whether the
5450 specified grammar matched. */
5451 static int
5452 parse_operands (char *str, const unsigned char *pattern)
5453 {
5454 unsigned const char *upat = pattern;
5455 char *backtrack_pos = 0;
5456 const char *backtrack_error = 0;
5457 int i, val, backtrack_index = 0;
5458 enum arm_reg_type rtype;
5459 parse_operand_result result;
5460
5461 #define po_char_or_fail(chr) do { \
5462 if (skip_past_char (&str, chr) == FAIL) \
5463 goto bad_args; \
5464 } while (0)
5465
5466 #define po_reg_or_fail(regtype) do { \
5467 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5468 &inst.operands[i].vectype); \
5469 if (val == FAIL) \
5470 { \
5471 first_error (_(reg_expected_msgs[regtype])); \
5472 goto failure; \
5473 } \
5474 inst.operands[i].reg = val; \
5475 inst.operands[i].isreg = 1; \
5476 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5477 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
5478 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
5479 || rtype == REG_TYPE_VFD \
5480 || rtype == REG_TYPE_NQ); \
5481 } while (0)
5482
5483 #define po_reg_or_goto(regtype, label) do { \
5484 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5485 &inst.operands[i].vectype); \
5486 if (val == FAIL) \
5487 goto label; \
5488 \
5489 inst.operands[i].reg = val; \
5490 inst.operands[i].isreg = 1; \
5491 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5492 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
5493 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
5494 || rtype == REG_TYPE_VFD \
5495 || rtype == REG_TYPE_NQ); \
5496 } while (0)
5497
5498 #define po_imm_or_fail(min, max, popt) do { \
5499 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
5500 goto failure; \
5501 inst.operands[i].imm = val; \
5502 } while (0)
5503
5504 #define po_scalar_or_goto(elsz, label) do { \
5505 val = parse_scalar (&str, elsz, &inst.operands[i].vectype); \
5506 if (val == FAIL) \
5507 goto label; \
5508 inst.operands[i].reg = val; \
5509 inst.operands[i].isscalar = 1; \
5510 } while (0)
5511
5512 #define po_misc_or_fail(expr) do { \
5513 if (expr) \
5514 goto failure; \
5515 } while (0)
5516
5517 #define po_misc_or_fail_no_backtrack(expr) do { \
5518 result = expr; \
5519 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)\
5520 backtrack_pos = 0; \
5521 if (result != PARSE_OPERAND_SUCCESS) \
5522 goto failure; \
5523 } while (0)
5524
5525 skip_whitespace (str);
5526
5527 for (i = 0; upat[i] != OP_stop; i++)
5528 {
5529 if (upat[i] >= OP_FIRST_OPTIONAL)
5530 {
5531 /* Remember where we are in case we need to backtrack. */
5532 assert (!backtrack_pos);
5533 backtrack_pos = str;
5534 backtrack_error = inst.error;
5535 backtrack_index = i;
5536 }
5537
5538 if (i > 0)
5539 po_char_or_fail (',');
5540
5541 switch (upat[i])
5542 {
5543 /* Registers */
5544 case OP_oRRnpc:
5545 case OP_RRnpc:
5546 case OP_oRR:
5547 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
5548 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
5549 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
5550 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
5551 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
5552 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
5553 case OP_oRND:
5554 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
5555 case OP_RVC: po_reg_or_fail (REG_TYPE_VFC); break;
5556 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
5557 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
5558 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
5559 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
5560 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
5561 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
5562 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
5563 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
5564 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
5565 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
5566 case OP_oRNQ:
5567 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
5568 case OP_oRNDQ:
5569 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
5570 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
5571 case OP_oRNSDQ:
5572 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
5573
5574 /* Neon scalar. Using an element size of 8 means that some invalid
5575 scalars are accepted here, so deal with those in later code. */
5576 case OP_RNSC: po_scalar_or_goto (8, failure); break;
5577
5578 /* WARNING: We can expand to two operands here. This has the potential
5579 to totally confuse the backtracking mechanism! It will be OK at
5580 least as long as we don't try to use optional args as well,
5581 though. */
5582 case OP_NILO:
5583 {
5584 po_reg_or_goto (REG_TYPE_NDQ, try_imm);
5585 inst.operands[i].present = 1;
5586 i++;
5587 skip_past_comma (&str);
5588 po_reg_or_goto (REG_TYPE_NDQ, one_reg_only);
5589 break;
5590 one_reg_only:
5591 /* Optional register operand was omitted. Unfortunately, it's in
5592 operands[i-1] and we need it to be in inst.operands[i]. Fix that
5593 here (this is a bit grotty). */
5594 inst.operands[i] = inst.operands[i-1];
5595 inst.operands[i-1].present = 0;
5596 break;
5597 try_imm:
5598 /* Immediate gets verified properly later, so accept any now. */
5599 po_imm_or_fail (INT_MIN, INT_MAX, TRUE);
5600 }
5601 break;
5602
5603 case OP_RNDQ_I0:
5604 {
5605 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
5606 break;
5607 try_imm0:
5608 po_imm_or_fail (0, 0, TRUE);
5609 }
5610 break;
5611
5612 case OP_RVSD_I0:
5613 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
5614 break;
5615
5616 case OP_RR_RNSC:
5617 {
5618 po_scalar_or_goto (8, try_rr);
5619 break;
5620 try_rr:
5621 po_reg_or_fail (REG_TYPE_RN);
5622 }
5623 break;
5624
5625 case OP_RNSDQ_RNSC:
5626 {
5627 po_scalar_or_goto (8, try_nsdq);
5628 break;
5629 try_nsdq:
5630 po_reg_or_fail (REG_TYPE_NSDQ);
5631 }
5632 break;
5633
5634 case OP_RNDQ_RNSC:
5635 {
5636 po_scalar_or_goto (8, try_ndq);
5637 break;
5638 try_ndq:
5639 po_reg_or_fail (REG_TYPE_NDQ);
5640 }
5641 break;
5642
5643 case OP_RND_RNSC:
5644 {
5645 po_scalar_or_goto (8, try_vfd);
5646 break;
5647 try_vfd:
5648 po_reg_or_fail (REG_TYPE_VFD);
5649 }
5650 break;
5651
5652 case OP_VMOV:
5653 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
5654 not careful then bad things might happen. */
5655 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
5656 break;
5657
5658 case OP_RNDQ_IMVNb:
5659 {
5660 po_reg_or_goto (REG_TYPE_NDQ, try_mvnimm);
5661 break;
5662 try_mvnimm:
5663 /* There's a possibility of getting a 64-bit immediate here, so
5664 we need special handling. */
5665 if (parse_big_immediate (&str, i) == FAIL)
5666 {
5667 inst.error = _("immediate value is out of range");
5668 goto failure;
5669 }
5670 }
5671 break;
5672
5673 case OP_RNDQ_I63b:
5674 {
5675 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
5676 break;
5677 try_shimm:
5678 po_imm_or_fail (0, 63, TRUE);
5679 }
5680 break;
5681
5682 case OP_RRnpcb:
5683 po_char_or_fail ('[');
5684 po_reg_or_fail (REG_TYPE_RN);
5685 po_char_or_fail (']');
5686 break;
5687
5688 case OP_RRw:
5689 po_reg_or_fail (REG_TYPE_RN);
5690 if (skip_past_char (&str, '!') == SUCCESS)
5691 inst.operands[i].writeback = 1;
5692 break;
5693
5694 /* Immediates */
5695 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
5696 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
5697 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
5698 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
5699 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
5700 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
5701 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
5702 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
5703 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
5704 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
5705 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
5706 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
5707
5708 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
5709 case OP_oI7b:
5710 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
5711 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
5712 case OP_oI31b:
5713 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
5714 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
5715 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
5716
5717 /* Immediate variants */
5718 case OP_oI255c:
5719 po_char_or_fail ('{');
5720 po_imm_or_fail (0, 255, TRUE);
5721 po_char_or_fail ('}');
5722 break;
5723
5724 case OP_I31w:
5725 /* The expression parser chokes on a trailing !, so we have
5726 to find it first and zap it. */
5727 {
5728 char *s = str;
5729 while (*s && *s != ',')
5730 s++;
5731 if (s[-1] == '!')
5732 {
5733 s[-1] = '\0';
5734 inst.operands[i].writeback = 1;
5735 }
5736 po_imm_or_fail (0, 31, TRUE);
5737 if (str == s - 1)
5738 str = s;
5739 }
5740 break;
5741
5742 /* Expressions */
5743 case OP_EXPi: EXPi:
5744 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5745 GE_OPT_PREFIX));
5746 break;
5747
5748 case OP_EXP:
5749 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5750 GE_NO_PREFIX));
5751 break;
5752
5753 case OP_EXPr: EXPr:
5754 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5755 GE_NO_PREFIX));
5756 if (inst.reloc.exp.X_op == O_symbol)
5757 {
5758 val = parse_reloc (&str);
5759 if (val == -1)
5760 {
5761 inst.error = _("unrecognized relocation suffix");
5762 goto failure;
5763 }
5764 else if (val != BFD_RELOC_UNUSED)
5765 {
5766 inst.operands[i].imm = val;
5767 inst.operands[i].hasreloc = 1;
5768 }
5769 }
5770 break;
5771
5772 /* Operand for MOVW or MOVT. */
5773 case OP_HALF:
5774 po_misc_or_fail (parse_half (&str));
5775 break;
5776
5777 /* Register or expression */
5778 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
5779 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
5780
5781 /* Register or immediate */
5782 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
5783 I0: po_imm_or_fail (0, 0, FALSE); break;
5784
5785 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
5786 IF:
5787 if (!is_immediate_prefix (*str))
5788 goto bad_args;
5789 str++;
5790 val = parse_fpa_immediate (&str);
5791 if (val == FAIL)
5792 goto failure;
5793 /* FPA immediates are encoded as registers 8-15.
5794 parse_fpa_immediate has already applied the offset. */
5795 inst.operands[i].reg = val;
5796 inst.operands[i].isreg = 1;
5797 break;
5798
5799 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
5800 I32z: po_imm_or_fail (0, 32, FALSE); break;
5801
5802 /* Two kinds of register */
5803 case OP_RIWR_RIWC:
5804 {
5805 struct reg_entry *rege = arm_reg_parse_multi (&str);
5806 if (!rege
5807 || (rege->type != REG_TYPE_MMXWR
5808 && rege->type != REG_TYPE_MMXWC
5809 && rege->type != REG_TYPE_MMXWCG))
5810 {
5811 inst.error = _("iWMMXt data or control register expected");
5812 goto failure;
5813 }
5814 inst.operands[i].reg = rege->number;
5815 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
5816 }
5817 break;
5818
5819 case OP_RIWC_RIWG:
5820 {
5821 struct reg_entry *rege = arm_reg_parse_multi (&str);
5822 if (!rege
5823 || (rege->type != REG_TYPE_MMXWC
5824 && rege->type != REG_TYPE_MMXWCG))
5825 {
5826 inst.error = _("iWMMXt control register expected");
5827 goto failure;
5828 }
5829 inst.operands[i].reg = rege->number;
5830 inst.operands[i].isreg = 1;
5831 }
5832 break;
5833
5834 /* Misc */
5835 case OP_CPSF: val = parse_cps_flags (&str); break;
5836 case OP_ENDI: val = parse_endian_specifier (&str); break;
5837 case OP_oROR: val = parse_ror (&str); break;
5838 case OP_PSR: val = parse_psr (&str); break;
5839 case OP_COND: val = parse_cond (&str); break;
5840 case OP_oBARRIER:val = parse_barrier (&str); break;
5841
5842 case OP_RVC_PSR:
5843 po_reg_or_goto (REG_TYPE_VFC, try_psr);
5844 inst.operands[i].isvec = 1; /* Mark VFP control reg as vector. */
5845 break;
5846 try_psr:
5847 val = parse_psr (&str);
5848 break;
5849
5850 case OP_APSR_RR:
5851 po_reg_or_goto (REG_TYPE_RN, try_apsr);
5852 break;
5853 try_apsr:
5854 	  /* Parse "APSR_nzcv" operand (for FMSTAT-equivalent MRS
5855 instruction). */
5856 if (strncasecmp (str, "APSR_", 5) == 0)
5857 {
5858 unsigned found = 0;
5859 str += 5;
5860 while (found < 15)
5861 switch (*str++)
5862 {
5863 case 'c': found = (found & 1) ? 16 : found | 1; break;
5864 case 'n': found = (found & 2) ? 16 : found | 2; break;
5865 case 'z': found = (found & 4) ? 16 : found | 4; break;
5866 case 'v': found = (found & 8) ? 16 : found | 8; break;
5867 default: found = 16;
5868 }
5869 if (found != 15)
5870 goto failure;
5871 inst.operands[i].isvec = 1;
5872 }
5873 else
5874 goto failure;
5875 break;
5876
5877 case OP_TB:
5878 po_misc_or_fail (parse_tb (&str));
5879 break;
5880
5881 /* Register lists */
5882 case OP_REGLST:
5883 val = parse_reg_list (&str);
5884 if (*str == '^')
5885 {
5886 inst.operands[1].writeback = 1;
5887 str++;
5888 }
5889 break;
5890
5891 case OP_VRSLST:
5892 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
5893 break;
5894
5895 case OP_VRDLST:
5896 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
5897 break;
5898
5899 case OP_VRSDLST:
5900 /* Allow Q registers too. */
5901 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5902 REGLIST_NEON_D);
5903 if (val == FAIL)
5904 {
5905 inst.error = NULL;
5906 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5907 REGLIST_VFP_S);
5908 inst.operands[i].issingle = 1;
5909 }
5910 break;
5911
5912 case OP_NRDLST:
5913 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5914 REGLIST_NEON_D);
5915 break;
5916
5917 case OP_NSTRLST:
5918 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
5919 &inst.operands[i].vectype);
5920 break;
5921
5922 /* Addressing modes */
5923 case OP_ADDR:
5924 po_misc_or_fail (parse_address (&str, i));
5925 break;
5926
5927 case OP_ADDRGLDR:
5928 po_misc_or_fail_no_backtrack (
5929 parse_address_group_reloc (&str, i, GROUP_LDR));
5930 break;
5931
5932 case OP_ADDRGLDRS:
5933 po_misc_or_fail_no_backtrack (
5934 parse_address_group_reloc (&str, i, GROUP_LDRS));
5935 break;
5936
5937 case OP_ADDRGLDC:
5938 po_misc_or_fail_no_backtrack (
5939 parse_address_group_reloc (&str, i, GROUP_LDC));
5940 break;
5941
5942 case OP_SH:
5943 po_misc_or_fail (parse_shifter_operand (&str, i));
5944 break;
5945
5946 case OP_SHG:
5947 po_misc_or_fail_no_backtrack (
5948 parse_shifter_operand_group_reloc (&str, i));
5949 break;
5950
5951 case OP_oSHll:
5952 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
5953 break;
5954
5955 case OP_oSHar:
5956 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
5957 break;
5958
5959 case OP_oSHllar:
5960 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
5961 break;
5962
5963 default:
5964 as_fatal ("unhandled operand code %d", upat[i]);
5965 }
5966
5967 /* Various value-based sanity checks and shared operations. We
5968 do not signal immediate failures for the register constraints;
5969 this allows a syntax error to take precedence. */
5970 switch (upat[i])
5971 {
5972 case OP_oRRnpc:
5973 case OP_RRnpc:
5974 case OP_RRnpcb:
5975 case OP_RRw:
5976 case OP_RRnpc_I0:
5977 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
5978 inst.error = BAD_PC;
5979 break;
5980
5981 case OP_CPSF:
5982 case OP_ENDI:
5983 case OP_oROR:
5984 case OP_PSR:
5985 case OP_RVC_PSR:
5986 case OP_COND:
5987 case OP_oBARRIER:
5988 case OP_REGLST:
5989 case OP_VRSLST:
5990 case OP_VRDLST:
5991 case OP_VRSDLST:
5992 case OP_NRDLST:
5993 case OP_NSTRLST:
5994 if (val == FAIL)
5995 goto failure;
5996 inst.operands[i].imm = val;
5997 break;
5998
5999 default:
6000 break;
6001 }
6002
6003 /* If we get here, this operand was successfully parsed. */
6004 inst.operands[i].present = 1;
6005 continue;
6006
6007 bad_args:
6008 inst.error = BAD_ARGS;
6009
6010 failure:
6011 if (!backtrack_pos)
6012 {
6013 /* The parse routine should already have set inst.error, but set a
6014 	     default here just in case.  */
6015 if (!inst.error)
6016 inst.error = _("syntax error");
6017 return FAIL;
6018 }
6019
6020 /* Do not backtrack over a trailing optional argument that
6021 absorbed some text. We will only fail again, with the
6022 'garbage following instruction' error message, which is
6023 probably less helpful than the current one. */
6024 if (backtrack_index == i && backtrack_pos != str
6025 && upat[i+1] == OP_stop)
6026 {
6027 if (!inst.error)
6028 inst.error = _("syntax error");
6029 return FAIL;
6030 }
6031
6032 /* Try again, skipping the optional argument at backtrack_pos. */
6033 str = backtrack_pos;
6034 inst.error = backtrack_error;
6035 inst.operands[backtrack_index].present = 0;
6036 i = backtrack_index;
6037 backtrack_pos = 0;
6038 }
6039
6040 /* Check that we have parsed all the arguments. */
6041 if (*str != '\0' && !inst.error)
6042 inst.error = _("garbage following instruction");
6043
6044 return inst.error ? FAIL : SUCCESS;
6045 }
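
/* As an illustration, a hypothetical pattern { OP_RRnpc, OP_oRRnpc, OP_SH,
   OP_stop } would accept both "r0, r1, r2, lsl #2" and "r0, #42"; in the
   second case the optional middle register is skipped by the backtracking
   logic above.  */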
6046
6047 #undef po_char_or_fail
6048 #undef po_reg_or_fail
6049 #undef po_reg_or_goto
6050 #undef po_imm_or_fail
6051 #undef po_scalar_or_goto
6052 \f
6053 /* Shorthand macro for instruction encoding functions issuing errors. */
6054 #define constraint(expr, err) do { \
6055 if (expr) \
6056 { \
6057 inst.error = err; \
6058 return; \
6059 } \
6060 } while (0)
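
/* Typical use, as seen in the encoding functions below:
     constraint (inst.operands[0].reg == REG_PC, BAD_PC);
   i.e. set inst.error and bail out of the current do_* routine if the
   condition holds.  */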
6061
6062 /* Functions for operand encoding. ARM, then Thumb. */
6063
6064 #define rotate_left(v, n) (v << n | v >> (32 - n))
6065
6066 /* If VAL can be encoded in the immediate field of an ARM instruction,
6067 return the encoded form. Otherwise, return FAIL. */
6068
6069 static unsigned int
6070 encode_arm_immediate (unsigned int val)
6071 {
6072 unsigned int a, i;
6073
6074 for (i = 0; i < 32; i += 2)
6075 if ((a = rotate_left (val, i)) <= 0xff)
6076 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
6077
6078 return FAIL;
6079 }
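
/* Worked example: 0xff000000 is accepted at i = 8, since rotating it left
   by 8 gives 0xff; the function returns 0xff | (8 << 7) = 0x4ff, i.e.
   rotate field 4 and constant 0xff (the hardware rotates right by twice
   the field value).  0x101 has no such representation and yields FAIL.  */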
6080
6081 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
6082 return the encoded form. Otherwise, return FAIL. */
6083 static unsigned int
6084 encode_thumb32_immediate (unsigned int val)
6085 {
6086 unsigned int a, i;
6087
6088 if (val <= 0xff)
6089 return val;
6090
6091 for (i = 1; i <= 24; i++)
6092 {
6093 a = val >> i;
6094 if ((val & ~(0xff << i)) == 0)
6095 return ((val >> i) & 0x7f) | ((32 - i) << 7);
6096 }
6097
6098 a = val & 0xff;
6099 if (val == ((a << 16) | a))
6100 return 0x100 | a;
6101 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
6102 return 0x300 | a;
6103
6104 a = val & 0xff00;
6105 if (val == ((a << 16) | a))
6106 return 0x200 | (a >> 8);
6107
6108 return FAIL;
6109 }
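
/* Worked examples for the replicated patterns above: 0x00ff00ff returns
   0x1ff, 0xff00ff00 returns 0x2ff and 0xabababab returns 0x3ab; a single
   shifted byte such as 0xab000000 is handled by the loop and yields
   0x42b.  */
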
6110 /* Encode a VFP SP or DP register number into inst.instruction. */
6111
6112 static void
6113 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
6114 {
6115 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
6116 && reg > 15)
6117 {
6118 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
6119 {
6120 if (thumb_mode)
6121 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
6122 fpu_vfp_ext_v3);
6123 else
6124 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
6125 fpu_vfp_ext_v3);
6126 }
6127 else
6128 {
6129 first_error (_("D register out of range for selected VFP version"));
6130 return;
6131 }
6132 }
6133
6134 switch (pos)
6135 {
6136 case VFP_REG_Sd:
6137 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
6138 break;
6139
6140 case VFP_REG_Sn:
6141 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
6142 break;
6143
6144 case VFP_REG_Sm:
6145 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
6146 break;
6147
6148 case VFP_REG_Dd:
6149 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
6150 break;
6151
6152 case VFP_REG_Dn:
6153 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
6154 break;
6155
6156 case VFP_REG_Dm:
6157 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
6158 break;
6159
6160 default:
6161 abort ();
6162 }
6163 }
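
/* For example, encoding d17 in the Dd position places the low four bits
   (1) in bits 15-12 and the fifth bit in bit 22; since register numbers
   above 15 only exist in VFPv3, the check above either records the VFPv3
   feature use or rejects the register before the fields are filled in.  */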
6164
6165 /* Encode a <shift> in an ARM-format instruction. The immediate,
6166 if any, is handled by md_apply_fix. */
6167 static void
6168 encode_arm_shift (int i)
6169 {
6170 if (inst.operands[i].shift_kind == SHIFT_RRX)
6171 inst.instruction |= SHIFT_ROR << 5;
6172 else
6173 {
6174 inst.instruction |= inst.operands[i].shift_kind << 5;
6175 if (inst.operands[i].immisreg)
6176 {
6177 inst.instruction |= SHIFT_BY_REG;
6178 inst.instruction |= inst.operands[i].imm << 8;
6179 }
6180 else
6181 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6182 }
6183 }
6184
6185 static void
6186 encode_arm_shifter_operand (int i)
6187 {
6188 if (inst.operands[i].isreg)
6189 {
6190 inst.instruction |= inst.operands[i].reg;
6191 encode_arm_shift (i);
6192 }
6193 else
6194 inst.instruction |= INST_IMMEDIATE;
6195 }
6196
6197 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
6198 static void
6199 encode_arm_addr_mode_common (int i, bfd_boolean is_t)
6200 {
6201 assert (inst.operands[i].isreg);
6202 inst.instruction |= inst.operands[i].reg << 16;
6203
6204 if (inst.operands[i].preind)
6205 {
6206 if (is_t)
6207 {
6208 inst.error = _("instruction does not accept preindexed addressing");
6209 return;
6210 }
6211 inst.instruction |= PRE_INDEX;
6212 if (inst.operands[i].writeback)
6213 inst.instruction |= WRITE_BACK;
6214
6215 }
6216 else if (inst.operands[i].postind)
6217 {
6218 assert (inst.operands[i].writeback);
6219 if (is_t)
6220 inst.instruction |= WRITE_BACK;
6221 }
6222 else /* unindexed - only for coprocessor */
6223 {
6224 inst.error = _("instruction does not accept unindexed addressing");
6225 return;
6226 }
6227
6228 if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
6229 && (((inst.instruction & 0x000f0000) >> 16)
6230 == ((inst.instruction & 0x0000f000) >> 12)))
6231 as_warn ((inst.instruction & LOAD_BIT)
6232 ? _("destination register same as write-back base")
6233 : _("source register same as write-back base"));
6234 }
6235
6236 /* inst.operands[i] was set up by parse_address. Encode it into an
6237 ARM-format mode 2 load or store instruction. If is_t is true,
6238 reject forms that cannot be used with a T instruction (i.e. not
6239 post-indexed). */
6240 static void
6241 encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
6242 {
6243 encode_arm_addr_mode_common (i, is_t);
6244
6245 if (inst.operands[i].immisreg)
6246 {
6247 inst.instruction |= INST_IMMEDIATE; /* yes, this is backwards */
6248 inst.instruction |= inst.operands[i].imm;
6249 if (!inst.operands[i].negative)
6250 inst.instruction |= INDEX_UP;
6251 if (inst.operands[i].shifted)
6252 {
6253 if (inst.operands[i].shift_kind == SHIFT_RRX)
6254 inst.instruction |= SHIFT_ROR << 5;
6255 else
6256 {
6257 inst.instruction |= inst.operands[i].shift_kind << 5;
6258 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6259 }
6260 }
6261 }
6262 else /* immediate offset in inst.reloc */
6263 {
6264 if (inst.reloc.type == BFD_RELOC_UNUSED)
6265 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
6266 }
6267 }
6268
6269 /* inst.operands[i] was set up by parse_address. Encode it into an
6270 ARM-format mode 3 load or store instruction. Reject forms that
6271 cannot be used with such instructions. If is_t is true, reject
6272 forms that cannot be used with a T instruction (i.e. not
6273 post-indexed). */
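/* Mode 3 (halfword and doubleword) addressing allows only an unshifted
   register or an 8-bit immediate offset, so "ldrh r0, [r1, #-4]" is
   accepted while "ldrh r0, [r1, r2, lsl #2]" is rejected below.  */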
6274 static void
6275 encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
6276 {
6277 if (inst.operands[i].immisreg && inst.operands[i].shifted)
6278 {
6279 inst.error = _("instruction does not accept scaled register index");
6280 return;
6281 }
6282
6283 encode_arm_addr_mode_common (i, is_t);
6284
6285 if (inst.operands[i].immisreg)
6286 {
6287 inst.instruction |= inst.operands[i].imm;
6288 if (!inst.operands[i].negative)
6289 inst.instruction |= INDEX_UP;
6290 }
6291 else /* immediate offset in inst.reloc */
6292 {
6293 inst.instruction |= HWOFFSET_IMM;
6294 if (inst.reloc.type == BFD_RELOC_UNUSED)
6295 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
6296 }
6297 }
6298
6299 /* inst.operands[i] was set up by parse_address. Encode it into an
6300 ARM-format instruction. Reject all forms which cannot be encoded
6301 into a coprocessor load/store instruction. If wb_ok is false,
6302 reject use of writeback; if unind_ok is false, reject use of
6303 unindexed addressing. If reloc_override is not 0, use it instead
6304 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
6305 (in which case it is preserved). */
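/* For example, "ldc p5, c0, [r1, #16]!" is a pre-indexed form with
   writeback; its offset is left to a BFD_RELOC_ARM_CP_OFF_IMM fixup
   (coprocessor offsets are stored in words).  */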
6306
6307 static int
6308 encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
6309 {
6310 inst.instruction |= inst.operands[i].reg << 16;
6311
6312 assert (!(inst.operands[i].preind && inst.operands[i].postind));
6313
6314 if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
6315 {
6316 assert (!inst.operands[i].writeback);
6317 if (!unind_ok)
6318 {
6319 inst.error = _("instruction does not support unindexed addressing");
6320 return FAIL;
6321 }
6322 inst.instruction |= inst.operands[i].imm;
6323 inst.instruction |= INDEX_UP;
6324 return SUCCESS;
6325 }
6326
6327 if (inst.operands[i].preind)
6328 inst.instruction |= PRE_INDEX;
6329
6330 if (inst.operands[i].writeback)
6331 {
6332 if (inst.operands[i].reg == REG_PC)
6333 {
6334 inst.error = _("pc may not be used with write-back");
6335 return FAIL;
6336 }
6337 if (!wb_ok)
6338 {
6339 inst.error = _("instruction does not support writeback");
6340 return FAIL;
6341 }
6342 inst.instruction |= WRITE_BACK;
6343 }
6344
6345 if (reloc_override)
6346 inst.reloc.type = reloc_override;
6347 else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
6348 || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
6349 && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
6350 {
6351 if (thumb_mode)
6352 inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
6353 else
6354 inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
6355 }
6356
6357 return SUCCESS;
6358 }
6359
6360 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
6361 Determine whether it can be performed with a move instruction; if
6362 it can, convert inst.instruction to that move instruction and
6363 return 1; if it can't, convert inst.instruction to a literal-pool
6364 load and return 0. If this is not a valid thing to do in the
6365 current context, set inst.error and return 1.
6366
6367 inst.operands[i] describes the destination register. */
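/* For example, "ldr r0, =0xff" can be rewritten as "mov r0, #0xff" and
   "ldr r0, =0xffffffff" as "mvn r0, #0", whereas a constant such as
   0x12345678, which fits neither pattern, goes into the literal pool and
   is loaded PC-relative.  */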
6368
6369 static int
6370 move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
6371 {
6372 unsigned long tbit;
6373
6374 if (thumb_p)
6375 tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
6376 else
6377 tbit = LOAD_BIT;
6378
6379 if ((inst.instruction & tbit) == 0)
6380 {
6381 inst.error = _("invalid pseudo operation");
6382 return 1;
6383 }
6384 if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
6385 {
6386 inst.error = _("constant expression expected");
6387 return 1;
6388 }
6389 if (inst.reloc.exp.X_op == O_constant)
6390 {
6391 if (thumb_p)
6392 {
6393 if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
6394 {
6395 /* This can be done with a mov(1) instruction. */
6396 inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
6397 inst.instruction |= inst.reloc.exp.X_add_number;
6398 return 1;
6399 }
6400 }
6401 else
6402 {
6403 int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
6404 if (value != FAIL)
6405 {
6406 /* This can be done with a mov instruction. */
6407 inst.instruction &= LITERAL_MASK;
6408 inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
6409 inst.instruction |= value & 0xfff;
6410 return 1;
6411 }
6412
6413 value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
6414 if (value != FAIL)
6415 {
6416 /* This can be done with a mvn instruction. */
6417 inst.instruction &= LITERAL_MASK;
6418 inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
6419 inst.instruction |= value & 0xfff;
6420 return 1;
6421 }
6422 }
6423 }
6424
6425 if (add_to_lit_pool () == FAIL)
6426 {
6427 inst.error = _("literal pool insertion failed");
6428 return 1;
6429 }
6430 inst.operands[1].reg = REG_PC;
6431 inst.operands[1].isreg = 1;
6432 inst.operands[1].preind = 1;
6433 inst.reloc.pc_rel = 1;
6434 inst.reloc.type = (thumb_p
6435 ? BFD_RELOC_ARM_THUMB_OFFSET
6436 : (mode_3
6437 ? BFD_RELOC_ARM_HWLITERAL
6438 : BFD_RELOC_ARM_LITERAL));
6439 return 0;
6440 }
6441
6442 /* Functions for instruction encoding, sorted by subarchitecture.
6443 First some generics; their names are taken from the conventional
6444 bit positions for register arguments in ARM format instructions. */
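/* For example, do_rd_rn below puts Rd in bits 12-15 and Rn in bits 16-19,
   the conventional ARM data-processing register fields.  */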
6445
6446 static void
6447 do_noargs (void)
6448 {
6449 }
6450
6451 static void
6452 do_rd (void)
6453 {
6454 inst.instruction |= inst.operands[0].reg << 12;
6455 }
6456
6457 static void
6458 do_rd_rm (void)
6459 {
6460 inst.instruction |= inst.operands[0].reg << 12;
6461 inst.instruction |= inst.operands[1].reg;
6462 }
6463
6464 static void
6465 do_rd_rn (void)
6466 {
6467 inst.instruction |= inst.operands[0].reg << 12;
6468 inst.instruction |= inst.operands[1].reg << 16;
6469 }
6470
6471 static void
6472 do_rn_rd (void)
6473 {
6474 inst.instruction |= inst.operands[0].reg << 16;
6475 inst.instruction |= inst.operands[1].reg << 12;
6476 }
6477
6478 static void
6479 do_rd_rm_rn (void)
6480 {
6481 unsigned Rn = inst.operands[2].reg;
6482 /* Enforce restrictions on SWP instruction. */
6483 if ((inst.instruction & 0x0fbfffff) == 0x01000090)
6484 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
6485 _("Rn must not overlap other operands"));
6486 inst.instruction |= inst.operands[0].reg << 12;
6487 inst.instruction |= inst.operands[1].reg;
6488 inst.instruction |= Rn << 16;
6489 }
6490
6491 static void
6492 do_rd_rn_rm (void)
6493 {
6494 inst.instruction |= inst.operands[0].reg << 12;
6495 inst.instruction |= inst.operands[1].reg << 16;
6496 inst.instruction |= inst.operands[2].reg;
6497 }
6498
6499 static void
6500 do_rm_rd_rn (void)
6501 {
6502 inst.instruction |= inst.operands[0].reg;
6503 inst.instruction |= inst.operands[1].reg << 12;
6504 inst.instruction |= inst.operands[2].reg << 16;
6505 }
6506
6507 static void
6508 do_imm0 (void)
6509 {
6510 inst.instruction |= inst.operands[0].imm;
6511 }
6512
6513 static void
6514 do_rd_cpaddr (void)
6515 {
6516 inst.instruction |= inst.operands[0].reg << 12;
6517 encode_arm_cp_address (1, TRUE, TRUE, 0);
6518 }
6519
6520 /* ARM instructions, in alphabetical order by function name (except
6521 that wrapper functions appear immediately after the function they
6522 wrap). */
6523
6524 /* This is a pseudo-op of the form "adr rd, label" to be converted
6525 into a relative address of the form "add rd, pc, #label-.-8". */
6526
6527 static void
6528 do_adr (void)
6529 {
6530 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
6531
6532 /* Frag hacking will turn this into a sub instruction if the offset turns
6533 out to be negative. */
6534 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
6535 inst.reloc.pc_rel = 1;
6536 inst.reloc.exp.X_add_number -= 8;
6537 }
6538
6539 /* This is a pseudo-op of the form "adrl rd, label" to be converted
6540 into a relative address of the form:
6541 	add rd, pc, #low(label-.-8)
6542 	add rd, rd, #high(label-.-8)  */
6543
6544 static void
6545 do_adrl (void)
6546 {
6547 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
6548
6549 /* Frag hacking will turn this into a sub instruction if the offset turns
6550 out to be negative. */
6551 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
6552 inst.reloc.pc_rel = 1;
6553 inst.size = INSN_SIZE * 2;
6554 inst.reloc.exp.X_add_number -= 8;
6555 }
6556
6557 static void
6558 do_arit (void)
6559 {
6560 if (!inst.operands[1].present)
6561 inst.operands[1].reg = inst.operands[0].reg;
6562 inst.instruction |= inst.operands[0].reg << 12;
6563 inst.instruction |= inst.operands[1].reg << 16;
6564 encode_arm_shifter_operand (2);
6565 }
6566
6567 static void
6568 do_barrier (void)
6569 {
6570 if (inst.operands[0].present)
6571 {
6572 constraint ((inst.instruction & 0xf0) != 0x40
6573 && inst.operands[0].imm != 0xf,
6574 "bad barrier type");
6575 inst.instruction |= inst.operands[0].imm;
6576 }
6577 else
6578 inst.instruction |= 0xf;
6579 }
6580
6581 static void
6582 do_bfc (void)
6583 {
6584 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
6585 constraint (msb > 32, _("bit-field extends past end of register"));
6586 /* The instruction encoding stores the LSB and MSB,
6587 not the LSB and width. */
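  /* E.g. "bfc r0, #4, #8" encodes lsb = 4 (bits 7-11) and
     msb = 4 + 8 - 1 = 11 (bits 16-20).  */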
6588 inst.instruction |= inst.operands[0].reg << 12;
6589 inst.instruction |= inst.operands[1].imm << 7;
6590 inst.instruction |= (msb - 1) << 16;
6591 }
6592
6593 static void
6594 do_bfi (void)
6595 {
6596 unsigned int msb;
6597
6598 /* #0 in second position is alternative syntax for bfc, which is
6599 the same instruction but with REG_PC in the Rm field. */
6600 if (!inst.operands[1].isreg)
6601 inst.operands[1].reg = REG_PC;
6602
6603 msb = inst.operands[2].imm + inst.operands[3].imm;
6604 constraint (msb > 32, _("bit-field extends past end of register"));
6605 /* The instruction encoding stores the LSB and MSB,
6606 not the LSB and width. */
6607 inst.instruction |= inst.operands[0].reg << 12;
6608 inst.instruction |= inst.operands[1].reg;
6609 inst.instruction |= inst.operands[2].imm << 7;
6610 inst.instruction |= (msb - 1) << 16;
6611 }
6612
6613 static void
6614 do_bfx (void)
6615 {
6616 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
6617 _("bit-field extends past end of register"));
6618 inst.instruction |= inst.operands[0].reg << 12;
6619 inst.instruction |= inst.operands[1].reg;
6620 inst.instruction |= inst.operands[2].imm << 7;
6621 inst.instruction |= (inst.operands[3].imm - 1) << 16;
6622 }
6623
6624 /* ARM V5 breakpoint instruction (argument parse)
6625 BKPT <16 bit unsigned immediate>
6626 Instruction is not conditional.
6627 The bit pattern given in insns[] has the COND_ALWAYS condition,
6628 and it is an error if the caller tried to override that. */
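/* For example, "bkpt 0x1234" places 0x123 in bits 19:8 and 0x4 in bits 3:0
   of the opcode.  */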
6629
6630 static void
6631 do_bkpt (void)
6632 {
6633 /* Top 12 of 16 bits to bits 19:8. */
6634 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
6635
6636 /* Bottom 4 of 16 bits to bits 3:0. */
6637 inst.instruction |= inst.operands[0].imm & 0xf;
6638 }
6639
6640 static void
6641 encode_branch (int default_reloc)
6642 {
6643 if (inst.operands[0].hasreloc)
6644 {
6645 constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32,
6646 _("the only suffix valid here is '(plt)'"));
6647 inst.reloc.type = BFD_RELOC_ARM_PLT32;
6648 }
6649 else
6650 {
6651 inst.reloc.type = default_reloc;
6652 }
6653 inst.reloc.pc_rel = 1;
6654 }
6655
6656 static void
6657 do_branch (void)
6658 {
6659 #ifdef OBJ_ELF
6660 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6661 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
6662 else
6663 #endif
6664 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
6665 }
6666
6667 static void
6668 do_bl (void)
6669 {
6670 #ifdef OBJ_ELF
6671 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6672 {
6673 if (inst.cond == COND_ALWAYS)
6674 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
6675 else
6676 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
6677 }
6678 else
6679 #endif
6680 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
6681 }
6682
6683 /* ARM V5 branch-link-exchange instruction (argument parse)
6684 BLX <target_addr> ie BLX(1)
6685 BLX{<condition>} <Rm> ie BLX(2)
6686 Unfortunately, there are two different opcodes for this mnemonic.
6687 So, the insns[].value is not used, and the code here zaps values
6688 into inst.instruction.
6689 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
6690
6691 static void
6692 do_blx (void)
6693 {
6694 if (inst.operands[0].isreg)
6695 {
6696 /* Arg is a register; the opcode provided by insns[] is correct.
6697 It is not illegal to do "blx pc", just useless. */
6698 if (inst.operands[0].reg == REG_PC)
6699 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
6700
6701 inst.instruction |= inst.operands[0].reg;
6702 }
6703 else
6704 {
6705 /* Arg is an address; this instruction cannot be executed
6706 conditionally, and the opcode must be adjusted. */
6707 constraint (inst.cond != COND_ALWAYS, BAD_COND);
6708 inst.instruction = 0xfa000000;
6709 #ifdef OBJ_ELF
6710 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6711 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
6712 else
6713 #endif
6714 encode_branch (BFD_RELOC_ARM_PCREL_BLX);
6715 }
6716 }
6717
6718 static void
6719 do_bx (void)
6720 {
6721 if (inst.operands[0].reg == REG_PC)
6722 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
6723
6724 inst.instruction |= inst.operands[0].reg;
6725 }
6726
6727
6728 /* ARM v5TEJ. Jump to Jazelle code. */
6729
6730 static void
6731 do_bxj (void)
6732 {
6733 if (inst.operands[0].reg == REG_PC)
6734 as_tsktsk (_("use of r15 in bxj is not really useful"));
6735
6736 inst.instruction |= inst.operands[0].reg;
6737 }
6738
6739 /* Co-processor data operation:
6740 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
6741 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
6742 static void
6743 do_cdp (void)
6744 {
6745 inst.instruction |= inst.operands[0].reg << 8;
6746 inst.instruction |= inst.operands[1].imm << 20;
6747 inst.instruction |= inst.operands[2].reg << 12;
6748 inst.instruction |= inst.operands[3].reg << 16;
6749 inst.instruction |= inst.operands[4].reg;
6750 inst.instruction |= inst.operands[5].imm << 5;
6751 }
6752
6753 static void
6754 do_cmp (void)
6755 {
6756 inst.instruction |= inst.operands[0].reg << 16;
6757 encode_arm_shifter_operand (1);
6758 }
6759
6760 /* Transfer between coprocessor and ARM registers.
6761 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
6762 MRC2
6763 MCR{cond}
6764 MCR2
6765
6766 No special properties. */
6767
6768 static void
6769 do_co_reg (void)
6770 {
6771 inst.instruction |= inst.operands[0].reg << 8;
6772 inst.instruction |= inst.operands[1].imm << 21;
6773 inst.instruction |= inst.operands[2].reg << 12;
6774 inst.instruction |= inst.operands[3].reg << 16;
6775 inst.instruction |= inst.operands[4].reg;
6776 inst.instruction |= inst.operands[5].imm << 5;
6777 }
6778
6779 /* Transfer between coprocessor register and pair of ARM registers.
6780 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
6781 MCRR2
6782 MRRC{cond}
6783 MRRC2
6784
6785 Two XScale instructions are special cases of these:
6786
6787 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
6788 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
6789
6790 Result unpredictable if Rd or Rn is R15. */
6791
6792 static void
6793 do_co_reg2c (void)
6794 {
6795 inst.instruction |= inst.operands[0].reg << 8;
6796 inst.instruction |= inst.operands[1].imm << 4;
6797 inst.instruction |= inst.operands[2].reg << 12;
6798 inst.instruction |= inst.operands[3].reg << 16;
6799 inst.instruction |= inst.operands[4].reg;
6800 }
6801
6802 static void
6803 do_cpsi (void)
6804 {
6805 inst.instruction |= inst.operands[0].imm << 6;
6806 inst.instruction |= inst.operands[1].imm;
6807 }
6808
6809 static void
6810 do_dbg (void)
6811 {
6812 inst.instruction |= inst.operands[0].imm;
6813 }
6814
6815 static void
6816 do_it (void)
6817 {
6818 /* There is no IT instruction in ARM mode. We
6819 process it but do not generate code for it. */
6820 inst.size = 0;
6821 }
6822
6823 static void
6824 do_ldmstm (void)
6825 {
6826 int base_reg = inst.operands[0].reg;
6827 int range = inst.operands[1].imm;
6828
6829 inst.instruction |= base_reg << 16;
6830 inst.instruction |= range;
6831
6832 if (inst.operands[1].writeback)
6833 inst.instruction |= LDM_TYPE_2_OR_3;
6834
6835 if (inst.operands[0].writeback)
6836 {
6837 inst.instruction |= WRITE_BACK;
6838 /* Check for unpredictable uses of writeback. */
6839 if (inst.instruction & LOAD_BIT)
6840 {
6841 /* Not allowed in LDM type 2. */
6842 if ((inst.instruction & LDM_TYPE_2_OR_3)
6843 && ((range & (1 << REG_PC)) == 0))
6844 as_warn (_("writeback of base register is UNPREDICTABLE"));
6845 /* Only allowed if base reg not in list for other types. */
6846 else if (range & (1 << base_reg))
6847 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
6848 }
6849 else /* STM. */
6850 {
6851 /* Not allowed for type 2. */
6852 if (inst.instruction & LDM_TYPE_2_OR_3)
6853 as_warn (_("writeback of base register is UNPREDICTABLE"));
6854 /* Only allowed if base reg not in list, or first in list. */
6855 else if ((range & (1 << base_reg))
6856 && (range & ((1 << base_reg) - 1)))
6857 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
6858 }
6859 }
6860 }
6861
6862 /* ARMv5TE load-consecutive (argument parse)
6863 Mode is like LDRH.
6864
6865 LDRccD R, mode
6866 STRccD R, mode. */
6867
6868 static void
6869 do_ldrd (void)
6870 {
6871 constraint (inst.operands[0].reg % 2 != 0,
6872 _("first destination register must be even"));
6873 constraint (inst.operands[1].present
6874 && inst.operands[1].reg != inst.operands[0].reg + 1,
6875 _("can only load two consecutive registers"));
6876 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
6877 constraint (!inst.operands[2].isreg, _("'[' expected"));
6878
6879 if (!inst.operands[1].present)
6880 inst.operands[1].reg = inst.operands[0].reg + 1;
6881
6882 if (inst.instruction & LOAD_BIT)
6883 {
6884 /* encode_arm_addr_mode_3 will diagnose overlap between the base
6885 register and the first register written; we have to diagnose
6886 overlap between the base and the second register written here. */
6887
6888 if (inst.operands[2].reg == inst.operands[1].reg
6889 && (inst.operands[2].writeback || inst.operands[2].postind))
6890 as_warn (_("base register written back, and overlaps "
6891 "second destination register"));
6892
6893 /* For an index-register load, the index register must not overlap the
6894 destination (even if not write-back). */
6895 else if (inst.operands[2].immisreg
6896 && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
6897 || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
6898 as_warn (_("index register overlaps destination register"));
6899 }
6900
6901 inst.instruction |= inst.operands[0].reg << 12;
6902 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
6903 }
6904
6905 static void
6906 do_ldrex (void)
6907 {
6908 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
6909 || inst.operands[1].postind || inst.operands[1].writeback
6910 || inst.operands[1].immisreg || inst.operands[1].shifted
6911 || inst.operands[1].negative
6912 /* This can arise if the programmer has written
6913 strex rN, rM, foo
6914 or if they have mistakenly used a register name as the last
6915 operand, e.g.:
6916 strex rN, rM, rX
6917 It is very difficult to distinguish between these two cases
6918 because "rX" might actually be a label. ie the register
6919 name has been occluded by a symbol of the same name. So we
6920 just generate a general 'bad addressing mode' type error
6921 message and leave it up to the programmer to discover the
6922 true cause and fix their mistake. */
6923 || (inst.operands[1].reg == REG_PC),
6924 BAD_ADDR_MODE);
6925
6926 constraint (inst.reloc.exp.X_op != O_constant
6927 || inst.reloc.exp.X_add_number != 0,
6928 _("offset must be zero in ARM encoding"));
6929
6930 inst.instruction |= inst.operands[0].reg << 12;
6931 inst.instruction |= inst.operands[1].reg << 16;
6932 inst.reloc.type = BFD_RELOC_UNUSED;
6933 }
6934
6935 static void
6936 do_ldrexd (void)
6937 {
6938 constraint (inst.operands[0].reg % 2 != 0,
6939 _("even register required"));
6940 constraint (inst.operands[1].present
6941 && inst.operands[1].reg != inst.operands[0].reg + 1,
6942 _("can only load two consecutive registers"));
6943 /* If op 1 were present and equal to PC, this function wouldn't
6944 have been called in the first place. */
6945 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
6946
6947 inst.instruction |= inst.operands[0].reg << 12;
6948 inst.instruction |= inst.operands[2].reg << 16;
6949 }
6950
6951 static void
6952 do_ldst (void)
6953 {
6954 inst.instruction |= inst.operands[0].reg << 12;
6955 if (!inst.operands[1].isreg)
6956 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
6957 return;
6958 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
6959 }
6960
6961 static void
6962 do_ldstt (void)
6963 {
6964 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
6965 reject [Rn,...]. */
6966 if (inst.operands[1].preind)
6967 {
6968 constraint (inst.reloc.exp.X_op != O_constant ||
6969 inst.reloc.exp.X_add_number != 0,
6970 _("this instruction requires a post-indexed address"));
6971
6972 inst.operands[1].preind = 0;
6973 inst.operands[1].postind = 1;
6974 inst.operands[1].writeback = 1;
6975 }
6976 inst.instruction |= inst.operands[0].reg << 12;
6977 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
6978 }
6979
6980 /* Halfword and signed-byte load/store operations. */
6981
6982 static void
6983 do_ldstv4 (void)
6984 {
6985 inst.instruction |= inst.operands[0].reg << 12;
6986 if (!inst.operands[1].isreg)
6987 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
6988 return;
6989 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
6990 }
6991
6992 static void
6993 do_ldsttv4 (void)
6994 {
6995 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
6996 reject [Rn,...]. */
6997 if (inst.operands[1].preind)
6998 {
6999 constraint (inst.reloc.exp.X_op != O_constant ||
7000 inst.reloc.exp.X_add_number != 0,
7001 _("this instruction requires a post-indexed address"));
7002
7003 inst.operands[1].preind = 0;
7004 inst.operands[1].postind = 1;
7005 inst.operands[1].writeback = 1;
7006 }
7007 inst.instruction |= inst.operands[0].reg << 12;
7008 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
7009 }
7010
7011 /* Co-processor register load/store.
7012 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
7013 static void
7014 do_lstc (void)
7015 {
7016 inst.instruction |= inst.operands[0].reg << 8;
7017 inst.instruction |= inst.operands[1].reg << 12;
7018 encode_arm_cp_address (2, TRUE, TRUE, 0);
7019 }
7020
7021 static void
7022 do_mlas (void)
7023 {
7024 /* This restriction does not apply to mls (nor to mla in v6, but
7025 that's hard to detect at present). */
7026 if (inst.operands[0].reg == inst.operands[1].reg
7027 && !(inst.instruction & 0x00400000))
7028 as_tsktsk (_("rd and rm should be different in mla"));
7029
7030 inst.instruction |= inst.operands[0].reg << 16;
7031 inst.instruction |= inst.operands[1].reg;
7032 inst.instruction |= inst.operands[2].reg << 8;
7033 inst.instruction |= inst.operands[3].reg << 12;
7034
7035 }
7036
7037 static void
7038 do_mov (void)
7039 {
7040 inst.instruction |= inst.operands[0].reg << 12;
7041 encode_arm_shifter_operand (1);
7042 }
7043
7044 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
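/* For example, "movw r0, #0x1234" stores 0x234 in bits 0-11 and 0x1 in
   bits 16-19; a :lower16:/:upper16: operand instead leaves the value to
   the MOVW/MOVT relocation.  */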
7045 static void
7046 do_mov16 (void)
7047 {
7048 bfd_vma imm;
7049 bfd_boolean top;
7050
7051 top = (inst.instruction & 0x00400000) != 0;
7052 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
7053 _(":lower16: not allowed this instruction"));
7054 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
7055 _(":upper16: not allowed instruction"));
7056 inst.instruction |= inst.operands[0].reg << 12;
7057 if (inst.reloc.type == BFD_RELOC_UNUSED)
7058 {
7059 imm = inst.reloc.exp.X_add_number;
7060 /* The value is in two pieces: 0:11, 16:19. */
7061 inst.instruction |= (imm & 0x00000fff);
7062 inst.instruction |= (imm & 0x0000f000) << 4;
7063 }
7064 }
7065
7066 static void do_vfp_nsyn_opcode (const char *);
7067
7068 static int
7069 do_vfp_nsyn_mrs (void)
7070 {
7071 if (inst.operands[0].isvec)
7072 {
7073 if (inst.operands[1].reg != 1)
7074 first_error (_("operand 1 must be FPSCR"));
7075 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
7076 memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
7077 do_vfp_nsyn_opcode ("fmstat");
7078 }
7079 else if (inst.operands[1].isvec)
7080 do_vfp_nsyn_opcode ("fmrx");
7081 else
7082 return FAIL;
7083
7084 return SUCCESS;
7085 }
7086
7087 static int
7088 do_vfp_nsyn_msr (void)
7089 {
7090 if (inst.operands[0].isvec)
7091 do_vfp_nsyn_opcode ("fmxr");
7092 else
7093 return FAIL;
7094
7095 return SUCCESS;
7096 }
7097
7098 static void
7099 do_mrs (void)
7100 {
7101 if (do_vfp_nsyn_mrs () == SUCCESS)
7102 return;
7103
7104 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
7105 constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
7106 != (PSR_c|PSR_f),
7107 _("'CPSR' or 'SPSR' expected"));
7108 inst.instruction |= inst.operands[0].reg << 12;
7109 inst.instruction |= (inst.operands[1].imm & SPSR_BIT);
7110 }
7111
7112 /* Two possible forms:
7113 "{C|S}PSR_<field>, Rm",
7114 "{C|S}PSR_f, #expression". */
7115
7116 static void
7117 do_msr (void)
7118 {
7119 if (do_vfp_nsyn_msr () == SUCCESS)
7120 return;
7121
7122 inst.instruction |= inst.operands[0].imm;
7123 if (inst.operands[1].isreg)
7124 inst.instruction |= inst.operands[1].reg;
7125 else
7126 {
7127 inst.instruction |= INST_IMMEDIATE;
7128 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
7129 inst.reloc.pc_rel = 0;
7130 }
7131 }
7132
7133 static void
7134 do_mul (void)
7135 {
7136 if (!inst.operands[2].present)
7137 inst.operands[2].reg = inst.operands[0].reg;
7138 inst.instruction |= inst.operands[0].reg << 16;
7139 inst.instruction |= inst.operands[1].reg;
7140 inst.instruction |= inst.operands[2].reg << 8;
7141
7142 if (inst.operands[0].reg == inst.operands[1].reg)
7143 as_tsktsk (_("rd and rm should be different in mul"));
7144 }
7145
7146 /* Long Multiply Parser
7147 UMULL RdLo, RdHi, Rm, Rs
7148 SMULL RdLo, RdHi, Rm, Rs
7149 UMLAL RdLo, RdHi, Rm, Rs
7150 SMLAL RdLo, RdHi, Rm, Rs. */
7151
7152 static void
7153 do_mull (void)
7154 {
7155 inst.instruction |= inst.operands[0].reg << 12;
7156 inst.instruction |= inst.operands[1].reg << 16;
7157 inst.instruction |= inst.operands[2].reg;
7158 inst.instruction |= inst.operands[3].reg << 8;
7159
7160 /* rdhi, rdlo and rm must all be different. */
7161 if (inst.operands[0].reg == inst.operands[1].reg
7162 || inst.operands[0].reg == inst.operands[2].reg
7163 || inst.operands[1].reg == inst.operands[2].reg)
7164 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
7165 }
7166
7167 static void
7168 do_nop (void)
7169 {
7170 if (inst.operands[0].present)
7171 {
7172 /* Architectural NOP hints are CPSR sets with no bits selected. */
7173 inst.instruction &= 0xf0000000;
7174 inst.instruction |= 0x0320f000 + inst.operands[0].imm;
7175 }
7176 }
7177
7178 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
7179 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
7180 Condition defaults to COND_ALWAYS.
7181 Error if Rd, Rn or Rm are R15. */
7182
7183 static void
7184 do_pkhbt (void)
7185 {
7186 inst.instruction |= inst.operands[0].reg << 12;
7187 inst.instruction |= inst.operands[1].reg << 16;
7188 inst.instruction |= inst.operands[2].reg;
7189 if (inst.operands[3].present)
7190 encode_arm_shift (3);
7191 }
7192
7193 /* ARM V6 PKHTB (Argument Parse). */
7194
7195 static void
7196 do_pkhtb (void)
7197 {
7198 if (!inst.operands[3].present)
7199 {
7200 /* If the shift specifier is omitted, turn the instruction
7201 into pkhbt rd, rm, rn. */
7202 inst.instruction &= 0xfff00010;
7203 inst.instruction |= inst.operands[0].reg << 12;
7204 inst.instruction |= inst.operands[1].reg;
7205 inst.instruction |= inst.operands[2].reg << 16;
7206 }
7207 else
7208 {
7209 inst.instruction |= inst.operands[0].reg << 12;
7210 inst.instruction |= inst.operands[1].reg << 16;
7211 inst.instruction |= inst.operands[2].reg;
7212 encode_arm_shift (3);
7213 }
7214 }
7215
7216 /* ARMv5TE: Preload-Cache
7217
7218 PLD <addr_mode>
7219
7220 Syntactically, like LDR with B=1, W=0, L=1. */
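/* A typical use is "pld [r1, #32]"; the address is encoded with the
   LDR-style addressing produced by encode_arm_addr_mode_2 below.  */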
7221
7222 static void
7223 do_pld (void)
7224 {
7225 constraint (!inst.operands[0].isreg,
7226 _("'[' expected after PLD mnemonic"));
7227 constraint (inst.operands[0].postind,
7228 _("post-indexed expression used in preload instruction"));
7229 constraint (inst.operands[0].writeback,
7230 _("writeback used in preload instruction"));
7231 constraint (!inst.operands[0].preind,
7232 _("unindexed addressing used in preload instruction"));
7233 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
7234 }
7235
7236 /* ARMv7: PLI <addr_mode> */
7237 static void
7238 do_pli (void)
7239 {
7240 constraint (!inst.operands[0].isreg,
7241 _("'[' expected after PLI mnemonic"));
7242 constraint (inst.operands[0].postind,
7243 _("post-indexed expression used in preload instruction"));
7244 constraint (inst.operands[0].writeback,
7245 _("writeback used in preload instruction"));
7246 constraint (!inst.operands[0].preind,
7247 _("unindexed addressing used in preload instruction"));
7248 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
7249 inst.instruction &= ~PRE_INDEX;
7250 }
7251
7252 static void
7253 do_push_pop (void)
7254 {
7255 inst.operands[1] = inst.operands[0];
7256 memset (&inst.operands[0], 0, sizeof inst.operands[0]);
7257 inst.operands[0].isreg = 1;
7258 inst.operands[0].writeback = 1;
7259 inst.operands[0].reg = REG_SP;
7260 do_ldmstm ();
7261 }
7262
7263 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
7264 word at the specified address and the following word
7265 respectively.
7266 Unconditionally executed.
7267 Error if Rn is R15. */
7268
7269 static void
7270 do_rfe (void)
7271 {
7272 inst.instruction |= inst.operands[0].reg << 16;
7273 if (inst.operands[0].writeback)
7274 inst.instruction |= WRITE_BACK;
7275 }
7276
7277 /* ARM V6 ssat (argument parse). */
7278
7279 static void
7280 do_ssat (void)
7281 {
7282 inst.instruction |= inst.operands[0].reg << 12;
7283 inst.instruction |= (inst.operands[1].imm - 1) << 16;
7284 inst.instruction |= inst.operands[2].reg;
7285
7286 if (inst.operands[3].present)
7287 encode_arm_shift (3);
7288 }
7289
7290 /* ARM V6 usat (argument parse). */
7291
7292 static void
7293 do_usat (void)
7294 {
7295 inst.instruction |= inst.operands[0].reg << 12;
7296 inst.instruction |= inst.operands[1].imm << 16;
7297 inst.instruction |= inst.operands[2].reg;
7298
7299 if (inst.operands[3].present)
7300 encode_arm_shift (3);
7301 }
7302
7303 /* ARM V6 ssat16 (argument parse). */
7304
7305 static void
7306 do_ssat16 (void)
7307 {
7308 inst.instruction |= inst.operands[0].reg << 12;
7309 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
7310 inst.instruction |= inst.operands[2].reg;
7311 }
7312
7313 static void
7314 do_usat16 (void)
7315 {
7316 inst.instruction |= inst.operands[0].reg << 12;
7317 inst.instruction |= inst.operands[1].imm << 16;
7318 inst.instruction |= inst.operands[2].reg;
7319 }
7320
7321 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
7322 preserving the other bits.
7323
7324 setend <endian_specifier>, where <endian_specifier> is either
7325 BE or LE. */
7326
7327 static void
7328 do_setend (void)
7329 {
7330 if (inst.operands[0].imm)
7331 inst.instruction |= 0x200;
7332 }
7333
7334 static void
7335 do_shift (void)
7336 {
7337 unsigned int Rm = (inst.operands[1].present
7338 ? inst.operands[1].reg
7339 : inst.operands[0].reg);
7340
7341 inst.instruction |= inst.operands[0].reg << 12;
7342 inst.instruction |= Rm;
7343 if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
7344 {
7345 inst.instruction |= inst.operands[2].reg << 8;
7346 inst.instruction |= SHIFT_BY_REG;
7347 }
7348 else
7349 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7350 }
7351
7352 static void
7353 do_smc (void)
7354 {
7355 inst.reloc.type = BFD_RELOC_ARM_SMC;
7356 inst.reloc.pc_rel = 0;
7357 }
7358
7359 static void
7360 do_swi (void)
7361 {
7362 inst.reloc.type = BFD_RELOC_ARM_SWI;
7363 inst.reloc.pc_rel = 0;
7364 }
7365
7366 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
7367 SMLAxy{cond} Rd,Rm,Rs,Rn
7368 SMLAWy{cond} Rd,Rm,Rs,Rn
7369 Error if any register is R15. */
7370
7371 static void
7372 do_smla (void)
7373 {
7374 inst.instruction |= inst.operands[0].reg << 16;
7375 inst.instruction |= inst.operands[1].reg;
7376 inst.instruction |= inst.operands[2].reg << 8;
7377 inst.instruction |= inst.operands[3].reg << 12;
7378 }
7379
7380 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
7381 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
7382 Error if any register is R15.
7383 Warning if Rdlo == Rdhi. */
7384
7385 static void
7386 do_smlal (void)
7387 {
7388 inst.instruction |= inst.operands[0].reg << 12;
7389 inst.instruction |= inst.operands[1].reg << 16;
7390 inst.instruction |= inst.operands[2].reg;
7391 inst.instruction |= inst.operands[3].reg << 8;
7392
7393 if (inst.operands[0].reg == inst.operands[1].reg)
7394 as_tsktsk (_("rdhi and rdlo must be different"));
7395 }
7396
7397 /* ARM V5E (El Segundo) signed-multiply (argument parse)
7398 SMULxy{cond} Rd,Rm,Rs
7399 Error if any register is R15. */
7400
7401 static void
7402 do_smul (void)
7403 {
7404 inst.instruction |= inst.operands[0].reg << 16;
7405 inst.instruction |= inst.operands[1].reg;
7406 inst.instruction |= inst.operands[2].reg << 8;
7407 }
7408
7409 /* ARM V6 srs (argument parse). */
7410
7411 static void
7412 do_srs (void)
7413 {
7414 inst.instruction |= inst.operands[0].imm;
7415 if (inst.operands[0].writeback)
7416 inst.instruction |= WRITE_BACK;
7417 }
7418
7419 /* ARM V6 strex (argument parse). */
7420
7421 static void
7422 do_strex (void)
7423 {
7424 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
7425 || inst.operands[2].postind || inst.operands[2].writeback
7426 || inst.operands[2].immisreg || inst.operands[2].shifted
7427 || inst.operands[2].negative
7428 /* See comment in do_ldrex(). */
7429 || (inst.operands[2].reg == REG_PC),
7430 BAD_ADDR_MODE);
7431
7432 constraint (inst.operands[0].reg == inst.operands[1].reg
7433 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
7434
7435 constraint (inst.reloc.exp.X_op != O_constant
7436 || inst.reloc.exp.X_add_number != 0,
7437 _("offset must be zero in ARM encoding"));
7438
7439 inst.instruction |= inst.operands[0].reg << 12;
7440 inst.instruction |= inst.operands[1].reg;
7441 inst.instruction |= inst.operands[2].reg << 16;
7442 inst.reloc.type = BFD_RELOC_UNUSED;
7443 }
7444
7445 static void
7446 do_strexd (void)
7447 {
7448 constraint (inst.operands[1].reg % 2 != 0,
7449 _("even register required"));
7450 constraint (inst.operands[2].present
7451 && inst.operands[2].reg != inst.operands[1].reg + 1,
7452 _("can only store two consecutive registers"));
7453 /* If op 2 were present and equal to PC, this function wouldn't
7454 have been called in the first place. */
7455 constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));
7456
7457 constraint (inst.operands[0].reg == inst.operands[1].reg
7458 || inst.operands[0].reg == inst.operands[1].reg + 1
7459 || inst.operands[0].reg == inst.operands[3].reg,
7460 BAD_OVERLAP);
7461
7462 inst.instruction |= inst.operands[0].reg << 12;
7463 inst.instruction |= inst.operands[1].reg;
7464 inst.instruction |= inst.operands[3].reg << 16;
7465 }
7466
7467 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
7468 extends it to 32-bits, and adds the result to a value in another
7469 register. You can specify a rotation by 0, 8, 16, or 24 bits
7470 before extracting the 16-bit value.
7471 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
7472 Condition defaults to COND_ALWAYS.
7473 Error if any register uses R15. */
7474
7475 static void
7476 do_sxtah (void)
7477 {
7478 inst.instruction |= inst.operands[0].reg << 12;
7479 inst.instruction |= inst.operands[1].reg << 16;
7480 inst.instruction |= inst.operands[2].reg;
7481 inst.instruction |= inst.operands[3].imm << 10;
7482 }
7483
7484 /* ARM V6 SXTH.
7485
7486 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
7487 Condition defaults to COND_ALWAYS.
7488 Error if any register uses R15. */
7489
7490 static void
7491 do_sxth (void)
7492 {
7493 inst.instruction |= inst.operands[0].reg << 12;
7494 inst.instruction |= inst.operands[1].reg;
7495 inst.instruction |= inst.operands[2].imm << 10;
7496 }
7497 \f
7498 /* VFP instructions. In a logical order: SP variant first, monad
7499 before dyad, arithmetic then move then load/store. */
7500
7501 static void
7502 do_vfp_sp_monadic (void)
7503 {
7504 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7505 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
7506 }
7507
7508 static void
7509 do_vfp_sp_dyadic (void)
7510 {
7511 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7512 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
7513 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
7514 }
7515
7516 static void
7517 do_vfp_sp_compare_z (void)
7518 {
7519 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7520 }
7521
7522 static void
7523 do_vfp_dp_sp_cvt (void)
7524 {
7525 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7526 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
7527 }
7528
7529 static void
7530 do_vfp_sp_dp_cvt (void)
7531 {
7532 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7533 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
7534 }
7535
7536 static void
7537 do_vfp_reg_from_sp (void)
7538 {
7539 inst.instruction |= inst.operands[0].reg << 12;
7540 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
7541 }
7542
7543 static void
7544 do_vfp_reg2_from_sp2 (void)
7545 {
7546 constraint (inst.operands[2].imm != 2,
7547 _("only two consecutive VFP SP registers allowed here"));
7548 inst.instruction |= inst.operands[0].reg << 12;
7549 inst.instruction |= inst.operands[1].reg << 16;
7550 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
7551 }
7552
7553 static void
7554 do_vfp_sp_from_reg (void)
7555 {
7556 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
7557 inst.instruction |= inst.operands[1].reg << 12;
7558 }
7559
7560 static void
7561 do_vfp_sp2_from_reg2 (void)
7562 {
7563 constraint (inst.operands[0].imm != 2,
7564 _("only two consecutive VFP SP registers allowed here"));
7565 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
7566 inst.instruction |= inst.operands[1].reg << 12;
7567 inst.instruction |= inst.operands[2].reg << 16;
7568 }
7569
7570 static void
7571 do_vfp_sp_ldst (void)
7572 {
7573 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7574 encode_arm_cp_address (1, FALSE, TRUE, 0);
7575 }
7576
7577 static void
7578 do_vfp_dp_ldst (void)
7579 {
7580 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7581 encode_arm_cp_address (1, FALSE, TRUE, 0);
7582 }
7583
7584
7585 static void
7586 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
7587 {
7588 if (inst.operands[0].writeback)
7589 inst.instruction |= WRITE_BACK;
7590 else
7591 constraint (ldstm_type != VFP_LDSTMIA,
7592 _("this addressing mode requires base-register writeback"));
7593 inst.instruction |= inst.operands[0].reg << 16;
7594 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
7595 inst.instruction |= inst.operands[1].imm;
7596 }
7597
7598 static void
7599 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
7600 {
7601 int count;
7602
7603 if (inst.operands[0].writeback)
7604 inst.instruction |= WRITE_BACK;
7605 else
7606 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
7607 _("this addressing mode requires base-register writeback"));
7608
7609 inst.instruction |= inst.operands[0].reg << 16;
7610 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7611
7612 count = inst.operands[1].imm << 1;
7613 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
7614 count += 1;
7615
7616 inst.instruction |= count;
7617 }
7618
7619 static void
7620 do_vfp_sp_ldstmia (void)
7621 {
7622 vfp_sp_ldstm (VFP_LDSTMIA);
7623 }
7624
7625 static void
7626 do_vfp_sp_ldstmdb (void)
7627 {
7628 vfp_sp_ldstm (VFP_LDSTMDB);
7629 }
7630
7631 static void
7632 do_vfp_dp_ldstmia (void)
7633 {
7634 vfp_dp_ldstm (VFP_LDSTMIA);
7635 }
7636
7637 static void
7638 do_vfp_dp_ldstmdb (void)
7639 {
7640 vfp_dp_ldstm (VFP_LDSTMDB);
7641 }
7642
7643 static void
7644 do_vfp_xp_ldstmia (void)
7645 {
7646 vfp_dp_ldstm (VFP_LDSTMIAX);
7647 }
7648
7649 static void
7650 do_vfp_xp_ldstmdb (void)
7651 {
7652 vfp_dp_ldstm (VFP_LDSTMDBX);
7653 }
7654
7655 static void
7656 do_vfp_dp_rd_rm (void)
7657 {
7658 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7659 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
7660 }
7661
7662 static void
7663 do_vfp_dp_rn_rd (void)
7664 {
7665 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
7666 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7667 }
7668
7669 static void
7670 do_vfp_dp_rd_rn (void)
7671 {
7672 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7673 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7674 }
7675
7676 static void
7677 do_vfp_dp_rd_rn_rm (void)
7678 {
7679 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7680 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7681 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
7682 }
7683
7684 static void
7685 do_vfp_dp_rd (void)
7686 {
7687 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7688 }
7689
7690 static void
7691 do_vfp_dp_rm_rd_rn (void)
7692 {
7693 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
7694 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7695 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
7696 }
7697
7698 /* VFPv3 instructions. */
7699 static void
7700 do_vfp_sp_const (void)
7701 {
7702 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7703 inst.instruction |= (inst.operands[1].imm & 15) << 16;
7704 inst.instruction |= (inst.operands[1].imm >> 4);
7705 }
7706
7707 static void
7708 do_vfp_dp_const (void)
7709 {
7710 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7711 inst.instruction |= (inst.operands[1].imm & 15) << 16;
7712 inst.instruction |= (inst.operands[1].imm >> 4);
7713 }
7714
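/* Encode the fraction-bits operand of a VFPv3 fixed-point conversion: the
   value written into the instruction is srcsize minus the requested number
   of fraction bits, with its low bit going to bit 5 and the remaining bits
   to bits 0-3.  */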
7715 static void
7716 vfp_conv (int srcsize)
7717 {
7718 unsigned immbits = srcsize - inst.operands[1].imm;
7719 inst.instruction |= (immbits & 1) << 5;
7720 inst.instruction |= (immbits >> 1);
7721 }
7722
7723 static void
7724 do_vfp_sp_conv_16 (void)
7725 {
7726 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7727 vfp_conv (16);
7728 }
7729
7730 static void
7731 do_vfp_dp_conv_16 (void)
7732 {
7733 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7734 vfp_conv (16);
7735 }
7736
7737 static void
7738 do_vfp_sp_conv_32 (void)
7739 {
7740 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7741 vfp_conv (32);
7742 }
7743
7744 static void
7745 do_vfp_dp_conv_32 (void)
7746 {
7747 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7748 vfp_conv (32);
7749 }
7750
7751 \f
7752 /* FPA instructions. Also in a logical order. */
7753
7754 static void
7755 do_fpa_cmp (void)
7756 {
7757 inst.instruction |= inst.operands[0].reg << 16;
7758 inst.instruction |= inst.operands[1].reg;
7759 }
7760
7761 static void
7762 do_fpa_ldmstm (void)
7763 {
7764 inst.instruction |= inst.operands[0].reg << 12;
7765 switch (inst.operands[1].imm)
7766 {
7767 case 1: inst.instruction |= CP_T_X; break;
7768 case 2: inst.instruction |= CP_T_Y; break;
7769 case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
7770 case 4: break;
7771 default: abort ();
7772 }
7773
7774 if (inst.instruction & (PRE_INDEX | INDEX_UP))
7775 {
7776 /* The instruction specified "ea" or "fd", so we can only accept
7777 [Rn]{!}. The instruction does not really support stacking or
7778 unstacking, so we have to emulate these by setting appropriate
7779 bits and offsets. */
7780 constraint (inst.reloc.exp.X_op != O_constant
7781 || inst.reloc.exp.X_add_number != 0,
7782 _("this instruction does not support indexing"));
7783
7784 if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
7785 inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
7786
7787 if (!(inst.instruction & INDEX_UP))
7788 inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
7789
7790 if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
7791 {
7792 inst.operands[2].preind = 0;
7793 inst.operands[2].postind = 1;
7794 }
7795 }
7796
7797 encode_arm_cp_address (2, TRUE, TRUE, 0);
7798 }
7799
7800 \f
7801 /* iWMMXt instructions: strictly in alphabetical order. */
7802
7803 static void
7804 do_iwmmxt_tandorc (void)
7805 {
7806 constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
7807 }
7808
7809 static void
7810 do_iwmmxt_textrc (void)
7811 {
7812 inst.instruction |= inst.operands[0].reg << 12;
7813 inst.instruction |= inst.operands[1].imm;
7814 }
7815
7816 static void
7817 do_iwmmxt_textrm (void)
7818 {
7819 inst.instruction |= inst.operands[0].reg << 12;
7820 inst.instruction |= inst.operands[1].reg << 16;
7821 inst.instruction |= inst.operands[2].imm;
7822 }
7823
7824 static void
7825 do_iwmmxt_tinsr (void)
7826 {
7827 inst.instruction |= inst.operands[0].reg << 16;
7828 inst.instruction |= inst.operands[1].reg << 12;
7829 inst.instruction |= inst.operands[2].imm;
7830 }
7831
7832 static void
7833 do_iwmmxt_tmia (void)
7834 {
7835 inst.instruction |= inst.operands[0].reg << 5;
7836 inst.instruction |= inst.operands[1].reg;
7837 inst.instruction |= inst.operands[2].reg << 12;
7838 }
7839
7840 static void
7841 do_iwmmxt_waligni (void)
7842 {
7843 inst.instruction |= inst.operands[0].reg << 12;
7844 inst.instruction |= inst.operands[1].reg << 16;
7845 inst.instruction |= inst.operands[2].reg;
7846 inst.instruction |= inst.operands[3].imm << 20;
7847 }
7848
7849 static void
7850 do_iwmmxt_wmerge (void)
7851 {
7852 inst.instruction |= inst.operands[0].reg << 12;
7853 inst.instruction |= inst.operands[1].reg << 16;
7854 inst.instruction |= inst.operands[2].reg;
7855 inst.instruction |= inst.operands[3].imm << 21;
7856 }
7857
7858 static void
7859 do_iwmmxt_wmov (void)
7860 {
7861 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
7862 inst.instruction |= inst.operands[0].reg << 12;
7863 inst.instruction |= inst.operands[1].reg << 16;
7864 inst.instruction |= inst.operands[1].reg;
7865 }
7866
7867 static void
7868 do_iwmmxt_wldstbh (void)
7869 {
7870 int reloc;
7871 inst.instruction |= inst.operands[0].reg << 12;
7872 if (thumb_mode)
7873 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
7874 else
7875 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
7876 encode_arm_cp_address (1, TRUE, FALSE, reloc);
7877 }
7878
7879 static void
7880 do_iwmmxt_wldstw (void)
7881 {
7882 /* RIWR_RIWC clears .isreg for a control register. */
7883 if (!inst.operands[0].isreg)
7884 {
7885 constraint (inst.cond != COND_ALWAYS, BAD_COND);
7886 inst.instruction |= 0xf0000000;
7887 }
7888
7889 inst.instruction |= inst.operands[0].reg << 12;
7890 encode_arm_cp_address (1, TRUE, TRUE, 0);
7891 }
7892
7893 static void
7894 do_iwmmxt_wldstd (void)
7895 {
7896 inst.instruction |= inst.operands[0].reg << 12;
7897 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
7898 && inst.operands[1].immisreg)
7899 {
7900 inst.instruction &= ~0x1a000ff;
7901 inst.instruction |= (0xf << 28);
7902 if (inst.operands[1].preind)
7903 inst.instruction |= PRE_INDEX;
7904 if (!inst.operands[1].negative)
7905 inst.instruction |= INDEX_UP;
7906 if (inst.operands[1].writeback)
7907 inst.instruction |= WRITE_BACK;
7908 inst.instruction |= inst.operands[1].reg << 16;
7909 inst.instruction |= inst.reloc.exp.X_add_number << 4;
7910 inst.instruction |= inst.operands[1].imm;
7911 }
7912 else
7913 encode_arm_cp_address (1, TRUE, FALSE, 0);
7914 }
7915
7916 static void
7917 do_iwmmxt_wshufh (void)
7918 {
7919 inst.instruction |= inst.operands[0].reg << 12;
7920 inst.instruction |= inst.operands[1].reg << 16;
7921 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
7922 inst.instruction |= (inst.operands[2].imm & 0x0f);
7923 }
7924
7925 static void
7926 do_iwmmxt_wzero (void)
7927 {
7928 /* WZERO reg is an alias for WANDN reg, reg, reg. */
7929 inst.instruction |= inst.operands[0].reg;
7930 inst.instruction |= inst.operands[0].reg << 12;
7931 inst.instruction |= inst.operands[0].reg << 16;
7932 }
7933
7934 static void
7935 do_iwmmxt_wrwrwr_or_imm5 (void)
7936 {
7937 if (inst.operands[2].isreg)
7938 do_rd_rn_rm ();
7939 else {
7940 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
7941 _("immediate operand requires iWMMXt2"));
7942 do_rd_rn ();
7943 if (inst.operands[2].imm == 0)
7944 {
7945 switch ((inst.instruction >> 20) & 0xf)
7946 {
7947 case 4:
7948 case 5:
7949 case 6:
7950 case 7:
7951 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
7952 inst.operands[2].imm = 16;
7953 inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
7954 break;
7955 case 8:
7956 case 9:
7957 case 10:
7958 case 11:
7959 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
7960 inst.operands[2].imm = 32;
7961 inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
7962 break;
7963 case 12:
7964 case 13:
7965 case 14:
7966 case 15:
7967 {
7968 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
7969 unsigned long wrn;
7970 wrn = (inst.instruction >> 16) & 0xf;
7971 inst.instruction &= 0xff0fff0f;
7972 inst.instruction |= wrn;
7973 /* Bail out here; the instruction is now assembled. */
7974 return;
7975 }
7976 }
7977 }
7978 /* Map 32 -> 0, etc. */
7979 inst.operands[2].imm &= 0x1f;
7980 inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
7981 }
7982 }
7983 \f
7984 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
7985 operations first, then control, shift, and load/store. */
7986
7987 /* Insns like "foo X,Y,Z". */
7988
7989 static void
7990 do_mav_triple (void)
7991 {
7992 inst.instruction |= inst.operands[0].reg << 16;
7993 inst.instruction |= inst.operands[1].reg;
7994 inst.instruction |= inst.operands[2].reg << 12;
7995 }
7996
7997 /* Insns like "foo W,X,Y,Z".
7998 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
7999
8000 static void
8001 do_mav_quad (void)
8002 {
8003 inst.instruction |= inst.operands[0].reg << 5;
8004 inst.instruction |= inst.operands[1].reg << 12;
8005 inst.instruction |= inst.operands[2].reg << 16;
8006 inst.instruction |= inst.operands[3].reg;
8007 }
8008
8009 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
8010 static void
8011 do_mav_dspsc (void)
8012 {
8013 inst.instruction |= inst.operands[1].reg << 12;
8014 }
8015
8016 /* Maverick shift immediate instructions.
8017 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
8018 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
8019
8020 static void
8021 do_mav_shift (void)
8022 {
8023 int imm = inst.operands[2].imm;
8024
8025 inst.instruction |= inst.operands[0].reg << 12;
8026 inst.instruction |= inst.operands[1].reg << 16;
8027
8028 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
8029 Bits 5-7 of the insn should have bits 4-6 of the immediate.
8030 Bit 4 should be 0. */
8031 imm = (imm & 0xf) | ((imm & 0x70) << 1);
8032
8033 inst.instruction |= imm;
8034 }
8035 \f
8036 /* XScale instructions. Also sorted arithmetic before move. */
8037
8038 /* Xscale multiply-accumulate (argument parse)
8039 MIAcc acc0,Rm,Rs
8040 MIAPHcc acc0,Rm,Rs
8041 MIAxycc acc0,Rm,Rs. */
8042
8043 static void
8044 do_xsc_mia (void)
8045 {
8046 inst.instruction |= inst.operands[1].reg;
8047 inst.instruction |= inst.operands[2].reg << 12;
8048 }
8049
8050 /* Xscale move-accumulator-register (argument parse)
8051
8052 MARcc acc0,RdLo,RdHi. */
8053
8054 static void
8055 do_xsc_mar (void)
8056 {
8057 inst.instruction |= inst.operands[1].reg << 12;
8058 inst.instruction |= inst.operands[2].reg << 16;
8059 }
8060
8061 /* Xscale move-register-accumulator (argument parse)
8062
8063 MRAcc RdLo,RdHi,acc0. */
8064
8065 static void
8066 do_xsc_mra (void)
8067 {
8068 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
8069 inst.instruction |= inst.operands[0].reg << 12;
8070 inst.instruction |= inst.operands[1].reg << 16;
8071 }
8072 \f
8073 /* Encoding functions relevant only to Thumb. */
8074
8075 /* inst.operands[i] is a shifted-register operand; encode
8076 it into inst.instruction in the format used by Thumb32. */
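/* In the Thumb-2 encoding the shift amount is split into imm3 (bits 12-14)
   and imm2 (bits 6-7); e.g. a shift of 5 stores 001 in imm3 and 01 in imm2,
   matching the (value & 0x1c) << 10 and (value & 0x03) << 6 below.  */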
8077
8078 static void
8079 encode_thumb32_shifted_operand (int i)
8080 {
8081 unsigned int value = inst.reloc.exp.X_add_number;
8082 unsigned int shift = inst.operands[i].shift_kind;
8083
8084 constraint (inst.operands[i].immisreg,
8085 _("shift by register not allowed in thumb mode"));
8086 inst.instruction |= inst.operands[i].reg;
8087 if (shift == SHIFT_RRX)
8088 inst.instruction |= SHIFT_ROR << 4;
8089 else
8090 {
8091 constraint (inst.reloc.exp.X_op != O_constant,
8092 _("expression too complex"));
8093
8094 constraint (value > 32
8095 || (value == 32 && (shift == SHIFT_LSL
8096 || shift == SHIFT_ROR)),
8097 _("shift expression is too large"));
8098
8099 if (value == 0)
8100 shift = SHIFT_LSL;
8101 else if (value == 32)
8102 value = 0;
8103
8104 inst.instruction |= shift << 4;
8105 inst.instruction |= (value & 0x1c) << 10;
8106 inst.instruction |= (value & 0x03) << 6;
8107 }
8108 }
8109
8110
8111 /* inst.operands[i] was set up by parse_address. Encode it into a
8112 Thumb32 format load or store instruction. Reject forms that cannot
8113 be used with such instructions. If is_t is true, reject forms that
8114 cannot be used with a T instruction; if is_d is true, reject forms
8115 that cannot be used with a D instruction. */
8116
8117 static void
8118 encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
8119 {
8120 bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
8121
8122 constraint (!inst.operands[i].isreg,
8123 _("Instruction does not support =N addresses"));
8124
8125 inst.instruction |= inst.operands[i].reg << 16;
8126 if (inst.operands[i].immisreg)
8127 {
8128 constraint (is_pc, _("cannot use register index with PC-relative addressing"));
8129 constraint (is_t || is_d, _("cannot use register index with this instruction"));
8130 constraint (inst.operands[i].negative,
8131 _("Thumb does not support negative register indexing"));
8132 constraint (inst.operands[i].postind,
8133 _("Thumb does not support register post-indexing"));
8134 constraint (inst.operands[i].writeback,
8135 _("Thumb does not support register indexing with writeback"));
8136 constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
8137 _("Thumb supports only LSL in shifted register indexing"));
8138
8139 inst.instruction |= inst.operands[i].imm;
8140 if (inst.operands[i].shifted)
8141 {
8142 constraint (inst.reloc.exp.X_op != O_constant,
8143 _("expression too complex"));
8144 constraint (inst.reloc.exp.X_add_number < 0
8145 || inst.reloc.exp.X_add_number > 3,
8146 _("shift out of range"));
8147 inst.instruction |= inst.reloc.exp.X_add_number << 4;
8148 }
8149 inst.reloc.type = BFD_RELOC_UNUSED;
8150 }
8151 else if (inst.operands[i].preind)
8152 {
8153 constraint (is_pc && inst.operands[i].writeback,
8154 _("cannot use writeback with PC-relative addressing"));
8155 constraint (is_t && inst.operands[i].writeback,
8156 _("cannot use writeback with this instruction"));
8157
8158 if (is_d)
8159 {
8160 inst.instruction |= 0x01000000;
8161 if (inst.operands[i].writeback)
8162 inst.instruction |= 0x00200000;
8163 }
8164 else
8165 {
8166 inst.instruction |= 0x00000c00;
8167 if (inst.operands[i].writeback)
8168 inst.instruction |= 0x00000100;
8169 }
8170 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
8171 }
8172 else if (inst.operands[i].postind)
8173 {
8174 assert (inst.operands[i].writeback);
8175 constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
8176 constraint (is_t, _("cannot use post-indexing with this instruction"));
8177
8178 if (is_d)
8179 inst.instruction |= 0x00200000;
8180 else
8181 inst.instruction |= 0x00000900;
8182 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
8183 }
8184 else /* unindexed - only for coprocessor */
8185 inst.error = _("instruction does not accept unindexed addressing");
8186 }
8187
8188 /* Table of Thumb instructions which exist in both 16- and 32-bit
8189 encodings (the latter only in post-V6T2 cores). The index is the
8190 value used in the insns table below. When there is more than one
8191 possible 16-bit encoding for the instruction, this table always
8192 holds variant (1).
8193 Also contains several pseudo-instructions used during relaxation. */
8194 #define T16_32_TAB \
8195 X(adc, 4140, eb400000), \
8196 X(adcs, 4140, eb500000), \
8197 X(add, 1c00, eb000000), \
8198 X(adds, 1c00, eb100000), \
8199 X(addi, 0000, f1000000), \
8200 X(addis, 0000, f1100000), \
8201 X(add_pc,000f, f20f0000), \
8202 X(add_sp,000d, f10d0000), \
8203 X(adr, 000f, f20f0000), \
8204 X(and, 4000, ea000000), \
8205 X(ands, 4000, ea100000), \
8206 X(asr, 1000, fa40f000), \
8207 X(asrs, 1000, fa50f000), \
8208 X(b, e000, f000b000), \
8209 X(bcond, d000, f0008000), \
8210 X(bic, 4380, ea200000), \
8211 X(bics, 4380, ea300000), \
8212 X(cmn, 42c0, eb100f00), \
8213 X(cmp, 2800, ebb00f00), \
8214 X(cpsie, b660, f3af8400), \
8215 X(cpsid, b670, f3af8600), \
8216 X(cpy, 4600, ea4f0000), \
8217 X(dec_sp,80dd, f1bd0d00), \
8218 X(eor, 4040, ea800000), \
8219 X(eors, 4040, ea900000), \
8220 X(inc_sp,00dd, f10d0d00), \
8221 X(ldmia, c800, e8900000), \
8222 X(ldr, 6800, f8500000), \
8223 X(ldrb, 7800, f8100000), \
8224 X(ldrh, 8800, f8300000), \
8225 X(ldrsb, 5600, f9100000), \
8226 X(ldrsh, 5e00, f9300000), \
8227 X(ldr_pc,4800, f85f0000), \
8228 X(ldr_pc2,4800, f85f0000), \
8229 X(ldr_sp,9800, f85d0000), \
8230 X(lsl, 0000, fa00f000), \
8231 X(lsls, 0000, fa10f000), \
8232 X(lsr, 0800, fa20f000), \
8233 X(lsrs, 0800, fa30f000), \
8234 X(mov, 2000, ea4f0000), \
8235 X(movs, 2000, ea5f0000), \
8236 X(mul, 4340, fb00f000), \
8237 X(muls, 4340, ffffffff), /* no 32b muls */ \
8238 X(mvn, 43c0, ea6f0000), \
8239 X(mvns, 43c0, ea7f0000), \
8240 X(neg, 4240, f1c00000), /* rsb #0 */ \
8241 X(negs, 4240, f1d00000), /* rsbs #0 */ \
8242 X(orr, 4300, ea400000), \
8243 X(orrs, 4300, ea500000), \
8244 X(pop, bc00, e8bd0000), /* ldmia sp!,... */ \
8245 X(push, b400, e92d0000), /* stmdb sp!,... */ \
8246 X(rev, ba00, fa90f080), \
8247 X(rev16, ba40, fa90f090), \
8248 X(revsh, bac0, fa90f0b0), \
8249 X(ror, 41c0, fa60f000), \
8250 X(rors, 41c0, fa70f000), \
8251 X(sbc, 4180, eb600000), \
8252 X(sbcs, 4180, eb700000), \
8253 X(stmia, c000, e8800000), \
8254 X(str, 6000, f8400000), \
8255 X(strb, 7000, f8000000), \
8256 X(strh, 8000, f8200000), \
8257 X(str_sp,9000, f84d0000), \
8258 X(sub, 1e00, eba00000), \
8259 X(subs, 1e00, ebb00000), \
8260 X(subi, 8000, f1a00000), \
8261 X(subis, 8000, f1b00000), \
8262 X(sxtb, b240, fa4ff080), \
8263 X(sxth, b200, fa0ff080), \
8264 X(tst, 4200, ea100f00), \
8265 X(uxtb, b2c0, fa5ff080), \
8266 X(uxth, b280, fa1ff080), \
8267 X(nop, bf00, f3af8000), \
8268 X(yield, bf10, f3af8001), \
8269 X(wfe, bf20, f3af8002), \
8270 X(wfi, bf30, f3af8003), \
8271 	X(sev,   bf40, f3af8004),
8272
8273 /* To catch errors in encoding functions, the codes are all offset by
8274 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
8275 as 16-bit instructions. */
8276 #define X(a,b,c) T_MNEM_##a
8277 enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
8278 #undef X
8279
8280 #define X(a,b,c) 0x##b
8281 static const unsigned short thumb_op16[] = { T16_32_TAB };
8282 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
8283 #undef X
8284
8285 #define X(a,b,c) 0x##c
8286 static const unsigned int thumb_op32[] = { T16_32_TAB };
8287 #define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
8288 #define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
8289 #undef X
8290 #undef T16_32_TAB
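
/* A quick worked example of how the tables above are used (values taken
   from the table itself): THUMB_OP16 (T_MNEM_add) yields 0x1c00, the
   16-bit base opcode, while THUMB_OP32 (T_MNEM_add) yields 0xeb000000,
   the 32-bit encoding.  THUMB_SETS_FLAGS tests bit 20 of the 32-bit
   pattern, so it is true for T_MNEM_adds (0xeb100000) and false for
   T_MNEM_add.  */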
8291
8292 /* Thumb instruction encoders, in alphabetical order. */
8293
8294 /* ADDW or SUBW. */
8295 static void
8296 do_t_add_sub_w (void)
8297 {
8298 int Rd, Rn;
8299
8300 Rd = inst.operands[0].reg;
8301 Rn = inst.operands[1].reg;
8302
8303 constraint (Rd == 15, _("PC not allowed as destination"));
8304 inst.instruction |= (Rn << 16) | (Rd << 8);
8305 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
8306 }
8307
8308 /* Parse an add or subtract instruction. We get here with inst.instruction
8309    equalling any of T_MNEM_add, adds, sub, or subs.  */
8310
8311 static void
8312 do_t_add_sub (void)
8313 {
8314 int Rd, Rs, Rn;
8315
8316 Rd = inst.operands[0].reg;
8317 Rs = (inst.operands[1].present
8318 ? inst.operands[1].reg /* Rd, Rs, foo */
8319 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
8320
8321 if (unified_syntax)
8322 {
8323 bfd_boolean flags;
8324 bfd_boolean narrow;
8325 int opcode;
8326
8327 flags = (inst.instruction == T_MNEM_adds
8328 || inst.instruction == T_MNEM_subs);
8329 if (flags)
8330 narrow = (current_it_mask == 0);
8331 else
8332 narrow = (current_it_mask != 0);
8333 if (!inst.operands[2].isreg)
8334 {
8335 int add;
8336
8337 add = (inst.instruction == T_MNEM_add
8338 || inst.instruction == T_MNEM_adds);
8339 opcode = 0;
8340 if (inst.size_req != 4)
8341 {
8342 /* Attempt to use a narrow opcode, with relaxation if
8343 appropriate. */
8344 if (Rd == REG_SP && Rs == REG_SP && !flags)
8345 opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
8346 else if (Rd <= 7 && Rs == REG_SP && add && !flags)
8347 opcode = T_MNEM_add_sp;
8348 else if (Rd <= 7 && Rs == REG_PC && add && !flags)
8349 opcode = T_MNEM_add_pc;
8350 else if (Rd <= 7 && Rs <= 7 && narrow)
8351 {
8352 if (flags)
8353 opcode = add ? T_MNEM_addis : T_MNEM_subis;
8354 else
8355 opcode = add ? T_MNEM_addi : T_MNEM_subi;
8356 }
8357 if (opcode)
8358 {
8359 inst.instruction = THUMB_OP16(opcode);
8360 inst.instruction |= (Rd << 4) | Rs;
8361 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
8362 if (inst.size_req != 2)
8363 inst.relax = opcode;
8364 }
8365 else
8366 constraint (inst.size_req == 2, BAD_HIREG);
8367 }
8368 if (inst.size_req == 4
8369 || (inst.size_req != 2 && !opcode))
8370 {
8371 if (Rs == REG_PC)
8372 {
8373 /* Always use addw/subw. */
8374 inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
8375 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
8376 }
8377 else
8378 {
8379 inst.instruction = THUMB_OP32 (inst.instruction);
8380 inst.instruction = (inst.instruction & 0xe1ffffff)
8381 | 0x10000000;
8382 if (flags)
8383 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8384 else
8385 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
8386 }
8387 inst.instruction |= inst.operands[0].reg << 8;
8388 inst.instruction |= inst.operands[1].reg << 16;
8389 }
8390 }
8391 else
8392 {
8393 Rn = inst.operands[2].reg;
8394 /* See if we can do this with a 16-bit instruction. */
8395 if (!inst.operands[2].shifted && inst.size_req != 4)
8396 {
8397 if (Rd > 7 || Rs > 7 || Rn > 7)
8398 narrow = FALSE;
8399
8400 if (narrow)
8401 {
8402 inst.instruction = ((inst.instruction == T_MNEM_adds
8403 || inst.instruction == T_MNEM_add)
8404 ? T_OPCODE_ADD_R3
8405 : T_OPCODE_SUB_R3);
8406 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
8407 return;
8408 }
8409
8410 if (inst.instruction == T_MNEM_add)
8411 {
8412 if (Rd == Rs)
8413 {
8414 inst.instruction = T_OPCODE_ADD_HI;
8415 inst.instruction |= (Rd & 8) << 4;
8416 inst.instruction |= (Rd & 7);
8417 inst.instruction |= Rn << 3;
8418 return;
8419 }
8420 /* ... because addition is commutative! */
8421 else if (Rd == Rn)
8422 {
8423 inst.instruction = T_OPCODE_ADD_HI;
8424 inst.instruction |= (Rd & 8) << 4;
8425 inst.instruction |= (Rd & 7);
8426 inst.instruction |= Rs << 3;
8427 return;
8428 }
8429 }
8430 }
8431 /* If we get here, it can't be done in 16 bits. */
8432 constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
8433 _("shift must be constant"));
8434 inst.instruction = THUMB_OP32 (inst.instruction);
8435 inst.instruction |= Rd << 8;
8436 inst.instruction |= Rs << 16;
8437 encode_thumb32_shifted_operand (2);
8438 }
8439 }
8440 else
8441 {
8442 constraint (inst.instruction == T_MNEM_adds
8443 || inst.instruction == T_MNEM_subs,
8444 BAD_THUMB32);
8445
8446 if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
8447 {
8448 constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
8449 || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
8450 BAD_HIREG);
8451
8452 inst.instruction = (inst.instruction == T_MNEM_add
8453 ? 0x0000 : 0x8000);
8454 inst.instruction |= (Rd << 4) | Rs;
8455 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
8456 return;
8457 }
8458
8459 Rn = inst.operands[2].reg;
8460 constraint (inst.operands[2].shifted, _("unshifted register required"));
8461
8462 /* We now have Rd, Rs, and Rn set to registers. */
8463 if (Rd > 7 || Rs > 7 || Rn > 7)
8464 {
8465 /* Can't do this for SUB. */
8466 constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
8467 inst.instruction = T_OPCODE_ADD_HI;
8468 inst.instruction |= (Rd & 8) << 4;
8469 inst.instruction |= (Rd & 7);
8470 if (Rs == Rd)
8471 inst.instruction |= Rn << 3;
8472 else if (Rn == Rd)
8473 inst.instruction |= Rs << 3;
8474 else
8475 constraint (1, _("dest must overlap one source register"));
8476 }
8477 else
8478 {
8479 inst.instruction = (inst.instruction == T_MNEM_add
8480 ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
8481 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
8482 }
8483 }
8484 }
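
/* Sketch of how the routine above chooses an encoding in unified syntax,
   under the usual defaults (no .w/.n suffix, outside an IT block):

     adds r0, r1, #1     16-bit ADD immediate (T_MNEM_addis), with
                         inst.relax set so it can grow to 32 bits if the
                         constant turns out not to fit;
     add  sp, sp, #8     T_MNEM_inc_sp pseudo, again relaxable;
     add.w r0, pc, #8    always ADDW/SUBW (BFD_RELOC_ARM_T32_IMM12),
                         since the wide PC-relative form cannot use the
                         ordinary T32 data-processing immediate.  */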
8485
8486 static void
8487 do_t_adr (void)
8488 {
8489 if (unified_syntax && inst.size_req == 0 && inst.operands[0].reg <= 7)
8490 {
8491 /* Defer to section relaxation. */
8492 inst.relax = inst.instruction;
8493 inst.instruction = THUMB_OP16 (inst.instruction);
8494 inst.instruction |= inst.operands[0].reg << 4;
8495 }
8496 else if (unified_syntax && inst.size_req != 2)
8497 {
8498 /* Generate a 32-bit opcode. */
8499 inst.instruction = THUMB_OP32 (inst.instruction);
8500 inst.instruction |= inst.operands[0].reg << 8;
8501 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
8502 inst.reloc.pc_rel = 1;
8503 }
8504 else
8505 {
8506 /* Generate a 16-bit opcode. */
8507 inst.instruction = THUMB_OP16 (inst.instruction);
8508 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
8509 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
8510 inst.reloc.pc_rel = 1;
8511
8512 inst.instruction |= inst.operands[0].reg << 4;
8513 }
8514 }
8515
8516 /* Arithmetic instructions for which there is just one 16-bit
8517 instruction encoding, and it allows only two low registers.
8518 For maximal compatibility with ARM syntax, we allow three register
8519 operands even when Thumb-32 instructions are not available, as long
8520 as the first two are identical. For instance, both "sbc r0,r1" and
8521 "sbc r0,r0,r1" are allowed. */
8522 static void
8523 do_t_arit3 (void)
8524 {
8525 int Rd, Rs, Rn;
8526
8527 Rd = inst.operands[0].reg;
8528 Rs = (inst.operands[1].present
8529 ? inst.operands[1].reg /* Rd, Rs, foo */
8530 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
8531 Rn = inst.operands[2].reg;
8532
8533 if (unified_syntax)
8534 {
8535 if (!inst.operands[2].isreg)
8536 {
8537 /* For an immediate, we always generate a 32-bit opcode;
8538 section relaxation will shrink it later if possible. */
8539 inst.instruction = THUMB_OP32 (inst.instruction);
8540 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8541 inst.instruction |= Rd << 8;
8542 inst.instruction |= Rs << 16;
8543 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8544 }
8545 else
8546 {
8547 bfd_boolean narrow;
8548
8549 /* See if we can do this with a 16-bit instruction. */
8550 if (THUMB_SETS_FLAGS (inst.instruction))
8551 narrow = current_it_mask == 0;
8552 else
8553 narrow = current_it_mask != 0;
8554
8555 if (Rd > 7 || Rn > 7 || Rs > 7)
8556 narrow = FALSE;
8557 if (inst.operands[2].shifted)
8558 narrow = FALSE;
8559 if (inst.size_req == 4)
8560 narrow = FALSE;
8561
8562 if (narrow
8563 && Rd == Rs)
8564 {
8565 inst.instruction = THUMB_OP16 (inst.instruction);
8566 inst.instruction |= Rd;
8567 inst.instruction |= Rn << 3;
8568 return;
8569 }
8570
8571 /* If we get here, it can't be done in 16 bits. */
8572 constraint (inst.operands[2].shifted
8573 && inst.operands[2].immisreg,
8574 _("shift must be constant"));
8575 inst.instruction = THUMB_OP32 (inst.instruction);
8576 inst.instruction |= Rd << 8;
8577 inst.instruction |= Rs << 16;
8578 encode_thumb32_shifted_operand (2);
8579 }
8580 }
8581 else
8582 {
8583 /* On its face this is a lie - the instruction does set the
8584 flags. However, the only supported mnemonic in this mode
8585 says it doesn't. */
8586 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
8587
8588 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
8589 _("unshifted register required"));
8590 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
8591 constraint (Rd != Rs,
8592 _("dest and source1 must be the same register"));
8593
8594 inst.instruction = THUMB_OP16 (inst.instruction);
8595 inst.instruction |= Rd;
8596 inst.instruction |= Rn << 3;
8597 }
8598 }
8599
8600 /* Similarly, but for instructions where the arithmetic operation is
8601 commutative, so we can allow either of them to be different from
8602 the destination operand in a 16-bit instruction. For instance, all
8603 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
8604 accepted. */
8605 static void
8606 do_t_arit3c (void)
8607 {
8608 int Rd, Rs, Rn;
8609
8610 Rd = inst.operands[0].reg;
8611 Rs = (inst.operands[1].present
8612 ? inst.operands[1].reg /* Rd, Rs, foo */
8613 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
8614 Rn = inst.operands[2].reg;
8615
8616 if (unified_syntax)
8617 {
8618 if (!inst.operands[2].isreg)
8619 {
8620 /* For an immediate, we always generate a 32-bit opcode;
8621 section relaxation will shrink it later if possible. */
8622 inst.instruction = THUMB_OP32 (inst.instruction);
8623 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8624 inst.instruction |= Rd << 8;
8625 inst.instruction |= Rs << 16;
8626 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8627 }
8628 else
8629 {
8630 bfd_boolean narrow;
8631
8632 /* See if we can do this with a 16-bit instruction. */
8633 if (THUMB_SETS_FLAGS (inst.instruction))
8634 narrow = current_it_mask == 0;
8635 else
8636 narrow = current_it_mask != 0;
8637
8638 if (Rd > 7 || Rn > 7 || Rs > 7)
8639 narrow = FALSE;
8640 if (inst.operands[2].shifted)
8641 narrow = FALSE;
8642 if (inst.size_req == 4)
8643 narrow = FALSE;
8644
8645 if (narrow)
8646 {
8647 if (Rd == Rs)
8648 {
8649 inst.instruction = THUMB_OP16 (inst.instruction);
8650 inst.instruction |= Rd;
8651 inst.instruction |= Rn << 3;
8652 return;
8653 }
8654 if (Rd == Rn)
8655 {
8656 inst.instruction = THUMB_OP16 (inst.instruction);
8657 inst.instruction |= Rd;
8658 inst.instruction |= Rs << 3;
8659 return;
8660 }
8661 }
8662
8663 /* If we get here, it can't be done in 16 bits. */
8664 constraint (inst.operands[2].shifted
8665 && inst.operands[2].immisreg,
8666 _("shift must be constant"));
8667 inst.instruction = THUMB_OP32 (inst.instruction);
8668 inst.instruction |= Rd << 8;
8669 inst.instruction |= Rs << 16;
8670 encode_thumb32_shifted_operand (2);
8671 }
8672 }
8673 else
8674 {
8675 /* On its face this is a lie - the instruction does set the
8676 flags. However, the only supported mnemonic in this mode
8677 says it doesn't. */
8678 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
8679
8680 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
8681 _("unshifted register required"));
8682 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
8683
8684 inst.instruction = THUMB_OP16 (inst.instruction);
8685 inst.instruction |= Rd;
8686
8687 if (Rd == Rs)
8688 inst.instruction |= Rn << 3;
8689 else if (Rd == Rn)
8690 inst.instruction |= Rs << 3;
8691 else
8692 constraint (1, _("dest must overlap one source register"));
8693 }
8694 }
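
/* For example, with ADC (commutative) outside an IT block: "adcs r0, r1"
   and "adcs r0, r1, r0" can both use the 16-bit encoding, because the
   destination overlaps one of the sources; "adcs r0, r1, r2" cannot, and
   falls through to the 32-bit form above.  */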
8695
8696 static void
8697 do_t_barrier (void)
8698 {
8699 if (inst.operands[0].present)
8700 {
8701 constraint ((inst.instruction & 0xf0) != 0x40
8702 && inst.operands[0].imm != 0xf,
8703 "bad barrier type");
8704 inst.instruction |= inst.operands[0].imm;
8705 }
8706 else
8707 inst.instruction |= 0xf;
8708 }
8709
8710 static void
8711 do_t_bfc (void)
8712 {
8713 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8714 constraint (msb > 32, _("bit-field extends past end of register"));
8715 /* The instruction encoding stores the LSB and MSB,
8716 not the LSB and width. */
8717 inst.instruction |= inst.operands[0].reg << 8;
8718 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
8719 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
8720 inst.instruction |= msb - 1;
8721 }
8722
8723 static void
8724 do_t_bfi (void)
8725 {
8726 unsigned int msb;
8727
8728 /* #0 in second position is alternative syntax for bfc, which is
8729 the same instruction but with REG_PC in the Rm field. */
8730 if (!inst.operands[1].isreg)
8731 inst.operands[1].reg = REG_PC;
8732
8733 msb = inst.operands[2].imm + inst.operands[3].imm;
8734 constraint (msb > 32, _("bit-field extends past end of register"));
8735 /* The instruction encoding stores the LSB and MSB,
8736 not the LSB and width. */
8737 inst.instruction |= inst.operands[0].reg << 8;
8738 inst.instruction |= inst.operands[1].reg << 16;
8739 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
8740 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
8741 inst.instruction |= msb - 1;
8742 }
8743
8744 static void
8745 do_t_bfx (void)
8746 {
8747 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8748 _("bit-field extends past end of register"));
8749 inst.instruction |= inst.operands[0].reg << 8;
8750 inst.instruction |= inst.operands[1].reg << 16;
8751 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
8752 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
8753 inst.instruction |= inst.operands[3].imm - 1;
8754 }
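
/* Worked example for the three bit-field encoders above, using
   hypothetical operands: "bfi r0, r1, #8, #4" has lsb = 8 and width = 4,
   so msb = 12 and the value stored in bits 4-0 is msb - 1 = 11, the index
   of the highest bit affected; SBFX/UBFX instead store width - 1 there.
   In all three, the lsb is split into imm3 (bits 14-12) and imm2
   (bits 7-6) by the two masked shifts.  */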
8755
8756 /* ARM V5 Thumb BLX (argument parse)
8757 BLX <target_addr> which is BLX(1)
8758 BLX <Rm> which is BLX(2)
8759 Unfortunately, there are two different opcodes for this mnemonic.
8760 So, the insns[].value is not used, and the code here zaps values
8761 into inst.instruction.
8762
8763 ??? How to take advantage of the additional two bits of displacement
8764 available in Thumb32 mode? Need new relocation? */
8765
8766 static void
8767 do_t_blx (void)
8768 {
8769 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8770 if (inst.operands[0].isreg)
8771 /* We have a register, so this is BLX(2). */
8772 inst.instruction |= inst.operands[0].reg << 3;
8773 else
8774 {
8775 /* No register. This must be BLX(1). */
8776 inst.instruction = 0xf000e800;
8777 #ifdef OBJ_ELF
8778 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
8779 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
8780 else
8781 #endif
8782 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BLX;
8783 inst.reloc.pc_rel = 1;
8784 }
8785 }
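
/* For example: "blx r3" takes the register path above and simply inserts
   r3 into the 16-bit BLX(2) encoding, while "blx label" (for a
   hypothetical label) takes the immediate path, substitutes the 32-bit
   0xf000e800 pattern and leaves the offset to a BLX relocation, or a
   BRANCH23 relocation for EABI version 4 and later.  */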
8786
8787 static void
8788 do_t_branch (void)
8789 {
8790 int opcode;
8791 int cond;
8792
8793 if (current_it_mask)
8794 {
8795 /* Conditional branches inside IT blocks are encoded as unconditional
8796 branches. */
8797 cond = COND_ALWAYS;
8798 /* A branch must be the last instruction in an IT block. */
8799 constraint (current_it_mask != 0x10, BAD_BRANCH);
8800 }
8801 else
8802 cond = inst.cond;
8803
8804 if (cond != COND_ALWAYS)
8805 opcode = T_MNEM_bcond;
8806 else
8807 opcode = inst.instruction;
8808
8809 if (unified_syntax && inst.size_req == 4)
8810 {
8811 inst.instruction = THUMB_OP32(opcode);
8812 if (cond == COND_ALWAYS)
8813 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH25;
8814 else
8815 {
8816 assert (cond != 0xF);
8817 inst.instruction |= cond << 22;
8818 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH20;
8819 }
8820 }
8821 else
8822 {
8823 inst.instruction = THUMB_OP16(opcode);
8824 if (cond == COND_ALWAYS)
8825 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH12;
8826 else
8827 {
8828 inst.instruction |= cond << 8;
8829 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH9;
8830 }
8831 /* Allow section relaxation. */
8832 if (unified_syntax && inst.size_req != 2)
8833 inst.relax = opcode;
8834 }
8835
8836 inst.reloc.pc_rel = 1;
8837 }
8838
8839 static void
8840 do_t_bkpt (void)
8841 {
8842 constraint (inst.cond != COND_ALWAYS,
8843 _("instruction is always unconditional"));
8844 if (inst.operands[0].present)
8845 {
8846 constraint (inst.operands[0].imm > 255,
8847 _("immediate value out of range"));
8848 inst.instruction |= inst.operands[0].imm;
8849 }
8850 }
8851
8852 static void
8853 do_t_branch23 (void)
8854 {
8855 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8856 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
8857 inst.reloc.pc_rel = 1;
8858
8859 /* If the destination of the branch is a defined symbol which does not have
8860 the THUMB_FUNC attribute, then we must be calling a function which has
8861 the (interfacearm) attribute. We look for the Thumb entry point to that
8862 function and change the branch to refer to that function instead. */
8863 if ( inst.reloc.exp.X_op == O_symbol
8864 && inst.reloc.exp.X_add_symbol != NULL
8865 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
8866 && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
8867 inst.reloc.exp.X_add_symbol =
8868 find_real_start (inst.reloc.exp.X_add_symbol);
8869 }
8870
8871 static void
8872 do_t_bx (void)
8873 {
8874 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8875 inst.instruction |= inst.operands[0].reg << 3;
8876 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
8877 should cause the alignment to be checked once it is known. This is
8878 because BX PC only works if the instruction is word aligned. */
8879 }
8880
8881 static void
8882 do_t_bxj (void)
8883 {
8884 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8885 if (inst.operands[0].reg == REG_PC)
8886 as_tsktsk (_("use of r15 in bxj is not really useful"));
8887
8888 inst.instruction |= inst.operands[0].reg << 16;
8889 }
8890
8891 static void
8892 do_t_clz (void)
8893 {
8894 inst.instruction |= inst.operands[0].reg << 8;
8895 inst.instruction |= inst.operands[1].reg << 16;
8896 inst.instruction |= inst.operands[1].reg;
8897 }
8898
8899 static void
8900 do_t_cps (void)
8901 {
8902 constraint (current_it_mask, BAD_NOT_IT);
8903 inst.instruction |= inst.operands[0].imm;
8904 }
8905
8906 static void
8907 do_t_cpsi (void)
8908 {
8909 constraint (current_it_mask, BAD_NOT_IT);
8910 if (unified_syntax
8911 && (inst.operands[1].present || inst.size_req == 4)
8912 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
8913 {
8914 unsigned int imod = (inst.instruction & 0x0030) >> 4;
8915 inst.instruction = 0xf3af8000;
8916 inst.instruction |= imod << 9;
8917 inst.instruction |= inst.operands[0].imm << 5;
8918 if (inst.operands[1].present)
8919 inst.instruction |= 0x100 | inst.operands[1].imm;
8920 }
8921 else
8922 {
8923 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
8924 && (inst.operands[0].imm & 4),
8925 _("selected processor does not support 'A' form "
8926 "of this instruction"));
8927 constraint (inst.operands[1].present || inst.size_req == 4,
8928 _("Thumb does not support the 2-argument "
8929 "form of this instruction"));
8930 inst.instruction |= inst.operands[0].imm;
8931 }
8932 }
8933
8934 /* THUMB CPY instruction (argument parse). */
8935
8936 static void
8937 do_t_cpy (void)
8938 {
8939 if (inst.size_req == 4)
8940 {
8941 inst.instruction = THUMB_OP32 (T_MNEM_mov);
8942 inst.instruction |= inst.operands[0].reg << 8;
8943 inst.instruction |= inst.operands[1].reg;
8944 }
8945 else
8946 {
8947 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
8948 inst.instruction |= (inst.operands[0].reg & 0x7);
8949 inst.instruction |= inst.operands[1].reg << 3;
8950 }
8951 }
8952
8953 static void
8954 do_t_czb (void)
8955 {
8956 constraint (current_it_mask, BAD_NOT_IT);
8957 constraint (inst.operands[0].reg > 7, BAD_HIREG);
8958 inst.instruction |= inst.operands[0].reg;
8959 inst.reloc.pc_rel = 1;
8960 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
8961 }
8962
8963 static void
8964 do_t_dbg (void)
8965 {
8966 inst.instruction |= inst.operands[0].imm;
8967 }
8968
8969 static void
8970 do_t_div (void)
8971 {
8972 if (!inst.operands[1].present)
8973 inst.operands[1].reg = inst.operands[0].reg;
8974 inst.instruction |= inst.operands[0].reg << 8;
8975 inst.instruction |= inst.operands[1].reg << 16;
8976 inst.instruction |= inst.operands[2].reg;
8977 }
8978
8979 static void
8980 do_t_hint (void)
8981 {
8982 if (unified_syntax && inst.size_req == 4)
8983 inst.instruction = THUMB_OP32 (inst.instruction);
8984 else
8985 inst.instruction = THUMB_OP16 (inst.instruction);
8986 }
8987
8988 static void
8989 do_t_it (void)
8990 {
8991 unsigned int cond = inst.operands[0].imm;
8992
8993 constraint (current_it_mask, BAD_NOT_IT);
8994 current_it_mask = (inst.instruction & 0xf) | 0x10;
8995 current_cc = cond;
8996
8997 /* If the condition is a negative condition, invert the mask. */
8998 if ((cond & 0x1) == 0x0)
8999 {
9000 unsigned int mask = inst.instruction & 0x000f;
9001
9002 if ((mask & 0x7) == 0)
9003 /* no conversion needed */;
9004 else if ((mask & 0x3) == 0)
9005 mask ^= 0x8;
9006 else if ((mask & 0x1) == 0)
9007 mask ^= 0xC;
9008 else
9009 mask ^= 0xE;
9010
9011 inst.instruction &= 0xfff0;
9012 inst.instruction |= mask;
9013 }
9014
9015 inst.instruction |= cond << 4;
9016 }
9017
9018 static void
9019 do_t_ldmstm (void)
9020 {
9021 /* This really doesn't seem worth it. */
9022 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
9023 _("expression too complex"));
9024 constraint (inst.operands[1].writeback,
9025 _("Thumb load/store multiple does not support {reglist}^"));
9026
9027 if (unified_syntax)
9028 {
9029 /* See if we can use a 16-bit instruction. */
9030 if (inst.instruction < 0xffff /* not ldmdb/stmdb */
9031 && inst.size_req != 4
9032 && inst.operands[0].reg <= 7
9033 && !(inst.operands[1].imm & ~0xff)
9034 && (inst.instruction == T_MNEM_stmia
9035 ? inst.operands[0].writeback
9036 : (inst.operands[0].writeback
9037 == !(inst.operands[1].imm & (1 << inst.operands[0].reg)))))
9038 {
9039 if (inst.instruction == T_MNEM_stmia
9040 && (inst.operands[1].imm & (1 << inst.operands[0].reg))
9041 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
9042 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9043 inst.operands[0].reg);
9044
9045 inst.instruction = THUMB_OP16 (inst.instruction);
9046 inst.instruction |= inst.operands[0].reg << 8;
9047 inst.instruction |= inst.operands[1].imm;
9048 }
9049 else
9050 {
9051 if (inst.operands[1].imm & (1 << 13))
9052 as_warn (_("SP should not be in register list"));
9053 if (inst.instruction == T_MNEM_stmia)
9054 {
9055 if (inst.operands[1].imm & (1 << 15))
9056 as_warn (_("PC should not be in register list"));
9057 if (inst.operands[1].imm & (1 << inst.operands[0].reg))
9058 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9059 inst.operands[0].reg);
9060 }
9061 else
9062 {
9063 if (inst.operands[1].imm & (1 << 14)
9064 && inst.operands[1].imm & (1 << 15))
9065 as_warn (_("LR and PC should not both be in register list"));
9066 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
9067 && inst.operands[0].writeback)
9068 as_warn (_("base register should not be in register list "
9069 "when written back"));
9070 }
9071 if (inst.instruction < 0xffff)
9072 inst.instruction = THUMB_OP32 (inst.instruction);
9073 inst.instruction |= inst.operands[0].reg << 16;
9074 inst.instruction |= inst.operands[1].imm;
9075 if (inst.operands[0].writeback)
9076 inst.instruction |= WRITE_BACK;
9077 }
9078 }
9079 else
9080 {
9081 constraint (inst.operands[0].reg > 7
9082 || (inst.operands[1].imm & ~0xff), BAD_HIREG);
9083 if (inst.instruction == T_MNEM_stmia)
9084 {
9085 if (!inst.operands[0].writeback)
9086 as_warn (_("this instruction will write back the base register"));
9087 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
9088 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
9089 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9090 inst.operands[0].reg);
9091 }
9092 else
9093 {
9094 if (!inst.operands[0].writeback
9095 && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
9096 as_warn (_("this instruction will write back the base register"));
9097 else if (inst.operands[0].writeback
9098 && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
9099 as_warn (_("this instruction will not write back the base register"));
9100 }
9101
9102 inst.instruction = THUMB_OP16 (inst.instruction);
9103 inst.instruction |= inst.operands[0].reg << 8;
9104 inst.instruction |= inst.operands[1].imm;
9105 }
9106 }
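
/* Roughly, the 16-bit LDMIA/STMIA form is only picked above when the base
   is a low register, the register list fits in bits 0-7, and the narrow
   encoding's writeback rules match what was written: "stmia r0!, {r1-r3}"
   and "ldmia r0!, {r1-r3}" stay 16-bit, and so does "ldmia r0, {r0, r1}"
   (base in list, no writeback), while "ldmia r8!, {r1}" or any
   LDMDB/STMDB is forced to 32 bits.  */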
9107
9108 static void
9109 do_t_ldrex (void)
9110 {
9111 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
9112 || inst.operands[1].postind || inst.operands[1].writeback
9113 || inst.operands[1].immisreg || inst.operands[1].shifted
9114 || inst.operands[1].negative,
9115 BAD_ADDR_MODE);
9116
9117 inst.instruction |= inst.operands[0].reg << 12;
9118 inst.instruction |= inst.operands[1].reg << 16;
9119 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
9120 }
9121
9122 static void
9123 do_t_ldrexd (void)
9124 {
9125 if (!inst.operands[1].present)
9126 {
9127 constraint (inst.operands[0].reg == REG_LR,
9128 _("r14 not allowed as first register "
9129 "when second register is omitted"));
9130 inst.operands[1].reg = inst.operands[0].reg + 1;
9131 }
9132 constraint (inst.operands[0].reg == inst.operands[1].reg,
9133 BAD_OVERLAP);
9134
9135 inst.instruction |= inst.operands[0].reg << 12;
9136 inst.instruction |= inst.operands[1].reg << 8;
9137 inst.instruction |= inst.operands[2].reg << 16;
9138 }
9139
9140 static void
9141 do_t_ldst (void)
9142 {
9143 unsigned long opcode;
9144 int Rn;
9145
9146 opcode = inst.instruction;
9147 if (unified_syntax)
9148 {
9149 if (!inst.operands[1].isreg)
9150 {
9151 if (opcode <= 0xffff)
9152 inst.instruction = THUMB_OP32 (opcode);
9153 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
9154 return;
9155 }
9156 if (inst.operands[1].isreg
9157 && !inst.operands[1].writeback
9158 && !inst.operands[1].shifted && !inst.operands[1].postind
9159 && !inst.operands[1].negative && inst.operands[0].reg <= 7
9160 && opcode <= 0xffff
9161 && inst.size_req != 4)
9162 {
9163 /* Insn may have a 16-bit form. */
9164 Rn = inst.operands[1].reg;
9165 if (inst.operands[1].immisreg)
9166 {
9167 inst.instruction = THUMB_OP16 (opcode);
9168 /* [Rn, Ri] */
9169 if (Rn <= 7 && inst.operands[1].imm <= 7)
9170 goto op16;
9171 }
9172 else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
9173 && opcode != T_MNEM_ldrsb)
9174 || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
9175 || (Rn == REG_SP && opcode == T_MNEM_str))
9176 {
9177 /* [Rn, #const] */
9178 if (Rn > 7)
9179 {
9180 if (Rn == REG_PC)
9181 {
9182 if (inst.reloc.pc_rel)
9183 opcode = T_MNEM_ldr_pc2;
9184 else
9185 opcode = T_MNEM_ldr_pc;
9186 }
9187 else
9188 {
9189 if (opcode == T_MNEM_ldr)
9190 opcode = T_MNEM_ldr_sp;
9191 else
9192 opcode = T_MNEM_str_sp;
9193 }
9194 inst.instruction = inst.operands[0].reg << 8;
9195 }
9196 else
9197 {
9198 inst.instruction = inst.operands[0].reg;
9199 inst.instruction |= inst.operands[1].reg << 3;
9200 }
9201 inst.instruction |= THUMB_OP16 (opcode);
9202 if (inst.size_req == 2)
9203 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
9204 else
9205 inst.relax = opcode;
9206 return;
9207 }
9208 }
9209 /* Definitely a 32-bit variant. */
9210 inst.instruction = THUMB_OP32 (opcode);
9211 inst.instruction |= inst.operands[0].reg << 12;
9212 encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
9213 return;
9214 }
9215
9216 constraint (inst.operands[0].reg > 7, BAD_HIREG);
9217
9218 if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
9219 {
9220 /* Only [Rn,Rm] is acceptable. */
9221 constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
9222 constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
9223 || inst.operands[1].postind || inst.operands[1].shifted
9224 || inst.operands[1].negative,
9225 _("Thumb does not support this addressing mode"));
9226 inst.instruction = THUMB_OP16 (inst.instruction);
9227 goto op16;
9228 }
9229
9230 inst.instruction = THUMB_OP16 (inst.instruction);
9231 if (!inst.operands[1].isreg)
9232 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
9233 return;
9234
9235 constraint (!inst.operands[1].preind
9236 || inst.operands[1].shifted
9237 || inst.operands[1].writeback,
9238 _("Thumb does not support this addressing mode"));
9239 if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
9240 {
9241 constraint (inst.instruction & 0x0600,
9242 _("byte or halfword not valid for base register"));
9243 constraint (inst.operands[1].reg == REG_PC
9244 && !(inst.instruction & THUMB_LOAD_BIT),
9245 _("r15 based store not allowed"));
9246 constraint (inst.operands[1].immisreg,
9247 _("invalid base register for register offset"));
9248
9249 if (inst.operands[1].reg == REG_PC)
9250 inst.instruction = T_OPCODE_LDR_PC;
9251 else if (inst.instruction & THUMB_LOAD_BIT)
9252 inst.instruction = T_OPCODE_LDR_SP;
9253 else
9254 inst.instruction = T_OPCODE_STR_SP;
9255
9256 inst.instruction |= inst.operands[0].reg << 8;
9257 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
9258 return;
9259 }
9260
9261 constraint (inst.operands[1].reg > 7, BAD_HIREG);
9262 if (!inst.operands[1].immisreg)
9263 {
9264 /* Immediate offset. */
9265 inst.instruction |= inst.operands[0].reg;
9266 inst.instruction |= inst.operands[1].reg << 3;
9267 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
9268 return;
9269 }
9270
9271 /* Register offset. */
9272 constraint (inst.operands[1].imm > 7, BAD_HIREG);
9273 constraint (inst.operands[1].negative,
9274 _("Thumb does not support this addressing mode"));
9275
9276 op16:
9277 switch (inst.instruction)
9278 {
9279 case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
9280 case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
9281 case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
9282 case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
9283 case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
9284 case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
9285 case 0x5600 /* ldrsb */:
9286 case 0x5e00 /* ldrsh */: break;
9287 default: abort ();
9288 }
9289
9290 inst.instruction |= inst.operands[0].reg;
9291 inst.instruction |= inst.operands[1].reg << 3;
9292 inst.instruction |= inst.operands[1].imm << 6;
9293 }
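
/* A few illustrative cases for the load/store encoder above (unified
   syntax, no size suffix):

     ldr  r0, [r1, #4]     16-bit immediate-offset form, relaxable;
     ldr  r0, [sp, #8]     16-bit SP-relative form (T_MNEM_ldr_sp);
     ldr  r0, [r1, r2]     16-bit register-offset form when all regs <= r7;
     ldr  r8, [r1]         needs a high register, so the 32-bit encoding
                           via encode_thumb32_addr_mode is used.  */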
9294
9295 static void
9296 do_t_ldstd (void)
9297 {
9298 if (!inst.operands[1].present)
9299 {
9300 inst.operands[1].reg = inst.operands[0].reg + 1;
9301 constraint (inst.operands[0].reg == REG_LR,
9302 _("r14 not allowed here"));
9303 }
9304 inst.instruction |= inst.operands[0].reg << 12;
9305 inst.instruction |= inst.operands[1].reg << 8;
9306 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
9307
9308 }
9309
9310 static void
9311 do_t_ldstt (void)
9312 {
9313 inst.instruction |= inst.operands[0].reg << 12;
9314 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
9315 }
9316
9317 static void
9318 do_t_mla (void)
9319 {
9320 inst.instruction |= inst.operands[0].reg << 8;
9321 inst.instruction |= inst.operands[1].reg << 16;
9322 inst.instruction |= inst.operands[2].reg;
9323 inst.instruction |= inst.operands[3].reg << 12;
9324 }
9325
9326 static void
9327 do_t_mlal (void)
9328 {
9329 inst.instruction |= inst.operands[0].reg << 12;
9330 inst.instruction |= inst.operands[1].reg << 8;
9331 inst.instruction |= inst.operands[2].reg << 16;
9332 inst.instruction |= inst.operands[3].reg;
9333 }
9334
9335 static void
9336 do_t_mov_cmp (void)
9337 {
9338 if (unified_syntax)
9339 {
9340 int r0off = (inst.instruction == T_MNEM_mov
9341 || inst.instruction == T_MNEM_movs) ? 8 : 16;
9342 unsigned long opcode;
9343 bfd_boolean narrow;
9344 bfd_boolean low_regs;
9345
9346 low_regs = (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7);
9347 opcode = inst.instruction;
9348 if (current_it_mask)
9349 narrow = opcode != T_MNEM_movs;
9350 else
9351 narrow = opcode != T_MNEM_movs || low_regs;
9352 if (inst.size_req == 4
9353 || inst.operands[1].shifted)
9354 narrow = FALSE;
9355
9356 if (!inst.operands[1].isreg)
9357 {
9358 /* Immediate operand. */
9359 if (current_it_mask == 0 && opcode == T_MNEM_mov)
9360 narrow = 0;
9361 if (low_regs && narrow)
9362 {
9363 inst.instruction = THUMB_OP16 (opcode);
9364 inst.instruction |= inst.operands[0].reg << 8;
9365 if (inst.size_req == 2)
9366 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
9367 else
9368 inst.relax = opcode;
9369 }
9370 else
9371 {
9372 inst.instruction = THUMB_OP32 (inst.instruction);
9373 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9374 inst.instruction |= inst.operands[0].reg << r0off;
9375 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9376 }
9377 }
9378 else if (!narrow)
9379 {
9380 inst.instruction = THUMB_OP32 (inst.instruction);
9381 inst.instruction |= inst.operands[0].reg << r0off;
9382 encode_thumb32_shifted_operand (1);
9383 }
9384 else
9385 switch (inst.instruction)
9386 {
9387 case T_MNEM_mov:
9388 inst.instruction = T_OPCODE_MOV_HR;
9389 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
9390 inst.instruction |= (inst.operands[0].reg & 0x7);
9391 inst.instruction |= inst.operands[1].reg << 3;
9392 break;
9393
9394 case T_MNEM_movs:
9395 /* We know we have low registers at this point.
9396 Generate ADD Rd, Rs, #0. */
9397 inst.instruction = T_OPCODE_ADD_I3;
9398 inst.instruction |= inst.operands[0].reg;
9399 inst.instruction |= inst.operands[1].reg << 3;
9400 break;
9401
9402 case T_MNEM_cmp:
9403 if (low_regs)
9404 {
9405 inst.instruction = T_OPCODE_CMP_LR;
9406 inst.instruction |= inst.operands[0].reg;
9407 inst.instruction |= inst.operands[1].reg << 3;
9408 }
9409 else
9410 {
9411 inst.instruction = T_OPCODE_CMP_HR;
9412 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
9413 inst.instruction |= (inst.operands[0].reg & 0x7);
9414 inst.instruction |= inst.operands[1].reg << 3;
9415 }
9416 break;
9417 }
9418 return;
9419 }
9420
9421 inst.instruction = THUMB_OP16 (inst.instruction);
9422 if (inst.operands[1].isreg)
9423 {
9424 if (inst.operands[0].reg < 8 && inst.operands[1].reg < 8)
9425 {
9426 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
9427 since a MOV instruction produces unpredictable results. */
9428 if (inst.instruction == T_OPCODE_MOV_I8)
9429 inst.instruction = T_OPCODE_ADD_I3;
9430 else
9431 inst.instruction = T_OPCODE_CMP_LR;
9432
9433 inst.instruction |= inst.operands[0].reg;
9434 inst.instruction |= inst.operands[1].reg << 3;
9435 }
9436 else
9437 {
9438 if (inst.instruction == T_OPCODE_MOV_I8)
9439 inst.instruction = T_OPCODE_MOV_HR;
9440 else
9441 inst.instruction = T_OPCODE_CMP_HR;
9442 do_t_cpy ();
9443 }
9444 }
9445 else
9446 {
9447 constraint (inst.operands[0].reg > 7,
9448 _("only lo regs allowed with immediate"));
9449 inst.instruction |= inst.operands[0].reg << 8;
9450 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
9451 }
9452 }
9453
9454 static void
9455 do_t_mov16 (void)
9456 {
9457 bfd_vma imm;
9458 bfd_boolean top;
9459
9460 top = (inst.instruction & 0x00800000) != 0;
9461 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
9462 {
9463       constraint (top, _(":lower16: not allowed in this instruction"));
9464 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
9465 }
9466 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
9467 {
9468       constraint (!top, _(":upper16: not allowed in this instruction"));
9469 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
9470 }
9471
9472 inst.instruction |= inst.operands[0].reg << 8;
9473 if (inst.reloc.type == BFD_RELOC_UNUSED)
9474 {
9475 imm = inst.reloc.exp.X_add_number;
9476 inst.instruction |= (imm & 0xf000) << 4;
9477 inst.instruction |= (imm & 0x0800) << 15;
9478 inst.instruction |= (imm & 0x0700) << 4;
9479 inst.instruction |= (imm & 0x00ff);
9480 }
9481 }
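
/* Example of the immediate split above, for a hypothetical
   "movw r0, #0x1234": imm4 = 0x1 goes to bits 19-16, i = 0 to bit 26,
   imm3 = 0x2 to bits 14-12 and imm8 = 0x34 to bits 7-0, matching the
   Thumb-2 imm4:i:imm3:imm8 layout for MOVW/MOVT.  */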
9482
9483 static void
9484 do_t_mvn_tst (void)
9485 {
9486 if (unified_syntax)
9487 {
9488 int r0off = (inst.instruction == T_MNEM_mvn
9489 || inst.instruction == T_MNEM_mvns) ? 8 : 16;
9490 bfd_boolean narrow;
9491
9492 if (inst.size_req == 4
9493 || inst.instruction > 0xffff
9494 || inst.operands[1].shifted
9495 || inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
9496 narrow = FALSE;
9497 else if (inst.instruction == T_MNEM_cmn)
9498 narrow = TRUE;
9499 else if (THUMB_SETS_FLAGS (inst.instruction))
9500 narrow = (current_it_mask == 0);
9501 else
9502 narrow = (current_it_mask != 0);
9503
9504 if (!inst.operands[1].isreg)
9505 {
9506 /* For an immediate, we always generate a 32-bit opcode;
9507 section relaxation will shrink it later if possible. */
9508 if (inst.instruction < 0xffff)
9509 inst.instruction = THUMB_OP32 (inst.instruction);
9510 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9511 inst.instruction |= inst.operands[0].reg << r0off;
9512 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9513 }
9514 else
9515 {
9516 /* See if we can do this with a 16-bit instruction. */
9517 if (narrow)
9518 {
9519 inst.instruction = THUMB_OP16 (inst.instruction);
9520 inst.instruction |= inst.operands[0].reg;
9521 inst.instruction |= inst.operands[1].reg << 3;
9522 }
9523 else
9524 {
9525 constraint (inst.operands[1].shifted
9526 && inst.operands[1].immisreg,
9527 _("shift must be constant"));
9528 if (inst.instruction < 0xffff)
9529 inst.instruction = THUMB_OP32 (inst.instruction);
9530 inst.instruction |= inst.operands[0].reg << r0off;
9531 encode_thumb32_shifted_operand (1);
9532 }
9533 }
9534 }
9535 else
9536 {
9537 constraint (inst.instruction > 0xffff
9538 || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
9539 constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
9540 _("unshifted register required"));
9541 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
9542 BAD_HIREG);
9543
9544 inst.instruction = THUMB_OP16 (inst.instruction);
9545 inst.instruction |= inst.operands[0].reg;
9546 inst.instruction |= inst.operands[1].reg << 3;
9547 }
9548 }
9549
9550 static void
9551 do_t_mrs (void)
9552 {
9553 int flags;
9554
9555 if (do_vfp_nsyn_mrs () == SUCCESS)
9556 return;
9557
9558 flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
9559 if (flags == 0)
9560 {
9561 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
9562 _("selected processor does not support "
9563 "requested special purpose register"));
9564 }
9565 else
9566 {
9567 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
9568 _("selected processor does not support "
9569 "requested special purpose register %x"));
9570 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
9571 constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
9572 _("'CPSR' or 'SPSR' expected"));
9573 }
9574
9575 inst.instruction |= inst.operands[0].reg << 8;
9576 inst.instruction |= (flags & SPSR_BIT) >> 2;
9577 inst.instruction |= inst.operands[1].imm & 0xff;
9578 }
9579
9580 static void
9581 do_t_msr (void)
9582 {
9583 int flags;
9584
9585 if (do_vfp_nsyn_msr () == SUCCESS)
9586 return;
9587
9588 constraint (!inst.operands[1].isreg,
9589 _("Thumb encoding does not support an immediate here"));
9590 flags = inst.operands[0].imm;
9591 if (flags & ~0xff)
9592 {
9593 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
9594 _("selected processor does not support "
9595 "requested special purpose register"));
9596 }
9597 else
9598 {
9599 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
9600 _("selected processor does not support "
9601 "requested special purpose register"));
9602 flags |= PSR_f;
9603 }
9604 inst.instruction |= (flags & SPSR_BIT) >> 2;
9605 inst.instruction |= (flags & ~SPSR_BIT) >> 8;
9606 inst.instruction |= (flags & 0xff);
9607 inst.instruction |= inst.operands[1].reg << 16;
9608 }
9609
9610 static void
9611 do_t_mul (void)
9612 {
9613 if (!inst.operands[2].present)
9614 inst.operands[2].reg = inst.operands[0].reg;
9615
9616 /* There is no 32-bit MULS and no 16-bit MUL. */
9617 if (unified_syntax && inst.instruction == T_MNEM_mul)
9618 {
9619 inst.instruction = THUMB_OP32 (inst.instruction);
9620 inst.instruction |= inst.operands[0].reg << 8;
9621 inst.instruction |= inst.operands[1].reg << 16;
9622 inst.instruction |= inst.operands[2].reg << 0;
9623 }
9624 else
9625 {
9626 constraint (!unified_syntax
9627 && inst.instruction == T_MNEM_muls, BAD_THUMB32);
9628 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
9629 BAD_HIREG);
9630
9631 inst.instruction = THUMB_OP16 (inst.instruction);
9632 inst.instruction |= inst.operands[0].reg;
9633
9634 if (inst.operands[0].reg == inst.operands[1].reg)
9635 inst.instruction |= inst.operands[2].reg << 3;
9636 else if (inst.operands[0].reg == inst.operands[2].reg)
9637 inst.instruction |= inst.operands[1].reg << 3;
9638 else
9639 constraint (1, _("dest must overlap one source register"));
9640 }
9641 }
9642
9643 static void
9644 do_t_mull (void)
9645 {
9646 inst.instruction |= inst.operands[0].reg << 12;
9647 inst.instruction |= inst.operands[1].reg << 8;
9648 inst.instruction |= inst.operands[2].reg << 16;
9649 inst.instruction |= inst.operands[3].reg;
9650
9651 if (inst.operands[0].reg == inst.operands[1].reg)
9652 as_tsktsk (_("rdhi and rdlo must be different"));
9653 }
9654
9655 static void
9656 do_t_nop (void)
9657 {
9658 if (unified_syntax)
9659 {
9660 if (inst.size_req == 4 || inst.operands[0].imm > 15)
9661 {
9662 inst.instruction = THUMB_OP32 (inst.instruction);
9663 inst.instruction |= inst.operands[0].imm;
9664 }
9665 else
9666 {
9667 inst.instruction = THUMB_OP16 (inst.instruction);
9668 inst.instruction |= inst.operands[0].imm << 4;
9669 }
9670 }
9671 else
9672 {
9673 constraint (inst.operands[0].present,
9674 _("Thumb does not support NOP with hints"));
9675 inst.instruction = 0x46c0;
9676 }
9677 }
9678
9679 static void
9680 do_t_neg (void)
9681 {
9682 if (unified_syntax)
9683 {
9684 bfd_boolean narrow;
9685
9686 if (THUMB_SETS_FLAGS (inst.instruction))
9687 narrow = (current_it_mask == 0);
9688 else
9689 narrow = (current_it_mask != 0);
9690 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
9691 narrow = FALSE;
9692 if (inst.size_req == 4)
9693 narrow = FALSE;
9694
9695 if (!narrow)
9696 {
9697 inst.instruction = THUMB_OP32 (inst.instruction);
9698 inst.instruction |= inst.operands[0].reg << 8;
9699 inst.instruction |= inst.operands[1].reg << 16;
9700 }
9701 else
9702 {
9703 inst.instruction = THUMB_OP16 (inst.instruction);
9704 inst.instruction |= inst.operands[0].reg;
9705 inst.instruction |= inst.operands[1].reg << 3;
9706 }
9707 }
9708 else
9709 {
9710 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
9711 BAD_HIREG);
9712 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9713
9714 inst.instruction = THUMB_OP16 (inst.instruction);
9715 inst.instruction |= inst.operands[0].reg;
9716 inst.instruction |= inst.operands[1].reg << 3;
9717 }
9718 }
9719
9720 static void
9721 do_t_pkhbt (void)
9722 {
9723 inst.instruction |= inst.operands[0].reg << 8;
9724 inst.instruction |= inst.operands[1].reg << 16;
9725 inst.instruction |= inst.operands[2].reg;
9726 if (inst.operands[3].present)
9727 {
9728 unsigned int val = inst.reloc.exp.X_add_number;
9729 constraint (inst.reloc.exp.X_op != O_constant,
9730 _("expression too complex"));
9731 inst.instruction |= (val & 0x1c) << 10;
9732 inst.instruction |= (val & 0x03) << 6;
9733 }
9734 }
9735
9736 static void
9737 do_t_pkhtb (void)
9738 {
9739 if (!inst.operands[3].present)
9740 inst.instruction &= ~0x00000020;
9741 do_t_pkhbt ();
9742 }
9743
9744 static void
9745 do_t_pld (void)
9746 {
9747 encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
9748 }
9749
9750 static void
9751 do_t_push_pop (void)
9752 {
9753 unsigned mask;
9754
9755 constraint (inst.operands[0].writeback,
9756 _("push/pop do not support {reglist}^"));
9757 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
9758 _("expression too complex"));
9759
9760 mask = inst.operands[0].imm;
9761 if ((mask & ~0xff) == 0)
9762 inst.instruction = THUMB_OP16 (inst.instruction);
9763 else if ((inst.instruction == T_MNEM_push
9764 && (mask & ~0xff) == 1 << REG_LR)
9765 || (inst.instruction == T_MNEM_pop
9766 && (mask & ~0xff) == 1 << REG_PC))
9767 {
9768 inst.instruction = THUMB_OP16 (inst.instruction);
9769 inst.instruction |= THUMB_PP_PC_LR;
9770 mask &= 0xff;
9771 }
9772 else if (unified_syntax)
9773 {
9774 if (mask & (1 << 13))
9775 inst.error = _("SP not allowed in register list");
9776 if (inst.instruction == T_MNEM_push)
9777 {
9778 if (mask & (1 << 15))
9779 inst.error = _("PC not allowed in register list");
9780 }
9781 else
9782 {
9783 if (mask & (1 << 14)
9784 && mask & (1 << 15))
9785 inst.error = _("LR and PC should not both be in register list");
9786 }
9787 if ((mask & (mask - 1)) == 0)
9788 {
9789 /* Single register push/pop implemented as str/ldr. */
9790 if (inst.instruction == T_MNEM_push)
9791 inst.instruction = 0xf84d0d04; /* str reg, [sp, #-4]! */
9792 else
9793 inst.instruction = 0xf85d0b04; /* ldr reg, [sp], #4 */
9794 mask = ffs(mask) - 1;
9795 mask <<= 12;
9796 }
9797 else
9798 inst.instruction = THUMB_OP32 (inst.instruction);
9799 }
9800 else
9801 {
9802 inst.error = _("invalid register list to push/pop instruction");
9803 return;
9804 }
9805
9806 inst.instruction |= mask;
9807 }
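
/* The single-register special case above means, for instance, that
   "push {r4}" is emitted as "str r4, [sp, #-4]!" (base 0xf84d0d04 with
   r4 placed in bits 15-12, giving 0xf84d4d04) and "pop {r4}" as
   "ldr r4, [sp], #4", rather than as one-register LDM/STM lists.  */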
9808
9809 static void
9810 do_t_rbit (void)
9811 {
9812 inst.instruction |= inst.operands[0].reg << 8;
9813 inst.instruction |= inst.operands[1].reg << 16;
9814 }
9815
9816 static void
9817 do_t_rev (void)
9818 {
9819 if (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
9820 && inst.size_req != 4)
9821 {
9822 inst.instruction = THUMB_OP16 (inst.instruction);
9823 inst.instruction |= inst.operands[0].reg;
9824 inst.instruction |= inst.operands[1].reg << 3;
9825 }
9826 else if (unified_syntax)
9827 {
9828 inst.instruction = THUMB_OP32 (inst.instruction);
9829 inst.instruction |= inst.operands[0].reg << 8;
9830 inst.instruction |= inst.operands[1].reg << 16;
9831 inst.instruction |= inst.operands[1].reg;
9832 }
9833 else
9834 inst.error = BAD_HIREG;
9835 }
9836
9837 static void
9838 do_t_rsb (void)
9839 {
9840 int Rd, Rs;
9841
9842 Rd = inst.operands[0].reg;
9843 Rs = (inst.operands[1].present
9844 ? inst.operands[1].reg /* Rd, Rs, foo */
9845 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
9846
9847 inst.instruction |= Rd << 8;
9848 inst.instruction |= Rs << 16;
9849 if (!inst.operands[2].isreg)
9850 {
9851 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9852 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9853 }
9854 else
9855 encode_thumb32_shifted_operand (2);
9856 }
9857
9858 static void
9859 do_t_setend (void)
9860 {
9861 constraint (current_it_mask, BAD_NOT_IT);
9862 if (inst.operands[0].imm)
9863 inst.instruction |= 0x8;
9864 }
9865
9866 static void
9867 do_t_shift (void)
9868 {
9869 if (!inst.operands[1].present)
9870 inst.operands[1].reg = inst.operands[0].reg;
9871
9872 if (unified_syntax)
9873 {
9874 bfd_boolean narrow;
9875 int shift_kind;
9876
9877 switch (inst.instruction)
9878 {
9879 case T_MNEM_asr:
9880 case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
9881 case T_MNEM_lsl:
9882 case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
9883 case T_MNEM_lsr:
9884 case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
9885 case T_MNEM_ror:
9886 case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
9887 default: abort ();
9888 }
9889
9890 if (THUMB_SETS_FLAGS (inst.instruction))
9891 narrow = (current_it_mask == 0);
9892 else
9893 narrow = (current_it_mask != 0);
9894 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
9895 narrow = FALSE;
9896 if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
9897 narrow = FALSE;
9898 if (inst.operands[2].isreg
9899 && (inst.operands[1].reg != inst.operands[0].reg
9900 || inst.operands[2].reg > 7))
9901 narrow = FALSE;
9902 if (inst.size_req == 4)
9903 narrow = FALSE;
9904
9905 if (!narrow)
9906 {
9907 if (inst.operands[2].isreg)
9908 {
9909 inst.instruction = THUMB_OP32 (inst.instruction);
9910 inst.instruction |= inst.operands[0].reg << 8;
9911 inst.instruction |= inst.operands[1].reg << 16;
9912 inst.instruction |= inst.operands[2].reg;
9913 }
9914 else
9915 {
9916 inst.operands[1].shifted = 1;
9917 inst.operands[1].shift_kind = shift_kind;
9918 inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
9919 ? T_MNEM_movs : T_MNEM_mov);
9920 inst.instruction |= inst.operands[0].reg << 8;
9921 encode_thumb32_shifted_operand (1);
9922 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
9923 inst.reloc.type = BFD_RELOC_UNUSED;
9924 }
9925 }
9926 else
9927 {
9928 if (inst.operands[2].isreg)
9929 {
9930 switch (shift_kind)
9931 {
9932 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
9933 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
9934 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
9935 case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
9936 default: abort ();
9937 }
9938
9939 inst.instruction |= inst.operands[0].reg;
9940 inst.instruction |= inst.operands[2].reg << 3;
9941 }
9942 else
9943 {
9944 switch (shift_kind)
9945 {
9946 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
9947 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
9948 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
9949 default: abort ();
9950 }
9951 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
9952 inst.instruction |= inst.operands[0].reg;
9953 inst.instruction |= inst.operands[1].reg << 3;
9954 }
9955 }
9956 }
9957 else
9958 {
9959 constraint (inst.operands[0].reg > 7
9960 || inst.operands[1].reg > 7, BAD_HIREG);
9961 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9962
9963 if (inst.operands[2].isreg) /* Rd, {Rs,} Rn */
9964 {
9965 constraint (inst.operands[2].reg > 7, BAD_HIREG);
9966 constraint (inst.operands[0].reg != inst.operands[1].reg,
9967 _("source1 and dest must be same register"));
9968
9969 switch (inst.instruction)
9970 {
9971 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
9972 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
9973 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
9974 case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
9975 default: abort ();
9976 }
9977
9978 inst.instruction |= inst.operands[0].reg;
9979 inst.instruction |= inst.operands[2].reg << 3;
9980 }
9981 else
9982 {
9983 switch (inst.instruction)
9984 {
9985 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
9986 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
9987 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
9988 case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
9989 default: abort ();
9990 }
9991 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
9992 inst.instruction |= inst.operands[0].reg;
9993 inst.instruction |= inst.operands[1].reg << 3;
9994 }
9995 }
9996 }
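
/* Note on the wide immediate-shift path above: a constant shift in
   Thumb-2 is encoded via MOV with a shifted-register operand, so
   something like "lsls.w r0, r1, #2" is materialised as
   "movs.w r0, r1, lsl #2"; that is why the operand is rewritten as
   shifted and encode_thumb32_shifted_operand is reused.  */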
9997
9998 static void
9999 do_t_simd (void)
10000 {
10001 inst.instruction |= inst.operands[0].reg << 8;
10002 inst.instruction |= inst.operands[1].reg << 16;
10003 inst.instruction |= inst.operands[2].reg;
10004 }
10005
10006 static void
10007 do_t_smc (void)
10008 {
10009 unsigned int value = inst.reloc.exp.X_add_number;
10010 constraint (inst.reloc.exp.X_op != O_constant,
10011 _("expression too complex"));
10012 inst.reloc.type = BFD_RELOC_UNUSED;
10013 inst.instruction |= (value & 0xf000) >> 12;
10014 inst.instruction |= (value & 0x0ff0);
10015 inst.instruction |= (value & 0x000f) << 16;
10016 }
10017
10018 static void
10019 do_t_ssat (void)
10020 {
10021 inst.instruction |= inst.operands[0].reg << 8;
10022 inst.instruction |= inst.operands[1].imm - 1;
10023 inst.instruction |= inst.operands[2].reg << 16;
10024
10025 if (inst.operands[3].present)
10026 {
10027 constraint (inst.reloc.exp.X_op != O_constant,
10028 _("expression too complex"));
10029
10030 if (inst.reloc.exp.X_add_number != 0)
10031 {
10032 if (inst.operands[3].shift_kind == SHIFT_ASR)
10033 inst.instruction |= 0x00200000; /* sh bit */
10034 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
10035 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
10036 }
10037 inst.reloc.type = BFD_RELOC_UNUSED;
10038 }
10039 }
10040
10041 static void
10042 do_t_ssat16 (void)
10043 {
10044 inst.instruction |= inst.operands[0].reg << 8;
10045 inst.instruction |= inst.operands[1].imm - 1;
10046 inst.instruction |= inst.operands[2].reg << 16;
10047 }
10048
10049 static void
10050 do_t_strex (void)
10051 {
10052 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
10053 || inst.operands[2].postind || inst.operands[2].writeback
10054 || inst.operands[2].immisreg || inst.operands[2].shifted
10055 || inst.operands[2].negative,
10056 BAD_ADDR_MODE);
10057
10058 inst.instruction |= inst.operands[0].reg << 8;
10059 inst.instruction |= inst.operands[1].reg << 12;
10060 inst.instruction |= inst.operands[2].reg << 16;
10061 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
10062 }
10063
10064 static void
10065 do_t_strexd (void)
10066 {
10067 if (!inst.operands[2].present)
10068 inst.operands[2].reg = inst.operands[1].reg + 1;
10069
10070 constraint (inst.operands[0].reg == inst.operands[1].reg
10071 || inst.operands[0].reg == inst.operands[2].reg
10072 || inst.operands[0].reg == inst.operands[3].reg
10073 || inst.operands[1].reg == inst.operands[2].reg,
10074 BAD_OVERLAP);
10075
10076 inst.instruction |= inst.operands[0].reg;
10077 inst.instruction |= inst.operands[1].reg << 12;
10078 inst.instruction |= inst.operands[2].reg << 8;
10079 inst.instruction |= inst.operands[3].reg << 16;
10080 }
10081
10082 static void
10083 do_t_sxtah (void)
10084 {
10085 inst.instruction |= inst.operands[0].reg << 8;
10086 inst.instruction |= inst.operands[1].reg << 16;
10087 inst.instruction |= inst.operands[2].reg;
10088 inst.instruction |= inst.operands[3].imm << 4;
10089 }
10090
10091 static void
10092 do_t_sxth (void)
10093 {
10094 if (inst.instruction <= 0xffff && inst.size_req != 4
10095 && inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
10096 && (!inst.operands[2].present || inst.operands[2].imm == 0))
10097 {
10098 inst.instruction = THUMB_OP16 (inst.instruction);
10099 inst.instruction |= inst.operands[0].reg;
10100 inst.instruction |= inst.operands[1].reg << 3;
10101 }
10102 else if (unified_syntax)
10103 {
10104 if (inst.instruction <= 0xffff)
10105 inst.instruction = THUMB_OP32 (inst.instruction);
10106 inst.instruction |= inst.operands[0].reg << 8;
10107 inst.instruction |= inst.operands[1].reg;
10108 inst.instruction |= inst.operands[2].imm << 4;
10109 }
10110 else
10111 {
10112 constraint (inst.operands[2].present && inst.operands[2].imm != 0,
10113 _("Thumb encoding does not support rotation"));
10114 constraint (1, BAD_HIREG);
10115 }
10116 }
10117
10118 static void
10119 do_t_swi (void)
10120 {
10121 inst.reloc.type = BFD_RELOC_ARM_SWI;
10122 }
10123
10124 static void
10125 do_t_tb (void)
10126 {
10127 int half;
10128
10129 half = (inst.instruction & 0x10) != 0;
10130 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
10131 constraint (inst.operands[0].immisreg,
10132 _("instruction requires register index"));
10133 constraint (inst.operands[0].imm == 15,
10134 _("PC is not a valid index register"));
10135 constraint (!half && inst.operands[0].shifted,
10136 _("instruction does not allow shifted index"));
10137 inst.instruction |= (inst.operands[0].reg << 16) | inst.operands[0].imm;
10138 }
10139
10140 static void
10141 do_t_usat (void)
10142 {
10143 inst.instruction |= inst.operands[0].reg << 8;
10144 inst.instruction |= inst.operands[1].imm;
10145 inst.instruction |= inst.operands[2].reg << 16;
10146
10147 if (inst.operands[3].present)
10148 {
10149 constraint (inst.reloc.exp.X_op != O_constant,
10150 _("expression too complex"));
10151 if (inst.reloc.exp.X_add_number != 0)
10152 {
10153 if (inst.operands[3].shift_kind == SHIFT_ASR)
10154 inst.instruction |= 0x00200000; /* sh bit */
10155
10156 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
10157 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
10158 }
10159 inst.reloc.type = BFD_RELOC_UNUSED;
10160 }
10161 }
10162
10163 static void
10164 do_t_usat16 (void)
10165 {
10166 inst.instruction |= inst.operands[0].reg << 8;
10167 inst.instruction |= inst.operands[1].imm;
10168 inst.instruction |= inst.operands[2].reg << 16;
10169 }
10170
10171 /* Neon instruction encoder helpers. */
10172
10173 /* Encodings for the different types for various Neon opcodes. */
10174
10175 /* An "invalid" code for the following tables. */
10176 #define N_INV -1u
10177
10178 struct neon_tab_entry
10179 {
10180 unsigned integer;
10181 unsigned float_or_poly;
10182 unsigned scalar_or_imm;
10183 };
10184
10185 /* Map overloaded Neon opcodes to their respective encodings. */
10186 #define NEON_ENC_TAB \
10187 X(vabd, 0x0000700, 0x1200d00, N_INV), \
10188 X(vmax, 0x0000600, 0x0000f00, N_INV), \
10189 X(vmin, 0x0000610, 0x0200f00, N_INV), \
10190 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
10191 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
10192 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
10193 X(vadd, 0x0000800, 0x0000d00, N_INV), \
10194 X(vsub, 0x1000800, 0x0200d00, N_INV), \
10195 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
10196 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
10197 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
10198 /* Register variants of the following two instructions are encoded as
10199 vcge / vcgt with the operands reversed. */ \
10200 X(vclt, 0x0000310, 0x1000e00, 0x1b10200), \
10201 X(vcle, 0x0000300, 0x1200e00, 0x1b10180), \
10202 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
10203 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
10204 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
10205 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
10206 X(vmlal, 0x0800800, N_INV, 0x0800240), \
10207 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
10208 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
10209 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
10210 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
10211 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
10212 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
10213 X(vshl, 0x0000400, N_INV, 0x0800510), \
10214 X(vqshl, 0x0000410, N_INV, 0x0800710), \
10215 X(vand, 0x0000110, N_INV, 0x0800030), \
10216 X(vbic, 0x0100110, N_INV, 0x0800030), \
10217 X(veor, 0x1000110, N_INV, N_INV), \
10218 X(vorn, 0x0300110, N_INV, 0x0800010), \
10219 X(vorr, 0x0200110, N_INV, 0x0800010), \
10220 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
10221 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
10222 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
10223 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
10224 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
10225 X(vst1, 0x0000000, 0x0800000, N_INV), \
10226 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
10227 X(vst2, 0x0000100, 0x0800100, N_INV), \
10228 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
10229 X(vst3, 0x0000200, 0x0800200, N_INV), \
10230 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
10231 X(vst4, 0x0000300, 0x0800300, N_INV), \
10232 X(vmovn, 0x1b20200, N_INV, N_INV), \
10233 X(vtrn, 0x1b20080, N_INV, N_INV), \
10234 X(vqmovn, 0x1b20200, N_INV, N_INV), \
10235 X(vqmovun, 0x1b20240, N_INV, N_INV), \
10236 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
10237 X(vnmla, 0xe000a40, 0xe000b40, N_INV), \
10238 X(vnmls, 0xe100a40, 0xe100b40, N_INV), \
10239 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
10240 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
10241 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
10242 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV)
10243
10244 enum neon_opc
10245 {
10246 #define X(OPC,I,F,S) N_MNEM_##OPC
10247 NEON_ENC_TAB
10248 #undef X
10249 };
10250
10251 static const struct neon_tab_entry neon_enc_tab[] =
10252 {
10253 #define X(OPC,I,F,S) { (I), (F), (S) }
10254 NEON_ENC_TAB
10255 #undef X
10256 };
10257
10258 #define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
10259 #define NEON_ENC_ARMREG(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
10260 #define NEON_ENC_POLY(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
10261 #define NEON_ENC_FLOAT(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
10262 #define NEON_ENC_SCALAR(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
10263 #define NEON_ENC_IMMED(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
10264 #define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
10265 #define NEON_ENC_LANE(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
10266 #define NEON_ENC_DUP(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
10267 #define NEON_ENC_SINGLE(X) \
10268 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
10269 #define NEON_ENC_DOUBLE(X) \
10270 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
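
/* For example, while inst.instruction still holds the bare mnemonic value
   N_MNEM_vadd, NEON_ENC_INTEGER yields 0x0000800 (the integer column of the
   table above) and NEON_ENC_FLOAT yields 0x0000d00.  Only NEON_ENC_SINGLE and
   NEON_ENC_DOUBLE preserve the top four bits of their argument.  */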
10271
10272 /* Define shapes for instruction operands. The following mnemonic characters
10273 are used in this table:
10274
10275 F - VFP S<n> register
10276 D - Neon D<n> register
10277 Q - Neon Q<n> register
10278 I - Immediate
10279 S - Scalar
10280 R - ARM register
10281 L - D<n> register list
10282
10283 This table is used to generate various data:
10284 - enumerations of the form NS_DDR to be used as arguments to
10285 neon_select_shape.
10286 - a table classifying shapes into single, double, quad, mixed.
10287 - a table used to drive neon_select_shape.
10288 */
10289
10290 #define NEON_SHAPE_DEF \
10291 X(3, (D, D, D), DOUBLE), \
10292 X(3, (Q, Q, Q), QUAD), \
10293 X(3, (D, D, I), DOUBLE), \
10294 X(3, (Q, Q, I), QUAD), \
10295 X(3, (D, D, S), DOUBLE), \
10296 X(3, (Q, Q, S), QUAD), \
10297 X(2, (D, D), DOUBLE), \
10298 X(2, (Q, Q), QUAD), \
10299 X(2, (D, S), DOUBLE), \
10300 X(2, (Q, S), QUAD), \
10301 X(2, (D, R), DOUBLE), \
10302 X(2, (Q, R), QUAD), \
10303 X(2, (D, I), DOUBLE), \
10304 X(2, (Q, I), QUAD), \
10305 X(3, (D, L, D), DOUBLE), \
10306 X(2, (D, Q), MIXED), \
10307 X(2, (Q, D), MIXED), \
10308 X(3, (D, Q, I), MIXED), \
10309 X(3, (Q, D, I), MIXED), \
10310 X(3, (Q, D, D), MIXED), \
10311 X(3, (D, Q, Q), MIXED), \
10312 X(3, (Q, Q, D), MIXED), \
10313 X(3, (Q, D, S), MIXED), \
10314 X(3, (D, Q, S), MIXED), \
10315 X(4, (D, D, D, I), DOUBLE), \
10316 X(4, (Q, Q, Q, I), QUAD), \
10317 X(2, (F, F), SINGLE), \
10318 X(3, (F, F, F), SINGLE), \
10319 X(2, (F, I), SINGLE), \
10320 X(2, (F, D), MIXED), \
10321 X(2, (D, F), MIXED), \
10322 X(3, (F, F, I), MIXED), \
10323 X(4, (R, R, F, F), SINGLE), \
10324 X(4, (F, F, R, R), SINGLE), \
10325 X(3, (D, R, R), DOUBLE), \
10326 X(3, (R, R, D), DOUBLE), \
10327 X(2, (S, R), SINGLE), \
10328 X(2, (R, S), SINGLE), \
10329 X(2, (F, R), SINGLE), \
10330 X(2, (R, F), SINGLE)
10331
10332 #define S2(A,B) NS_##A##B
10333 #define S3(A,B,C) NS_##A##B##C
10334 #define S4(A,B,C,D) NS_##A##B##C##D
10335
10336 #define X(N, L, C) S##N L
10337
10338 enum neon_shape
10339 {
10340 NEON_SHAPE_DEF,
10341 NS_NULL
10342 };
10343
10344 #undef X
10345 #undef S2
10346 #undef S3
10347 #undef S4
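
/* As an illustration of the macros above, the entry X(3, (D, D, I), DOUBLE)
   expands here to the enumerator NS_DDI; the same entry expands to SC_DOUBLE
   in neon_shape_class below and to { 3, { SE_D, SE_D, SE_I } } in
   neon_shape_tab.  */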
10348
10349 enum neon_shape_class
10350 {
10351 SC_SINGLE,
10352 SC_DOUBLE,
10353 SC_QUAD,
10354 SC_MIXED
10355 };
10356
10357 #define X(N, L, C) SC_##C
10358
10359 static enum neon_shape_class neon_shape_class[] =
10360 {
10361 NEON_SHAPE_DEF
10362 };
10363
10364 #undef X
10365
10366 enum neon_shape_el
10367 {
10368 SE_F,
10369 SE_D,
10370 SE_Q,
10371 SE_I,
10372 SE_S,
10373 SE_R,
10374 SE_L
10375 };
10376
10377 /* Register widths of above. */
10378 static unsigned neon_shape_el_size[] =
10379 {
10380 32,
10381 64,
10382 128,
10383 0,
10384 32,
10385 32,
10386 0
10387 };
10388
10389 struct neon_shape_info
10390 {
10391 unsigned els;
10392 enum neon_shape_el el[NEON_MAX_TYPE_ELS];
10393 };
10394
10395 #define S2(A,B) { SE_##A, SE_##B }
10396 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
10397 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
10398
10399 #define X(N, L, C) { N, S##N L }
10400
10401 static struct neon_shape_info neon_shape_tab[] =
10402 {
10403 NEON_SHAPE_DEF
10404 };
10405
10406 #undef X
10407 #undef S2
10408 #undef S3
10409 #undef S4
10410
10411 /* Bit masks used in type checking given instructions.
10412 'N_EQK' means the type must be the same as (or based on in some way) the key
10413 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
10414 set, various other bits can be set as well in order to modify the meaning of
10415 the type constraint. */
10416
10417 enum neon_type_mask
10418 {
10419 N_S8 = 0x000001,
10420 N_S16 = 0x000002,
10421 N_S32 = 0x000004,
10422 N_S64 = 0x000008,
10423 N_U8 = 0x000010,
10424 N_U16 = 0x000020,
10425 N_U32 = 0x000040,
10426 N_U64 = 0x000080,
10427 N_I8 = 0x000100,
10428 N_I16 = 0x000200,
10429 N_I32 = 0x000400,
10430 N_I64 = 0x000800,
10431 N_8 = 0x001000,
10432 N_16 = 0x002000,
10433 N_32 = 0x004000,
10434 N_64 = 0x008000,
10435 N_P8 = 0x010000,
10436 N_P16 = 0x020000,
10437 N_F32 = 0x040000,
10438 N_F64 = 0x080000,
10439 N_KEY = 0x100000, /* key element (main type specifier). */
10440 N_EQK = 0x200000, /* given operand has the same type & size as the key. */
10441 N_VFP = 0x400000, /* VFP mode: operand size must match register width. */
10442 N_DBL = 0x000001, /* if N_EQK, this operand is twice the size. */
10443 N_HLF = 0x000002, /* if N_EQK, this operand is half the size. */
10444 N_SGN = 0x000004, /* if N_EQK, this operand is forced to be signed. */
10445 N_UNS = 0x000008, /* if N_EQK, this operand is forced to be unsigned. */
10446 N_INT = 0x000010, /* if N_EQK, this operand is forced to be integer. */
10447 N_FLT = 0x000020, /* if N_EQK, this operand is forced to be float. */
10448 N_SIZ = 0x000040, /* if N_EQK, this operand is forced to be size-only. */
10449 N_UTYP = 0,
10450 N_MAX_NONSPECIAL = N_F64
10451 };
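
/* Note that the modifier values N_DBL through N_SIZ reuse the bit positions of
   N_S8 through N_U32; they are only interpreted as modifiers when N_EQK is
   also set.  For instance, N_EQK | N_DBL means "same type as the key, but
   twice its size".  */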
10452
10453 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
10454
10455 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
10456 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
10457 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
10458 #define N_SUF_32 (N_SU_32 | N_F32)
10459 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
10460 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
10461
10462 /* Pass this as the first type argument to neon_check_type to ignore types
10463 altogether. */
10464 #define N_IGNORE_TYPE (N_KEY | N_EQK)
10465
10466 /* Select a "shape" for the current instruction (describing register types or
10467 sizes) from a list of alternatives. Return NS_NULL if the current instruction
10468 doesn't fit. For non-polymorphic shapes, checking is usually done as a
10469 function of operand parsing, so this function doesn't need to be called.
10470 Shapes should be listed in order of decreasing length. */
10471
10472 static enum neon_shape
10473 neon_select_shape (enum neon_shape shape, ...)
10474 {
10475 va_list ap;
10476 enum neon_shape first_shape = shape;
10477
10478 /* Fix missing optional operands. FIXME: we don't know at this point how
10479 many arguments we should have, so this makes the assumption that we have
10480 > 1. This is true of all current Neon opcodes, I think, but may not be
10481 true in the future. */
10482 if (!inst.operands[1].present)
10483 inst.operands[1] = inst.operands[0];
10484
10485 va_start (ap, shape);
10486
10487 for (; shape != NS_NULL; shape = va_arg (ap, int))
10488 {
10489 unsigned j;
10490 int matches = 1;
10491
10492 for (j = 0; j < neon_shape_tab[shape].els; j++)
10493 {
10494 if (!inst.operands[j].present)
10495 {
10496 matches = 0;
10497 break;
10498 }
10499
10500 switch (neon_shape_tab[shape].el[j])
10501 {
10502 case SE_F:
10503 if (!(inst.operands[j].isreg
10504 && inst.operands[j].isvec
10505 && inst.operands[j].issingle
10506 && !inst.operands[j].isquad))
10507 matches = 0;
10508 break;
10509
10510 case SE_D:
10511 if (!(inst.operands[j].isreg
10512 && inst.operands[j].isvec
10513 && !inst.operands[j].isquad
10514 && !inst.operands[j].issingle))
10515 matches = 0;
10516 break;
10517
10518 case SE_R:
10519 if (!(inst.operands[j].isreg
10520 && !inst.operands[j].isvec))
10521 matches = 0;
10522 break;
10523
10524 case SE_Q:
10525 if (!(inst.operands[j].isreg
10526 && inst.operands[j].isvec
10527 && inst.operands[j].isquad
10528 && !inst.operands[j].issingle))
10529 matches = 0;
10530 break;
10531
10532 case SE_I:
10533 if (!(!inst.operands[j].isreg
10534 && !inst.operands[j].isscalar))
10535 matches = 0;
10536 break;
10537
10538 case SE_S:
10539 if (!(!inst.operands[j].isreg
10540 && inst.operands[j].isscalar))
10541 matches = 0;
10542 break;
10543
10544 case SE_L:
10545 break;
10546 }
10547 }
10548 if (matches)
10549 break;
10550 }
10551
10552 va_end (ap);
10553
10554 if (shape == NS_NULL && first_shape != NS_NULL)
10555 first_error (_("invalid instruction shape"));
10556
10557 return shape;
10558 }
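
/* For example, do_neon_logic below calls
   neon_select_shape (NS_DDD, NS_QQQ, NS_NULL): three D-register operands
   match NS_DDD, three Q-register operands match NS_QQQ, and anything else
   falls through to NS_NULL and reports "invalid instruction shape".  */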
10559
10560 /* True if SHAPE is predominantly a quadword operation (most of the time, this
10561 means the Q bit should be set). */
10562
10563 static int
10564 neon_quad (enum neon_shape shape)
10565 {
10566 return neon_shape_class[shape] == SC_QUAD;
10567 }
10568
10569 static void
10570 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
10571 unsigned *g_size)
10572 {
10573 /* Allow modification to be made to types which are constrained to be
10574 based on the key element, based on bits set alongside N_EQK. */
10575 if ((typebits & N_EQK) != 0)
10576 {
10577 if ((typebits & N_HLF) != 0)
10578 *g_size /= 2;
10579 else if ((typebits & N_DBL) != 0)
10580 *g_size *= 2;
10581 if ((typebits & N_SGN) != 0)
10582 *g_type = NT_signed;
10583 else if ((typebits & N_UNS) != 0)
10584 *g_type = NT_unsigned;
10585 else if ((typebits & N_INT) != 0)
10586 *g_type = NT_integer;
10587 else if ((typebits & N_FLT) != 0)
10588 *g_type = NT_float;
10589 else if ((typebits & N_SIZ) != 0)
10590 *g_type = NT_untyped;
10591 }
10592 }
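
/* For instance, with a key type of .s32, a constraint of N_EQK | N_UNS (as
   used by VQSHLU below) yields .u32, and N_EQK | N_DBL would yield .s64.  */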
10593
10594 /* Return a copy of KEY promoted by the modifier bits set in THISARG.  KEY
10595 should be the "key" operand type, i.e. the single type specified in a Neon
10596 instruction when it is the only one given. */
10597
10598 static struct neon_type_el
10599 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
10600 {
10601 struct neon_type_el dest = *key;
10602
10603 assert ((thisarg & N_EQK) != 0);
10604
10605 neon_modify_type_size (thisarg, &dest.type, &dest.size);
10606
10607 return dest;
10608 }
10609
10610 /* Convert Neon type and size into compact bitmask representation. */
10611
10612 static enum neon_type_mask
10613 type_chk_of_el_type (enum neon_el_type type, unsigned size)
10614 {
10615 switch (type)
10616 {
10617 case NT_untyped:
10618 switch (size)
10619 {
10620 case 8: return N_8;
10621 case 16: return N_16;
10622 case 32: return N_32;
10623 case 64: return N_64;
10624 default: ;
10625 }
10626 break;
10627
10628 case NT_integer:
10629 switch (size)
10630 {
10631 case 8: return N_I8;
10632 case 16: return N_I16;
10633 case 32: return N_I32;
10634 case 64: return N_I64;
10635 default: ;
10636 }
10637 break;
10638
10639 case NT_float:
10640 switch (size)
10641 {
10642 case 32: return N_F32;
10643 case 64: return N_F64;
10644 default: ;
10645 }
10646 break;
10647
10648 case NT_poly:
10649 switch (size)
10650 {
10651 case 8: return N_P8;
10652 case 16: return N_P16;
10653 default: ;
10654 }
10655 break;
10656
10657 case NT_signed:
10658 switch (size)
10659 {
10660 case 8: return N_S8;
10661 case 16: return N_S16;
10662 case 32: return N_S32;
10663 case 64: return N_S64;
10664 default: ;
10665 }
10666 break;
10667
10668 case NT_unsigned:
10669 switch (size)
10670 {
10671 case 8: return N_U8;
10672 case 16: return N_U16;
10673 case 32: return N_U32;
10674 case 64: return N_U64;
10675 default: ;
10676 }
10677 break;
10678
10679 default: ;
10680 }
10681
10682 return N_UTYP;
10683 }
10684
10685 /* Convert compact Neon bitmask type representation to a type and size. Only
10686 handles the case where a single bit is set in the mask. */
10687
10688 static int
10689 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
10690 enum neon_type_mask mask)
10691 {
10692 if ((mask & N_EQK) != 0)
10693 return FAIL;
10694
10695 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
10696 *size = 8;
10697 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0)
10698 *size = 16;
10699 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
10700 *size = 32;
10701 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64)) != 0)
10702 *size = 64;
10703 else
10704 return FAIL;
10705
10706 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
10707 *type = NT_signed;
10708 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
10709 *type = NT_unsigned;
10710 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
10711 *type = NT_integer;
10712 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
10713 *type = NT_untyped;
10714 else if ((mask & (N_P8 | N_P16)) != 0)
10715 *type = NT_poly;
10716 else if ((mask & (N_F32 | N_F64)) != 0)
10717 *type = NT_float;
10718 else
10719 return FAIL;
10720
10721 return SUCCESS;
10722 }
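
/* The two functions above are inverses for single-bit masks: for example,
   type_chk_of_el_type (NT_signed, 16) returns N_S16, and el_type_of_type_chk
   recovers NT_signed with a size of 16 from N_S16.  */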
10723
10724 /* Modify a bitmask of allowed types. This is only needed for type
10725 relaxation. */
10726
10727 static unsigned
10728 modify_types_allowed (unsigned allowed, unsigned mods)
10729 {
10730 unsigned size;
10731 enum neon_el_type type;
10732 unsigned destmask;
10733 int i;
10734
10735 destmask = 0;
10736
10737 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
10738 {
10739 if (el_type_of_type_chk (&type, &size, allowed & i) == SUCCESS)
10740 {
10741 neon_modify_type_size (mods, &type, &size);
10742 destmask |= type_chk_of_el_type (type, size);
10743 }
10744 }
10745
10746 return destmask;
10747 }
10748
10749 /* Check type and return type classification.
10750 The manual states (paraphrase): If one datatype is given, it indicates the
10751 type given in:
10752 - the second operand, if there is one
10753 - the operand, if there is no second operand
10754 - the result, if there are no operands.
10755 This isn't quite good enough though, so we use a concept of a "key" datatype
10756 which is set on a per-instruction basis, which is the one which matters when
10757 only one data type is written.
10758 Note: this function has side-effects (e.g. filling in missing operands). All
10759 Neon instructions should call it before performing bit encoding. */
10760
10761 static struct neon_type_el
10762 neon_check_type (unsigned els, enum neon_shape ns, ...)
10763 {
10764 va_list ap;
10765 unsigned i, pass, key_el = 0;
10766 unsigned types[NEON_MAX_TYPE_ELS];
10767 enum neon_el_type k_type = NT_invtype;
10768 unsigned k_size = -1u;
10769 struct neon_type_el badtype = {NT_invtype, -1};
10770 unsigned key_allowed = 0;
10771
10772 /* The optional register in a Neon instruction is always operand 1, so when it
10773 is omitted, operand 1 is the one left missing.  Duplicate operand 0 into it here.  */
10774 if (els > 1 && !inst.operands[1].present)
10775 inst.operands[1] = inst.operands[0];
10776
10777 /* Suck up all the varargs. */
10778 va_start (ap, ns);
10779 for (i = 0; i < els; i++)
10780 {
10781 unsigned thisarg = va_arg (ap, unsigned);
10782 if (thisarg == N_IGNORE_TYPE)
10783 {
10784 va_end (ap);
10785 return badtype;
10786 }
10787 types[i] = thisarg;
10788 if ((thisarg & N_KEY) != 0)
10789 key_el = i;
10790 }
10791 va_end (ap);
10792
10793 if (inst.vectype.elems > 0)
10794 for (i = 0; i < els; i++)
10795 if (inst.operands[i].vectype.type != NT_invtype)
10796 {
10797 first_error (_("types specified in both the mnemonic and operands"));
10798 return badtype;
10799 }
10800
10801 /* Duplicate inst.vectype elements here as necessary.
10802 FIXME: No idea if this is exactly the same as the ARM assembler,
10803 particularly when an insn takes one register and one non-register
10804 operand. */
10805 if (inst.vectype.elems == 1 && els > 1)
10806 {
10807 unsigned j;
10808 inst.vectype.elems = els;
10809 inst.vectype.el[key_el] = inst.vectype.el[0];
10810 for (j = 0; j < els; j++)
10811 if (j != key_el)
10812 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
10813 types[j]);
10814 }
10815 else if (inst.vectype.elems == 0 && els > 0)
10816 {
10817 unsigned j;
10818 /* No types were given after the mnemonic, so look for types specified
10819 after each operand. We allow some flexibility here; as long as the
10820 "key" operand has a type, we can infer the others. */
10821 for (j = 0; j < els; j++)
10822 if (inst.operands[j].vectype.type != NT_invtype)
10823 inst.vectype.el[j] = inst.operands[j].vectype;
10824
10825 if (inst.operands[key_el].vectype.type != NT_invtype)
10826 {
10827 for (j = 0; j < els; j++)
10828 if (inst.operands[j].vectype.type == NT_invtype)
10829 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
10830 types[j]);
10831 }
10832 else
10833 {
10834 first_error (_("operand types can't be inferred"));
10835 return badtype;
10836 }
10837 }
10838 else if (inst.vectype.elems != els)
10839 {
10840 first_error (_("type specifier has the wrong number of parts"));
10841 return badtype;
10842 }
10843
10844 for (pass = 0; pass < 2; pass++)
10845 {
10846 for (i = 0; i < els; i++)
10847 {
10848 unsigned thisarg = types[i];
10849 unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
10850 ? modify_types_allowed (key_allowed, thisarg) : thisarg;
10851 enum neon_el_type g_type = inst.vectype.el[i].type;
10852 unsigned g_size = inst.vectype.el[i].size;
10853
10854 /* Decay more-specific signed & unsigned types to sign-insensitive
10855 integer types if sign-specific variants are unavailable. */
10856 if ((g_type == NT_signed || g_type == NT_unsigned)
10857 && (types_allowed & N_SU_ALL) == 0)
10858 g_type = NT_integer;
10859
10860 /* If only untyped args are allowed, decay any more specific types to
10861 them. Some instructions only care about signs for some element
10862 sizes, so handle that properly. */
10863 if ((g_size == 8 && (types_allowed & N_8) != 0)
10864 || (g_size == 16 && (types_allowed & N_16) != 0)
10865 || (g_size == 32 && (types_allowed & N_32) != 0)
10866 || (g_size == 64 && (types_allowed & N_64) != 0))
10867 g_type = NT_untyped;
10868
10869 if (pass == 0)
10870 {
10871 if ((thisarg & N_KEY) != 0)
10872 {
10873 k_type = g_type;
10874 k_size = g_size;
10875 key_allowed = thisarg & ~N_KEY;
10876 }
10877 }
10878 else
10879 {
10880 if ((thisarg & N_VFP) != 0)
10881 {
10882 enum neon_shape_el regshape = neon_shape_tab[ns].el[i];
10883 unsigned regwidth = neon_shape_el_size[regshape], match;
10884
10885 /* In VFP mode, operands must match register widths. If we
10886 have a key operand, use its width, else use the width of
10887 the current operand. */
10888 if (k_size != -1u)
10889 match = k_size;
10890 else
10891 match = g_size;
10892
10893 if (regwidth != match)
10894 {
10895 first_error (_("operand size must match register width"));
10896 return badtype;
10897 }
10898 }
10899
10900 if ((thisarg & N_EQK) == 0)
10901 {
10902 unsigned given_type = type_chk_of_el_type (g_type, g_size);
10903
10904 if ((given_type & types_allowed) == 0)
10905 {
10906 first_error (_("bad type in Neon instruction"));
10907 return badtype;
10908 }
10909 }
10910 else
10911 {
10912 enum neon_el_type mod_k_type = k_type;
10913 unsigned mod_k_size = k_size;
10914 neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
10915 if (g_type != mod_k_type || g_size != mod_k_size)
10916 {
10917 first_error (_("inconsistent types in Neon instruction"));
10918 return badtype;
10919 }
10920 }
10921 }
10922 }
10923 }
10924
10925 return inst.vectype.el[key_el];
10926 }
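
/* A typical call, taken from do_neon_dyadic_i_su below:
     neon_check_type (3, rs, N_EQK, N_EQK, N_SU_32 | N_KEY);
   Operand 2 carries N_KEY, so it is the key and may be any of
   .s8/.s16/.s32/.u8/.u16/.u32; operands 0 and 1 must then have the same type
   and size as the key.  */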
10927
10928 /* Neon-style VFP instruction forwarding. */
10929
10930 /* Thumb VFP instructions have 0xE in the condition field. */
10931
10932 static void
10933 do_vfp_cond_or_thumb (void)
10934 {
10935 if (thumb_mode)
10936 inst.instruction |= 0xe0000000;
10937 else
10938 inst.instruction |= inst.cond << 28;
10939 }
10940
10941 /* Look up and encode a simple mnemonic, for use as a helper function for the
10942 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
10943 etc. It is assumed that operand parsing has already been done, and that the
10944 operands are in the form expected by the given opcode (this isn't necessarily
10945 the same as the form in which they were parsed, hence some massaging must
10946 take place before this function is called).
10947 Checks current arch version against that in the looked-up opcode. */
10948
10949 static void
10950 do_vfp_nsyn_opcode (const char *opname)
10951 {
10952 const struct asm_opcode *opcode;
10953
10954 opcode = hash_find (arm_ops_hsh, opname);
10955
10956 if (!opcode)
10957 abort ();
10958
10959 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
10960 thumb_mode ? *opcode->tvariant : *opcode->avariant),
10961 _(BAD_FPU));
10962
10963 if (thumb_mode)
10964 {
10965 inst.instruction = opcode->tvalue;
10966 opcode->tencode ();
10967 }
10968 else
10969 {
10970 inst.instruction = (inst.cond << 28) | opcode->avalue;
10971 opcode->aencode ();
10972 }
10973 }
10974
10975 static void
10976 do_vfp_nsyn_add_sub (enum neon_shape rs)
10977 {
10978 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
10979
10980 if (rs == NS_FFF)
10981 {
10982 if (is_add)
10983 do_vfp_nsyn_opcode ("fadds");
10984 else
10985 do_vfp_nsyn_opcode ("fsubs");
10986 }
10987 else
10988 {
10989 if (is_add)
10990 do_vfp_nsyn_opcode ("faddd");
10991 else
10992 do_vfp_nsyn_opcode ("fsubd");
10993 }
10994 }
10995
10996 /* Check operand types to see if this is a VFP instruction, and if so call
10997 PFN (). */
10998
10999 static int
11000 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
11001 {
11002 enum neon_shape rs;
11003 struct neon_type_el et;
11004
11005 switch (args)
11006 {
11007 case 2:
11008 rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
11009 et = neon_check_type (2, rs,
11010 N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11011 break;
11012
11013 case 3:
11014 rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
11015 et = neon_check_type (3, rs,
11016 N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11017 break;
11018
11019 default:
11020 abort ();
11021 }
11022
11023 if (et.type != NT_invtype)
11024 {
11025 pfn (rs);
11026 return SUCCESS;
11027 }
11028 else
11029 inst.error = NULL;
11030
11031 return FAIL;
11032 }
11033
11034 static void
11035 do_vfp_nsyn_mla_mls (enum neon_shape rs)
11036 {
11037 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
11038
11039 if (rs == NS_FFF)
11040 {
11041 if (is_mla)
11042 do_vfp_nsyn_opcode ("fmacs");
11043 else
11044 do_vfp_nsyn_opcode ("fmscs");
11045 }
11046 else
11047 {
11048 if (is_mla)
11049 do_vfp_nsyn_opcode ("fmacd");
11050 else
11051 do_vfp_nsyn_opcode ("fmscd");
11052 }
11053 }
11054
11055 static void
11056 do_vfp_nsyn_mul (enum neon_shape rs)
11057 {
11058 if (rs == NS_FFF)
11059 do_vfp_nsyn_opcode ("fmuls");
11060 else
11061 do_vfp_nsyn_opcode ("fmuld");
11062 }
11063
11064 static void
11065 do_vfp_nsyn_abs_neg (enum neon_shape rs)
11066 {
11067 int is_neg = (inst.instruction & 0x80) != 0;
11068 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
11069
11070 if (rs == NS_FF)
11071 {
11072 if (is_neg)
11073 do_vfp_nsyn_opcode ("fnegs");
11074 else
11075 do_vfp_nsyn_opcode ("fabss");
11076 }
11077 else
11078 {
11079 if (is_neg)
11080 do_vfp_nsyn_opcode ("fnegd");
11081 else
11082 do_vfp_nsyn_opcode ("fabsd");
11083 }
11084 }
11085
11086 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
11087 insns belong to Neon, and are handled elsewhere. */
11088
11089 static void
11090 do_vfp_nsyn_ldm_stm (int is_dbmode)
11091 {
11092 int is_ldm = (inst.instruction & (1 << 20)) != 0;
11093 if (is_ldm)
11094 {
11095 if (is_dbmode)
11096 do_vfp_nsyn_opcode ("fldmdbs");
11097 else
11098 do_vfp_nsyn_opcode ("fldmias");
11099 }
11100 else
11101 {
11102 if (is_dbmode)
11103 do_vfp_nsyn_opcode ("fstmdbs");
11104 else
11105 do_vfp_nsyn_opcode ("fstmias");
11106 }
11107 }
11108
11109 static void
11110 do_vfp_nsyn_sqrt (void)
11111 {
11112 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
11113 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11114
11115 if (rs == NS_FF)
11116 do_vfp_nsyn_opcode ("fsqrts");
11117 else
11118 do_vfp_nsyn_opcode ("fsqrtd");
11119 }
11120
11121 static void
11122 do_vfp_nsyn_div (void)
11123 {
11124 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
11125 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
11126 N_F32 | N_F64 | N_KEY | N_VFP);
11127
11128 if (rs == NS_FFF)
11129 do_vfp_nsyn_opcode ("fdivs");
11130 else
11131 do_vfp_nsyn_opcode ("fdivd");
11132 }
11133
11134 static void
11135 do_vfp_nsyn_nmul (void)
11136 {
11137 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
11138 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
11139 N_F32 | N_F64 | N_KEY | N_VFP);
11140
11141 if (rs == NS_FFF)
11142 {
11143 inst.instruction = NEON_ENC_SINGLE (inst.instruction);
11144 do_vfp_sp_dyadic ();
11145 }
11146 else
11147 {
11148 inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
11149 do_vfp_dp_rd_rn_rm ();
11150 }
11151 do_vfp_cond_or_thumb ();
11152 }
11153
11154 static void
11155 do_vfp_nsyn_cmp (void)
11156 {
11157 if (inst.operands[1].isreg)
11158 {
11159 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
11160 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11161
11162 if (rs == NS_FF)
11163 {
11164 inst.instruction = NEON_ENC_SINGLE (inst.instruction);
11165 do_vfp_sp_monadic ();
11166 }
11167 else
11168 {
11169 inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
11170 do_vfp_dp_rd_rm ();
11171 }
11172 }
11173 else
11174 {
11175 enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
11176 neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);
11177
11178 switch (inst.instruction & 0x0fffffff)
11179 {
11180 case N_MNEM_vcmp:
11181 inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
11182 break;
11183 case N_MNEM_vcmpe:
11184 inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
11185 break;
11186 default:
11187 abort ();
11188 }
11189
11190 if (rs == NS_FI)
11191 {
11192 inst.instruction = NEON_ENC_SINGLE (inst.instruction);
11193 do_vfp_sp_compare_z ();
11194 }
11195 else
11196 {
11197 inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
11198 do_vfp_dp_rd ();
11199 }
11200 }
11201 do_vfp_cond_or_thumb ();
11202 }
11203
11204 static void
11205 nsyn_insert_sp (void)
11206 {
11207 inst.operands[1] = inst.operands[0];
11208 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
11209 inst.operands[0].reg = 13;
11210 inst.operands[0].isreg = 1;
11211 inst.operands[0].writeback = 1;
11212 inst.operands[0].present = 1;
11213 }
11214
11215 static void
11216 do_vfp_nsyn_push (void)
11217 {
11218 nsyn_insert_sp ();
11219 if (inst.operands[1].issingle)
11220 do_vfp_nsyn_opcode ("fstmdbs");
11221 else
11222 do_vfp_nsyn_opcode ("fstmdbd");
11223 }
11224
11225 static void
11226 do_vfp_nsyn_pop (void)
11227 {
11228 nsyn_insert_sp ();
11229 if (inst.operands[1].issingle)
11230 do_vfp_nsyn_opcode ("fldmdbs");
11231 else
11232 do_vfp_nsyn_opcode ("fldmdbd");
11233 }
11234
11235 /* Fix up Neon data-processing instructions, ORing in the correct bits for
11236 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
11237
11238 static unsigned
11239 neon_dp_fixup (unsigned i)
11240 {
11241 if (thumb_mode)
11242 {
11243 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
11244 if (i & (1 << 24))
11245 i |= 1 << 28;
11246
11247 i &= ~(1 << 24);
11248
11249 i |= 0xef000000;
11250 }
11251 else
11252 i |= 0xf2000000;
11253
11254 return i;
11255 }
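
/* Worked example, starting from the integer form of VSUB (0x1000800 in the
   encoding table above): ARM mode gives 0x1000800 | 0xf2000000 = 0xf3000800,
   while Thumb mode moves bit 24 up to bit 28 and ORs in 0xef000000, giving
   0xff000800.  */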
11256
11257 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
11258 (0, 1, 2, 3). */
11259
11260 static unsigned
11261 neon_logbits (unsigned x)
11262 {
11263 return ffs (x) - 4;
11264 }
11265
11266 #define LOW4(R) ((R) & 0xf)
11267 #define HI1(R) (((R) >> 4) & 1)
11268
11269 /* Encode insns with bit pattern:
11270
11271 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
11272 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
11273
11274 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
11275 different meaning for some instruction. */
11276
11277 static void
11278 neon_three_same (int isquad, int ubit, int size)
11279 {
11280 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11281 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11282 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11283 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11284 inst.instruction |= LOW4 (inst.operands[2].reg);
11285 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
11286 inst.instruction |= (isquad != 0) << 6;
11287 inst.instruction |= (ubit != 0) << 24;
11288 if (size != -1)
11289 inst.instruction |= neon_logbits (size) << 20;
11290
11291 inst.instruction = neon_dp_fixup (inst.instruction);
11292 }
11293
11294 /* Encode instructions of the form:
11295
11296 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
11297 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
11298
11299 Don't write size if SIZE == -1. */
11300
11301 static void
11302 neon_two_same (int qbit, int ubit, int size)
11303 {
11304 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11305 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11306 inst.instruction |= LOW4 (inst.operands[1].reg);
11307 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11308 inst.instruction |= (qbit != 0) << 6;
11309 inst.instruction |= (ubit != 0) << 24;
11310
11311 if (size != -1)
11312 inst.instruction |= neon_logbits (size) << 18;
11313
11314 inst.instruction = neon_dp_fixup (inst.instruction);
11315 }
11316
11317 /* Neon instruction encoders, in approximate order of appearance. */
11318
11319 static void
11320 do_neon_dyadic_i_su (void)
11321 {
11322 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11323 struct neon_type_el et = neon_check_type (3, rs,
11324 N_EQK, N_EQK, N_SU_32 | N_KEY);
11325 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11326 }
11327
11328 static void
11329 do_neon_dyadic_i64_su (void)
11330 {
11331 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11332 struct neon_type_el et = neon_check_type (3, rs,
11333 N_EQK, N_EQK, N_SU_ALL | N_KEY);
11334 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11335 }
11336
11337 static void
11338 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
11339 unsigned immbits)
11340 {
11341 unsigned size = et.size >> 3;
11342 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11343 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11344 inst.instruction |= LOW4 (inst.operands[1].reg);
11345 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11346 inst.instruction |= (isquad != 0) << 6;
11347 inst.instruction |= immbits << 16;
11348 inst.instruction |= (size >> 3) << 7;
11349 inst.instruction |= (size & 0x7) << 19;
11350 if (write_ubit)
11351 inst.instruction |= (uval != 0) << 24;
11352
11353 inst.instruction = neon_dp_fixup (inst.instruction);
11354 }
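
/* Note that the element-size marker is folded into the same field as the
   shift immediate: an 8-bit element ORs in 1 << 19, a 16-bit element 1 << 20,
   a 32-bit element 1 << 21, and a 64-bit element sets only the L bit (bit 7),
   leaving bits [21:19] clear.  */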
11355
11356 static void
11357 do_neon_shl_imm (void)
11358 {
11359 if (!inst.operands[2].isreg)
11360 {
11361 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
11362 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
11363 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11364 neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
11365 }
11366 else
11367 {
11368 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11369 struct neon_type_el et = neon_check_type (3, rs,
11370 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
11371 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11372 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11373 }
11374 }
11375
11376 static void
11377 do_neon_qshl_imm (void)
11378 {
11379 if (!inst.operands[2].isreg)
11380 {
11381 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
11382 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
11383 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11384 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
11385 inst.operands[2].imm);
11386 }
11387 else
11388 {
11389 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11390 struct neon_type_el et = neon_check_type (3, rs,
11391 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
11392 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11393 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11394 }
11395 }
11396
11397 static int
11398 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
11399 {
11400 /* Handle .I8 and .I64 as pseudo-instructions. */
11401 switch (size)
11402 {
11403 case 8:
11404 /* Unfortunately, this will make everything apart from zero out-of-range.
11405 FIXME is this the intended semantics? There doesn't seem much point in
11406 accepting .I8 if so. */
11407 immediate |= immediate << 8;
11408 size = 16;
11409 break;
11410 case 64:
11411 /* Similarly, anything other than zero will be replicated in bits [63:32],
11412 which probably isn't what we want if we specified .I64. */
11413 if (immediate != 0)
11414 goto bad_immediate;
11415 size = 32;
11416 break;
11417 default: ;
11418 }
11419
11420 if (immediate == (immediate & 0x000000ff))
11421 {
11422 *immbits = immediate;
11423 return (size == 16) ? 0x9 : 0x1;
11424 }
11425 else if (immediate == (immediate & 0x0000ff00))
11426 {
11427 *immbits = immediate >> 8;
11428 return (size == 16) ? 0xb : 0x3;
11429 }
11430 else if (immediate == (immediate & 0x00ff0000))
11431 {
11432 *immbits = immediate >> 16;
11433 return 0x5;
11434 }
11435 else if (immediate == (immediate & 0xff000000))
11436 {
11437 *immbits = immediate >> 24;
11438 return 0x7;
11439 }
11440
11441 bad_immediate:
11442 first_error (_("immediate value out of range"));
11443 return FAIL;
11444 }
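
/* For example, with a 32-bit element size and an immediate of 0x00ab0000, the
   third case above applies: *IMMBITS is set to 0xab and the cmode returned is
   0x5.  */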
11445
11446 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
11447 A, B, C, D. */
11448
11449 static int
11450 neon_bits_same_in_bytes (unsigned imm)
11451 {
11452 return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
11453 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
11454 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
11455 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
11456 }
11457
11458 /* For immediate of above form, return 0bABCD. */
11459
11460 static unsigned
11461 neon_squash_bits (unsigned imm)
11462 {
11463 return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
11464 | ((imm & 0x01000000) >> 21);
11465 }
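
/* For example, neon_squash_bits (0x00ff00ff) returns 0b0101
   (A = 0, B = 1, C = 0, D = 1).  */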
11466
11467 /* Compress quarter-float representation to 0b...000 abcdefgh. */
11468
11469 static unsigned
11470 neon_qfloat_bits (unsigned imm)
11471 {
11472 return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
11473 }
11474
11475 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
11476 the instruction. *OP is passed as the initial value of the op field, and
11477 may be set to a different value depending on the constant (i.e.
11478 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
11479 MVN). */
11480
11481 static int
11482 neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, unsigned *immbits,
11483 int *op, int size, enum neon_el_type type)
11484 {
11485 if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
11486 {
11487 if (size != 32 || *op == 1)
11488 return FAIL;
11489 *immbits = neon_qfloat_bits (immlo);
11490 return 0xf;
11491 }
11492 else if (size == 64 && neon_bits_same_in_bytes (immhi)
11493 && neon_bits_same_in_bytes (immlo))
11494 {
11495 /* Check this one first so we don't have to bother with immhi in later
11496 tests. */
11497 if (*op == 1)
11498 return FAIL;
11499 *immbits = (neon_squash_bits (immhi) << 4) | neon_squash_bits (immlo);
11500 *op = 1;
11501 return 0xe;
11502 }
11503 else if (immhi != 0)
11504 return FAIL;
11505 else if (immlo == (immlo & 0x000000ff))
11506 {
11507 /* 64-bit case was already handled. Don't allow MVN with 8-bit
11508 immediate. */
11509 if ((size != 8 && size != 16 && size != 32)
11510 || (size == 8 && *op == 1))
11511 return FAIL;
11512 *immbits = immlo;
11513 return (size == 8) ? 0xe : (size == 16) ? 0x8 : 0x0;
11514 }
11515 else if (immlo == (immlo & 0x0000ff00))
11516 {
11517 if (size != 16 && size != 32)
11518 return FAIL;
11519 *immbits = immlo >> 8;
11520 return (size == 16) ? 0xa : 0x2;
11521 }
11522 else if (immlo == (immlo & 0x00ff0000))
11523 {
11524 if (size != 32)
11525 return FAIL;
11526 *immbits = immlo >> 16;
11527 return 0x4;
11528 }
11529 else if (immlo == (immlo & 0xff000000))
11530 {
11531 if (size != 32)
11532 return FAIL;
11533 *immbits = immlo >> 24;
11534 return 0x6;
11535 }
11536 else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
11537 {
11538 if (size != 32)
11539 return FAIL;
11540 *immbits = (immlo >> 8) & 0xff;
11541 return 0xc;
11542 }
11543 else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
11544 {
11545 if (size != 32)
11546 return FAIL;
11547 *immbits = (immlo >> 16) & 0xff;
11548 return 0xd;
11549 }
11550
11551 return FAIL;
11552 }
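
/* For example, a 32-bit move of the constant 0x5600 (IMMLO = 0x5600,
   IMMHI = 0) takes the (immlo & 0x0000ff00) branch: *IMMBITS becomes 0x56 and
   the cmode returned is 0x2.  */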
11553
11554 /* Write immediate bits [7:0] to the following locations:
11555
11556 |28/24|23 19|18 16|15 4|3 0|
11557 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
11558
11559 This function is used by VMOV/VMVN/VORR/VBIC. */
11560
11561 static void
11562 neon_write_immbits (unsigned immbits)
11563 {
11564 inst.instruction |= immbits & 0xf;
11565 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
11566 inst.instruction |= ((immbits >> 7) & 0x1) << 24;
11567 }
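
/* For instance, immbits 0xab (binary 1 010 1011) places 0xb in bits [3:0],
   0x2 in bits [18:16] and sets bit 24.  */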
11568
11569 /* Invert low-order SIZE bits of XHI:XLO. */
11570
11571 static void
11572 neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
11573 {
11574 unsigned immlo = xlo ? *xlo : 0;
11575 unsigned immhi = xhi ? *xhi : 0;
11576
11577 switch (size)
11578 {
11579 case 8:
11580 immlo = (~immlo) & 0xff;
11581 break;
11582
11583 case 16:
11584 immlo = (~immlo) & 0xffff;
11585 break;
11586
11587 case 64:
11588 immhi = (~immhi) & 0xffffffff;
11589 /* fall through. */
11590
11591 case 32:
11592 immlo = (~immlo) & 0xffffffff;
11593 break;
11594
11595 default:
11596 abort ();
11597 }
11598
11599 if (xlo)
11600 *xlo = immlo;
11601
11602 if (xhi)
11603 *xhi = immhi;
11604 }
11605
11606 static void
11607 do_neon_logic (void)
11608 {
11609 if (inst.operands[2].present && inst.operands[2].isreg)
11610 {
11611 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11612 neon_check_type (3, rs, N_IGNORE_TYPE);
11613 /* U bit and size field were set as part of the bitmask. */
11614 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11615 neon_three_same (neon_quad (rs), 0, -1);
11616 }
11617 else
11618 {
11619 enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
11620 struct neon_type_el et = neon_check_type (2, rs,
11621 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
11622 enum neon_opc opcode = inst.instruction & 0x0fffffff;
11623 unsigned immbits;
11624 int cmode;
11625
11626 if (et.type == NT_invtype)
11627 return;
11628
11629 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11630
11631 switch (opcode)
11632 {
11633 case N_MNEM_vbic:
11634 cmode = neon_cmode_for_logic_imm (inst.operands[1].imm, &immbits,
11635 et.size);
11636 break;
11637
11638 case N_MNEM_vorr:
11639 cmode = neon_cmode_for_logic_imm (inst.operands[1].imm, &immbits,
11640 et.size);
11641 break;
11642
11643 case N_MNEM_vand:
11644 /* Pseudo-instruction for VBIC. */
11645 immbits = inst.operands[1].imm;
11646 neon_invert_size (&immbits, 0, et.size);
11647 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
11648 break;
11649
11650 case N_MNEM_vorn:
11651 /* Pseudo-instruction for VORR. */
11652 immbits = inst.operands[1].imm;
11653 neon_invert_size (&immbits, 0, et.size);
11654 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
11655 break;
11656
11657 default:
11658 abort ();
11659 }
11660
11661 if (cmode == FAIL)
11662 return;
11663
11664 inst.instruction |= neon_quad (rs) << 6;
11665 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11666 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11667 inst.instruction |= cmode << 8;
11668 neon_write_immbits (immbits);
11669
11670 inst.instruction = neon_dp_fixup (inst.instruction);
11671 }
11672 }
11673
11674 static void
11675 do_neon_bitfield (void)
11676 {
11677 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11678 neon_check_type (3, rs, N_IGNORE_TYPE);
11679 neon_three_same (neon_quad (rs), 0, -1);
11680 }
11681
11682 static void
11683 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
11684 unsigned destbits)
11685 {
11686 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11687 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
11688 types | N_KEY);
11689 if (et.type == NT_float)
11690 {
11691 inst.instruction = NEON_ENC_FLOAT (inst.instruction);
11692 neon_three_same (neon_quad (rs), 0, -1);
11693 }
11694 else
11695 {
11696 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11697 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
11698 }
11699 }
11700
11701 static void
11702 do_neon_dyadic_if_su (void)
11703 {
11704 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
11705 }
11706
11707 static void
11708 do_neon_dyadic_if_su_d (void)
11709 {
11710 /* This version only allows D registers, but that constraint is enforced during
11711 operand parsing so we don't need to do anything extra here. */
11712 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
11713 }
11714
11715 static void
11716 do_neon_dyadic_if_i_d (void)
11717 {
11718 /* The "untyped" case can't happen. Do this to stop the "U" bit being
11719 affected if we specify unsigned args. */
11720 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
11721 }
11722
11723 enum vfp_or_neon_is_neon_bits
11724 {
11725 NEON_CHECK_CC = 1,
11726 NEON_CHECK_ARCH = 2
11727 };
11728
11729 /* Call this function if an instruction which may have belonged to the VFP or
11730 Neon instruction sets, but turned out to be a Neon instruction (due to the
11731 operand types involved, etc.). We have to check and/or fix-up a couple of
11732 things:
11733
11734 - Make sure the user hasn't attempted to make a Neon instruction
11735 conditional.
11736 - Alter the value in the condition code field if necessary.
11737 - Make sure that the arch supports Neon instructions.
11738
11739 Which of these operations take place depends on bits from enum
11740 vfp_or_neon_is_neon_bits.
11741
11742 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
11743 current instruction's condition is COND_ALWAYS, the condition field is
11744 changed to inst.uncond_value. This is necessary because instructions shared
11745 between VFP and Neon may be conditional for the VFP variants only, and the
11746 unconditional Neon version must have, e.g., 0xF in the condition field. */
11747
11748 static int
11749 vfp_or_neon_is_neon (unsigned check)
11750 {
11751 /* Conditions are always legal in Thumb mode (IT blocks). */
11752 if (!thumb_mode && (check & NEON_CHECK_CC))
11753 {
11754 if (inst.cond != COND_ALWAYS)
11755 {
11756 first_error (_(BAD_COND));
11757 return FAIL;
11758 }
11759 if (inst.uncond_value != -1)
11760 inst.instruction |= inst.uncond_value << 28;
11761 }
11762
11763 if ((check & NEON_CHECK_ARCH)
11764 && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
11765 {
11766 first_error (_(BAD_FPU));
11767 return FAIL;
11768 }
11769
11770 return SUCCESS;
11771 }
11772
11773 static void
11774 do_neon_addsub_if_i (void)
11775 {
11776 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
11777 return;
11778
11779 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
11780 return;
11781
11782 /* The "untyped" case can't happen. Do this to stop the "U" bit being
11783 affected if we specify unsigned args. */
11784 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
11785 }
11786
11787 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
11788 result to be:
11789 V<op> A,B (A is operand 0, B is operand 2)
11790 to mean:
11791 V<op> A,B,A
11792 not:
11793 V<op> A,B,B
11794 so handle that case specially. */
11795
11796 static void
11797 neon_exchange_operands (void)
11798 {
11799 void *scratch = alloca (sizeof (inst.operands[0]));
11800 if (inst.operands[1].present)
11801 {
11802 /* Swap operands[1] and operands[2]. */
11803 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
11804 inst.operands[1] = inst.operands[2];
11805 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
11806 }
11807 else
11808 {
11809 inst.operands[1] = inst.operands[2];
11810 inst.operands[2] = inst.operands[0];
11811 }
11812 }
11813
11814 static void
11815 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
11816 {
11817 if (inst.operands[2].isreg)
11818 {
11819 if (invert)
11820 neon_exchange_operands ();
11821 neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
11822 }
11823 else
11824 {
11825 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
11826 struct neon_type_el et = neon_check_type (2, rs,
11827 N_EQK | N_SIZ, immtypes | N_KEY);
11828
11829 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11830 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11831 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11832 inst.instruction |= LOW4 (inst.operands[1].reg);
11833 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11834 inst.instruction |= neon_quad (rs) << 6;
11835 inst.instruction |= (et.type == NT_float) << 10;
11836 inst.instruction |= neon_logbits (et.size) << 18;
11837
11838 inst.instruction = neon_dp_fixup (inst.instruction);
11839 }
11840 }
11841
11842 static void
11843 do_neon_cmp (void)
11844 {
11845 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
11846 }
11847
11848 static void
11849 do_neon_cmp_inv (void)
11850 {
11851 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
11852 }
11853
11854 static void
11855 do_neon_ceq (void)
11856 {
11857 neon_compare (N_IF_32, N_IF_32, FALSE);
11858 }
11859
11860 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
11861 scalars, which are encoded in 5 bits, M : Rm.
11862 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
11863 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
11864 index in M. */
11865
11866 static unsigned
11867 neon_scalar_for_mul (unsigned scalar, unsigned elsize)
11868 {
11869 unsigned regno = NEON_SCALAR_REG (scalar);
11870 unsigned elno = NEON_SCALAR_INDEX (scalar);
11871
11872 switch (elsize)
11873 {
11874 case 16:
11875 if (regno > 7 || elno > 3)
11876 goto bad_scalar;
11877 return regno | (elno << 3);
11878
11879 case 32:
11880 if (regno > 15 || elno > 1)
11881 goto bad_scalar;
11882 return regno | (elno << 4);
11883
11884 default:
11885 bad_scalar:
11886 first_error (_("scalar out of range for multiply instruction"));
11887 }
11888
11889 return 0;
11890 }
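
/* For example, a 16-bit scalar with regno 2 and elno 1 encodes as
   2 | (1 << 3) = 0x0a; the same scalar with a 32-bit element encodes as
   2 | (1 << 4) = 0x12.  */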
11891
11892 /* Encode multiply / multiply-accumulate scalar instructions. */
11893
11894 static void
11895 neon_mul_mac (struct neon_type_el et, int ubit)
11896 {
11897 unsigned scalar;
11898
11899 /* Give a more helpful error message if we have an invalid type. */
11900 if (et.type == NT_invtype)
11901 return;
11902
11903 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
11904 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11905 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11906 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11907 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11908 inst.instruction |= LOW4 (scalar);
11909 inst.instruction |= HI1 (scalar) << 5;
11910 inst.instruction |= (et.type == NT_float) << 8;
11911 inst.instruction |= neon_logbits (et.size) << 20;
11912 inst.instruction |= (ubit != 0) << 24;
11913
11914 inst.instruction = neon_dp_fixup (inst.instruction);
11915 }
11916
11917 static void
11918 do_neon_mac_maybe_scalar (void)
11919 {
11920 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
11921 return;
11922
11923 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
11924 return;
11925
11926 if (inst.operands[2].isscalar)
11927 {
11928 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
11929 struct neon_type_el et = neon_check_type (3, rs,
11930 N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
11931 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
11932 neon_mul_mac (et, neon_quad (rs));
11933 }
11934 else
11935 {
11936 /* The "untyped" case can't happen. Do this to stop the "U" bit being
11937 affected if we specify unsigned args. */
11938 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
11939 }
11940 }
11941
11942 static void
11943 do_neon_tst (void)
11944 {
11945 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11946 struct neon_type_el et = neon_check_type (3, rs,
11947 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
11948 neon_three_same (neon_quad (rs), 0, et.size);
11949 }
11950
11951 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
11952 same types as the MAC equivalents. The polynomial type for this instruction
11953 is encoded the same as the integer type. */
11954
11955 static void
11956 do_neon_mul (void)
11957 {
11958 if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
11959 return;
11960
11961 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
11962 return;
11963
11964 if (inst.operands[2].isscalar)
11965 do_neon_mac_maybe_scalar ();
11966 else
11967 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
11968 }
11969
11970 static void
11971 do_neon_qdmulh (void)
11972 {
11973 if (inst.operands[2].isscalar)
11974 {
11975 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
11976 struct neon_type_el et = neon_check_type (3, rs,
11977 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
11978 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
11979 neon_mul_mac (et, neon_quad (rs));
11980 }
11981 else
11982 {
11983 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11984 struct neon_type_el et = neon_check_type (3, rs,
11985 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
11986 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11987 /* The U bit (rounding) comes from bit mask. */
11988 neon_three_same (neon_quad (rs), 0, et.size);
11989 }
11990 }
11991
11992 static void
11993 do_neon_fcmp_absolute (void)
11994 {
11995 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11996 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
11997 /* Size field comes from bit mask. */
11998 neon_three_same (neon_quad (rs), 1, -1);
11999 }
12000
12001 static void
12002 do_neon_fcmp_absolute_inv (void)
12003 {
12004 neon_exchange_operands ();
12005 do_neon_fcmp_absolute ();
12006 }
12007
12008 static void
12009 do_neon_step (void)
12010 {
12011 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12012 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
12013 neon_three_same (neon_quad (rs), 0, -1);
12014 }
12015
12016 static void
12017 do_neon_abs_neg (void)
12018 {
12019 enum neon_shape rs;
12020 struct neon_type_el et;
12021
12022 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
12023 return;
12024
12025 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12026 return;
12027
12028 rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12029 et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);
12030
12031 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12032 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12033 inst.instruction |= LOW4 (inst.operands[1].reg);
12034 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12035 inst.instruction |= neon_quad (rs) << 6;
12036 inst.instruction |= (et.type == NT_float) << 10;
12037 inst.instruction |= neon_logbits (et.size) << 18;
12038
12039 inst.instruction = neon_dp_fixup (inst.instruction);
12040 }
12041
12042 static void
12043 do_neon_sli (void)
12044 {
12045 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12046 struct neon_type_el et = neon_check_type (2, rs,
12047 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
12048 int imm = inst.operands[2].imm;
12049 constraint (imm < 0 || (unsigned)imm >= et.size,
12050 _("immediate out of range for insert"));
12051 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
12052 }
12053
12054 static void
12055 do_neon_sri (void)
12056 {
12057 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12058 struct neon_type_el et = neon_check_type (2, rs,
12059 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
12060 int imm = inst.operands[2].imm;
12061 constraint (imm < 1 || (unsigned)imm > et.size,
12062 _("immediate out of range for insert"));
12063 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
12064 }
12065
12066 static void
12067 do_neon_qshlu_imm (void)
12068 {
12069 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12070 struct neon_type_el et = neon_check_type (2, rs,
12071 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
12072 int imm = inst.operands[2].imm;
12073 constraint (imm < 0 || (unsigned)imm >= et.size,
12074 _("immediate out of range for shift"));
12075 /* Only encodes the 'U present' variant of the instruction.
12076 In this case, signed types have OP (bit 8) set to 0.
12077 Unsigned types have OP set to 1. */
12078 inst.instruction |= (et.type == NT_unsigned) << 8;
12079 /* The rest of the bits are the same as other immediate shifts. */
12080 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
12081 }
12082
12083 static void
12084 do_neon_qmovn (void)
12085 {
12086 struct neon_type_el et = neon_check_type (2, NS_DQ,
12087 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
12088 /* Saturating move where operands can be signed or unsigned, and the
12089 destination has the same signedness. */
12090 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12091 if (et.type == NT_unsigned)
12092 inst.instruction |= 0xc0;
12093 else
12094 inst.instruction |= 0x80;
12095 neon_two_same (0, 1, et.size / 2);
12096 }
12097
12098 static void
12099 do_neon_qmovun (void)
12100 {
12101 struct neon_type_el et = neon_check_type (2, NS_DQ,
12102 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
12103 /* Saturating move with unsigned results. Operands must be signed. */
12104 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12105 neon_two_same (0, 1, et.size / 2);
12106 }
12107
12108 static void
12109 do_neon_rshift_sat_narrow (void)
12110 {
12111 /* FIXME: Types for narrowing. If operands are signed, results can be signed
12112 or unsigned. If operands are unsigned, results must also be unsigned. */
12113 struct neon_type_el et = neon_check_type (2, NS_DQI,
12114 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
12115 int imm = inst.operands[2].imm;
12116 /* This gets the bounds check, size encoding and immediate bits calculation
12117 right. */
12118 et.size /= 2;
12119
12120 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
12121 VQMOVN.I<size> <Dd>, <Qm>. */
12122 if (imm == 0)
12123 {
12124 inst.operands[2].present = 0;
12125 inst.instruction = N_MNEM_vqmovn;
12126 do_neon_qmovn ();
12127 return;
12128 }
12129
12130 constraint (imm < 1 || (unsigned)imm > et.size,
12131 _("immediate out of range"));
12132 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
12133 }
12134
12135 static void
12136 do_neon_rshift_sat_narrow_u (void)
12137 {
12138 /* FIXME: Types for narrowing. If operands are signed, results can be signed
12139 or unsigned. If operands are unsigned, results must also be unsigned. */
12140 struct neon_type_el et = neon_check_type (2, NS_DQI,
12141 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
12142 int imm = inst.operands[2].imm;
12143 /* This gets the bounds check, size encoding and immediate bits calculation
12144 right. */
12145 et.size /= 2;
12146
12147 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
12148 VQMOVUN.I<size> <Dd>, <Qm>. */
12149 if (imm == 0)
12150 {
12151 inst.operands[2].present = 0;
12152 inst.instruction = N_MNEM_vqmovun;
12153 do_neon_qmovun ();
12154 return;
12155 }
12156
12157 constraint (imm < 1 || (unsigned)imm > et.size,
12158 _("immediate out of range"));
12159 /* FIXME: The manual is kind of unclear about what value U should have in
12160 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
12161 must be 1. */
12162 neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
12163 }
12164
12165 static void
12166 do_neon_movn (void)
12167 {
12168 struct neon_type_el et = neon_check_type (2, NS_DQ,
12169 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
12170 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12171 neon_two_same (0, 1, et.size / 2);
12172 }
12173
12174 static void
12175 do_neon_rshift_narrow (void)
12176 {
12177 struct neon_type_el et = neon_check_type (2, NS_DQI,
12178 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
12179 int imm = inst.operands[2].imm;
12180 /* This gets the bounds check, size encoding and immediate bits calculation
12181 right. */
12182 et.size /= 2;
12183
12184 /* If the immediate is zero then this is a pseudo-instruction for
12185 VMOVN.I<size> <Dd>, <Qm>. */
12186 if (imm == 0)
12187 {
12188 inst.operands[2].present = 0;
12189 inst.instruction = N_MNEM_vmovn;
12190 do_neon_movn ();
12191 return;
12192 }
12193
12194 constraint (imm < 1 || (unsigned)imm > et.size,
12195 _("immediate out of range for narrowing operation"));
12196 neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
12197 }
12198
12199 static void
12200 do_neon_shll (void)
12201 {
12202 /* FIXME: Type checking when lengthening. */
12203 struct neon_type_el et = neon_check_type (2, NS_QDI,
12204 N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
12205 unsigned imm = inst.operands[2].imm;
12206
12207 if (imm == et.size)
12208 {
12209 /* Maximum shift variant. */
12210 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12211 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12212 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12213 inst.instruction |= LOW4 (inst.operands[1].reg);
12214 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12215 inst.instruction |= neon_logbits (et.size) << 18;
12216
12217 inst.instruction = neon_dp_fixup (inst.instruction);
12218 }
12219 else
12220 {
12221 /* A more-specific type check for non-max versions. */
12222 et = neon_check_type (2, NS_QDI,
12223 N_EQK | N_DBL, N_SU_32 | N_KEY);
12224 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12225 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
12226 }
12227 }
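/* Example: "vshll.i16 q0, d1, #16" (shift equal to the element size) uses
   the dedicated maximum-shift encoding above, while "vshll.s16 q0, d1, #3"
   takes the generic immediate-shift path with the stricter signed/unsigned
   type check.  */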
12228
12229 /* Check the various types for the VCVT instruction, and return which version
12230 the current instruction is. */
12231
12232 static int
12233 neon_cvt_flavour (enum neon_shape rs)
12234 {
12235 #define CVT_VAR(C,X,Y) \
12236 et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y)); \
12237 if (et.type != NT_invtype) \
12238 { \
12239 inst.error = NULL; \
12240 return (C); \
12241 }
12242 struct neon_type_el et;
12243 unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
12244 || rs == NS_FF) ? N_VFP : 0;
12245 /* The instruction versions which take an immediate take one register
12246 argument, which is extended to the width of the full register. Thus the
12247 "source" and "destination" registers must have the same width. Hack that
12248 here by making the size equal to the key (wider, in this case) operand. */
12249 unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;
12250
12251 CVT_VAR (0, N_S32, N_F32);
12252 CVT_VAR (1, N_U32, N_F32);
12253 CVT_VAR (2, N_F32, N_S32);
12254 CVT_VAR (3, N_F32, N_U32);
12255
12256 whole_reg = N_VFP;
12257
12258 /* VFP instructions. */
12259 CVT_VAR (4, N_F32, N_F64);
12260 CVT_VAR (5, N_F64, N_F32);
12261 CVT_VAR (6, N_S32, N_F64 | key);
12262 CVT_VAR (7, N_U32, N_F64 | key);
12263 CVT_VAR (8, N_F64 | key, N_S32);
12264 CVT_VAR (9, N_F64 | key, N_U32);
12265 /* VFP instructions with bitshift. */
12266 CVT_VAR (10, N_F32 | key, N_S16);
12267 CVT_VAR (11, N_F32 | key, N_U16);
12268 CVT_VAR (12, N_F64 | key, N_S16);
12269 CVT_VAR (13, N_F64 | key, N_U16);
12270 CVT_VAR (14, N_S16, N_F32 | key);
12271 CVT_VAR (15, N_U16, N_F32 | key);
12272 CVT_VAR (16, N_S16, N_F64 | key);
12273 CVT_VAR (17, N_U16, N_F64 | key);
12274
12275 return -1;
12276 #undef CVT_VAR
12277 }
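/* Illustration of the macro above: CVT_VAR (0, N_S32, N_F32) expands to

     et = neon_check_type (2, rs, whole_reg | N_S32, whole_reg | N_F32);
     if (et.type != NT_invtype)
       {
         inst.error = NULL;
         return 0;
       }

   so the first variant whose types match determines the flavour, and -1
   means no variant matched.  */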
12278
12279 /* Neon-syntax VFP conversions. */
12280
12281 static void
12282 do_vfp_nsyn_cvt (enum neon_shape rs, int flavour)
12283 {
12284 const char *opname = 0;
12285
12286 if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
12287 {
12288 /* Conversions with immediate bitshift. */
12289 const char *enc[] =
12290 {
12291 "ftosls",
12292 "ftouls",
12293 "fsltos",
12294 "fultos",
12295 NULL,
12296 NULL,
12297 "ftosld",
12298 "ftould",
12299 "fsltod",
12300 "fultod",
12301 "fshtos",
12302 "fuhtos",
12303 "fshtod",
12304 "fuhtod",
12305 "ftoshs",
12306 "ftouhs",
12307 "ftoshd",
12308 "ftouhd"
12309 };
12310
12311 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
12312 {
12313 opname = enc[flavour];
12314 constraint (inst.operands[0].reg != inst.operands[1].reg,
12315 _("operands 0 and 1 must be the same register"));
12316 inst.operands[1] = inst.operands[2];
12317 memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
12318 }
12319 }
12320 else
12321 {
12322 /* Conversions without bitshift. */
12323 const char *enc[] =
12324 {
12325 "ftosis",
12326 "ftouis",
12327 "fsitos",
12328 "fuitos",
12329 "fcvtsd",
12330 "fcvtds",
12331 "ftosid",
12332 "ftouid",
12333 "fsitod",
12334 "fuitod"
12335 };
12336
12337 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
12338 opname = enc[flavour];
12339 }
12340
12341 if (opname)
12342 do_vfp_nsyn_opcode (opname);
12343 }
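/* Example of the table lookup above: flavour 2 (f32 from s32) selects
   "fsltos" when a fixed-point bitshift is present and "fsitos" otherwise;
   the chosen legacy mnemonic is then encoded via do_vfp_nsyn_opcode.  */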
12344
12345 static void
12346 do_vfp_nsyn_cvtz (void)
12347 {
12348 enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
12349 int flavour = neon_cvt_flavour (rs);
12350 const char *enc[] =
12351 {
12352 "ftosizs",
12353 "ftouizs",
12354 NULL,
12355 NULL,
12356 NULL,
12357 NULL,
12358 "ftosizd",
12359 "ftouizd"
12360 };
12361
12362 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
12363 do_vfp_nsyn_opcode (enc[flavour]);
12364 }
12365
12366 static void
12367 do_neon_cvt (void)
12368 {
12369 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
12370 NS_FD, NS_DF, NS_FF, NS_NULL);
12371 int flavour = neon_cvt_flavour (rs);
12372
12373 /* VFP rather than Neon conversions. */
12374 if (flavour >= 4)
12375 {
12376 do_vfp_nsyn_cvt (rs, flavour);
12377 return;
12378 }
12379
12380 switch (rs)
12381 {
12382 case NS_DDI:
12383 case NS_QQI:
12384 {
12385 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12386 return;
12387
12388 /* Fixed-point conversion with #0 immediate is encoded as an
12389 integer conversion. */
12390 if (inst.operands[2].present && inst.operands[2].imm == 0)
12391 goto int_encode;
12392 unsigned immbits = 32 - inst.operands[2].imm;
12393 unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
12394 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12395 if (flavour != -1)
12396 inst.instruction |= enctab[flavour];
12397 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12398 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12399 inst.instruction |= LOW4 (inst.operands[1].reg);
12400 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12401 inst.instruction |= neon_quad (rs) << 6;
12402 inst.instruction |= 1 << 21;
12403 inst.instruction |= immbits << 16;
12404
12405 inst.instruction = neon_dp_fixup (inst.instruction);
12406 }
12407 break;
12408
12409 case NS_DD:
12410 case NS_QQ:
12411 int_encode:
12412 {
12413 unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
12414
12415 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12416
12417 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12418 return;
12419
12420 if (flavour != -1)
12421 inst.instruction |= enctab[flavour];
12422
12423 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12424 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12425 inst.instruction |= LOW4 (inst.operands[1].reg);
12426 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12427 inst.instruction |= neon_quad (rs) << 6;
12428 inst.instruction |= 2 << 18;
12429
12430 inst.instruction = neon_dp_fixup (inst.instruction);
12431 }
12432 break;
12433
12434 default:
12435 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
12436 do_vfp_nsyn_cvt (rs, flavour);
12437 }
12438 }
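/* Example of the fixed-point path above: "vcvt.s32.f32 d0, d1, #16" gives
   immbits = 32 - 16 = 16 for the immediate field, whereas a #0 immediate is
   re-encoded as the plain integer conversion via int_encode.  */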
12439
12440 static void
12441 neon_move_immediate (void)
12442 {
12443 enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
12444 struct neon_type_el et = neon_check_type (2, rs,
12445 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
12446 unsigned immlo, immhi = 0, immbits;
12447 int op, cmode;
12448
12449 constraint (et.type == NT_invtype,
12450 _("operand size must be specified for immediate VMOV"));
12451
12452 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
12453 op = (inst.instruction & (1 << 5)) != 0;
12454
12455 immlo = inst.operands[1].imm;
12456 if (inst.operands[1].regisimm)
12457 immhi = inst.operands[1].reg;
12458
12459 constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
12460 _("immediate has bits set outside the operand size"));
12461
12462 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, &immbits, &op,
12463 et.size, et.type)) == FAIL)
12464 {
12465 /* Invert relevant bits only. */
12466 neon_invert_size (&immlo, &immhi, et.size);
12467 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
12468 with one or the other; those cases are caught by
12469 neon_cmode_for_move_imm. */
12470 op = !op;
12471 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, &immbits, &op,
12472 et.size, et.type)) == FAIL)
12473 {
12474 first_error (_("immediate out of range"));
12475 return;
12476 }
12477 }
12478
12479 inst.instruction &= ~(1 << 5);
12480 inst.instruction |= op << 5;
12481
12482 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12483 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12484 inst.instruction |= neon_quad (rs) << 6;
12485 inst.instruction |= cmode << 8;
12486
12487 neon_write_immbits (immbits);
12488 }
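/* Example of the VMOV/VMVN flip above: an immediate such as 0xffffff00 has
   no direct VMOV.I32 encoding, but its inverse 0x000000ff does, so the
   instruction is emitted as VMVN.I32 with the inverted immediate.  */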
12489
12490 static void
12491 do_neon_mvn (void)
12492 {
12493 if (inst.operands[1].isreg)
12494 {
12495 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12496
12497 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12498 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12499 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12500 inst.instruction |= LOW4 (inst.operands[1].reg);
12501 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12502 inst.instruction |= neon_quad (rs) << 6;
12503 }
12504 else
12505 {
12506 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12507 neon_move_immediate ();
12508 }
12509
12510 inst.instruction = neon_dp_fixup (inst.instruction);
12511 }
12512
12513 /* Encode instructions of form:
12514
12515  |28/24|23|22|21 20|19 16|15 12|11    8|7|6|5|4|3 0|
12516  |  U  |x |D |size | Rn  | Rd  |x x x x|N|x|M|x| Rm|
12517
12518 */
12519
12520 static void
12521 neon_mixed_length (struct neon_type_el et, unsigned size)
12522 {
12523 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12524 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12525 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
12526 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
12527 inst.instruction |= LOW4 (inst.operands[2].reg);
12528 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
12529 inst.instruction |= (et.type == NT_unsigned) << 24;
12530 inst.instruction |= neon_logbits (size) << 20;
12531
12532 inst.instruction = neon_dp_fixup (inst.instruction);
12533 }
12534
12535 static void
12536 do_neon_dyadic_long (void)
12537 {
12538 /* FIXME: Type checking for lengthening op. */
12539 struct neon_type_el et = neon_check_type (3, NS_QDD,
12540 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
12541 neon_mixed_length (et, et.size);
12542 }
12543
12544 static void
12545 do_neon_abal (void)
12546 {
12547 struct neon_type_el et = neon_check_type (3, NS_QDD,
12548 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
12549 neon_mixed_length (et, et.size);
12550 }
12551
12552 static void
12553 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
12554 {
12555 if (inst.operands[2].isscalar)
12556 {
12557 struct neon_type_el et = neon_check_type (3, NS_QDS,
12558 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
12559 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
12560 neon_mul_mac (et, et.type == NT_unsigned);
12561 }
12562 else
12563 {
12564 struct neon_type_el et = neon_check_type (3, NS_QDD,
12565 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
12566 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12567 neon_mixed_length (et, et.size);
12568 }
12569 }
12570
12571 static void
12572 do_neon_mac_maybe_scalar_long (void)
12573 {
12574 neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
12575 }
12576
12577 static void
12578 do_neon_dyadic_wide (void)
12579 {
12580 struct neon_type_el et = neon_check_type (3, NS_QQD,
12581 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
12582 neon_mixed_length (et, et.size);
12583 }
12584
12585 static void
12586 do_neon_dyadic_narrow (void)
12587 {
12588 struct neon_type_el et = neon_check_type (3, NS_QDD,
12589 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
12590 /* Operand sign is unimportant, and the U bit is part of the opcode,
12591 so force the operand type to integer. */
12592 et.type = NT_integer;
12593 neon_mixed_length (et, et.size / 2);
12594 }
12595
12596 static void
12597 do_neon_mul_sat_scalar_long (void)
12598 {
12599 neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
12600 }
12601
12602 static void
12603 do_neon_vmull (void)
12604 {
12605 if (inst.operands[2].isscalar)
12606 do_neon_mac_maybe_scalar_long ();
12607 else
12608 {
12609 struct neon_type_el et = neon_check_type (3, NS_QDD,
12610 N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
12611 if (et.type == NT_poly)
12612 inst.instruction = NEON_ENC_POLY (inst.instruction);
12613 else
12614 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12615 /* For polynomial encoding, size field must be 0b00 and the U bit must be
12616 zero. Should be OK as-is. */
12617 neon_mixed_length (et, et.size);
12618 }
12619 }
12620
12621 static void
12622 do_neon_ext (void)
12623 {
12624 enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
12625 struct neon_type_el et = neon_check_type (3, rs,
12626 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
12627 unsigned imm = (inst.operands[3].imm * et.size) / 8;
12628 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12629 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12630 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
12631 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
12632 inst.instruction |= LOW4 (inst.operands[2].reg);
12633 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
12634 inst.instruction |= neon_quad (rs) << 6;
12635 inst.instruction |= imm << 8;
12636
12637 inst.instruction = neon_dp_fixup (inst.instruction);
12638 }
12639
12640 static void
12641 do_neon_rev (void)
12642 {
12643 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12644 struct neon_type_el et = neon_check_type (2, rs,
12645 N_EQK, N_8 | N_16 | N_32 | N_KEY);
12646 unsigned op = (inst.instruction >> 7) & 3;
12647 /* N (width of reversed regions) is encoded as part of the bitmask. We
12648 extract it here to check the elements to be reversed are smaller.
12649 Otherwise we'd get a reserved instruction. */
12650 unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
12651 assert (elsize != 0);
12652 constraint (et.size >= elsize,
12653 _("elements must be smaller than reversal region"));
12654 neon_two_same (neon_quad (rs), 1, et.size);
12655 }
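/* Example of the constraint above: VREV32 reverses 32-bit regions, so
   element sizes 8 and 16 are accepted but "vrev32.32" is rejected, since
   elements must be strictly smaller than the reversal region.  */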
12656
12657 static void
12658 do_neon_dup (void)
12659 {
12660 if (inst.operands[1].isscalar)
12661 {
12662 enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
12663 struct neon_type_el et = neon_check_type (2, rs,
12664 N_EQK, N_8 | N_16 | N_32 | N_KEY);
12665 unsigned sizebits = et.size >> 3;
12666 unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
12667 int logsize = neon_logbits (et.size);
12668 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
12669
12670 if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
12671 return;
12672
12673 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
12674 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12675 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12676 inst.instruction |= LOW4 (dm);
12677 inst.instruction |= HI1 (dm) << 5;
12678 inst.instruction |= neon_quad (rs) << 6;
12679 inst.instruction |= x << 17;
12680 inst.instruction |= sizebits << 16;
12681
12682 inst.instruction = neon_dp_fixup (inst.instruction);
12683 }
12684 else
12685 {
12686 enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
12687 struct neon_type_el et = neon_check_type (2, rs,
12688 N_8 | N_16 | N_32 | N_KEY, N_EQK);
12689 /* Duplicate ARM register to lanes of vector. */
12690 inst.instruction = NEON_ENC_ARMREG (inst.instruction);
12691 switch (et.size)
12692 {
12693 case 8: inst.instruction |= 0x400000; break;
12694 case 16: inst.instruction |= 0x000020; break;
12695 case 32: inst.instruction |= 0x000000; break;
12696 default: break;
12697 }
12698 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
12699 inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
12700 inst.instruction |= HI1 (inst.operands[0].reg) << 7;
12701 inst.instruction |= neon_quad (rs) << 21;
12702 /* The encoding for this instruction is identical for the ARM and Thumb
12703 variants, except for the condition field. */
12704 do_vfp_cond_or_thumb ();
12705 }
12706 }
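/* Worked example of the scalar form above: for "vdup.16 q0, d1[2]",
   sizebits is 2 and logsize is 1, so x = 2 << 1 = 4 and bits [19:16]
   become (4 << 1) | 2 = 0b1010, i.e. the lane index sits above the size
   tag in the imm4 field.  */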
12707
12708 /* VMOV has particularly many variations. It can be one of:
12709 0. VMOV<c><q> <Qd>, <Qm>
12710 1. VMOV<c><q> <Dd>, <Dm>
12711 (Register operations, which are VORR with Rm = Rn.)
12712 2. VMOV<c><q>.<dt> <Qd>, #<imm>
12713 3. VMOV<c><q>.<dt> <Dd>, #<imm>
12714 (Immediate loads.)
12715 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
12716 (ARM register to scalar.)
12717 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
12718 (Two ARM registers to vector.)
12719 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
12720 (Scalar to ARM register.)
12721 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
12722 (Vector to two ARM registers.)
12723 8. VMOV.F32 <Sd>, <Sm>
12724 9. VMOV.F64 <Dd>, <Dm>
12725 (VFP register moves.)
12726 10. VMOV.F32 <Sd>, #imm
12727 11. VMOV.F64 <Dd>, #imm
12728 (VFP float immediate load.)
12729 12. VMOV <Rd>, <Sm>
12730 (VFP single to ARM reg.)
12731 13. VMOV <Sd>, <Rm>
12732 (ARM reg to VFP single.)
12733 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
12734 (Two ARM regs to two VFP singles.)
12735 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
12736 (Two VFP singles to two ARM regs.)
12737
12738 These cases can be disambiguated using neon_select_shape, except cases 1/9
12739 and 3/11 which depend on the operand type too.
12740
12741 All the encoded bits are hardcoded by this function.
12742
12743 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
12744 Cases 5, 7 may be used with VFPv2 and above.
12745
12746 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
12747 can specify a type where it doesn't make sense to, and is ignored).
12748 */
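/* For instance: "vmov d0, r2, r3" selects shape NS_DRR and is encoded via
   case 5 (fmdrr), while "vmov.f32 s0, s1" selects NS_FF and is handled as
   case 8 (fcpys).  */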
12749
12750 static void
12751 do_neon_mov (void)
12752 {
12753 enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
12754 NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
12755 NS_NULL);
12756 struct neon_type_el et;
12757 const char *ldconst = 0;
12758
12759 switch (rs)
12760 {
12761 case NS_DD: /* case 1/9. */
12762 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
12763 /* It is not an error here if no type is given. */
12764 inst.error = NULL;
12765 if (et.type == NT_float && et.size == 64)
12766 {
12767 do_vfp_nsyn_opcode ("fcpyd");
12768 break;
12769 }
12770 /* fall through. */
12771
12772 case NS_QQ: /* case 0/1. */
12773 {
12774 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12775 return;
12776 /* The architecture manual I have doesn't explicitly state which
12777 value the U bit should have for register->register moves, but
12778 the equivalent VORR instruction has U = 0, so do that. */
12779 inst.instruction = 0x0200110;
12780 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12781 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12782 inst.instruction |= LOW4 (inst.operands[1].reg);
12783 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12784 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
12785 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
12786 inst.instruction |= neon_quad (rs) << 6;
12787
12788 inst.instruction = neon_dp_fixup (inst.instruction);
12789 }
12790 break;
12791
12792 case NS_DI: /* case 3/11. */
12793 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
12794 inst.error = NULL;
12795 if (et.type == NT_float && et.size == 64)
12796 {
12797 /* case 11 (fconstd). */
12798 ldconst = "fconstd";
12799 goto encode_fconstd;
12800 }
12801 /* fall through. */
12802
12803 case NS_QI: /* case 2/3. */
12804 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12805 return;
12806 inst.instruction = 0x0800010;
12807 neon_move_immediate ();
12808 inst.instruction = neon_dp_fixup (inst.instruction);
12809 break;
12810
12811 case NS_SR: /* case 4. */
12812 {
12813 unsigned bcdebits = 0;
12814 struct neon_type_el et = neon_check_type (2, NS_NULL,
12815 N_8 | N_16 | N_32 | N_KEY, N_EQK);
12816 int logsize = neon_logbits (et.size);
12817 unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
12818 unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
12819
12820 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
12821 _(BAD_FPU));
12822 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
12823 && et.size != 32, _(BAD_FPU));
12824 constraint (et.type == NT_invtype, _("bad type for scalar"));
12825 constraint (x >= 64 / et.size, _("scalar index out of range"));
12826
12827 switch (et.size)
12828 {
12829 case 8: bcdebits = 0x8; break;
12830 case 16: bcdebits = 0x1; break;
12831 case 32: bcdebits = 0x0; break;
12832 default: ;
12833 }
12834
12835 bcdebits |= x << logsize;
12836
12837 inst.instruction = 0xe000b10;
12838 do_vfp_cond_or_thumb ();
12839 inst.instruction |= LOW4 (dn) << 16;
12840 inst.instruction |= HI1 (dn) << 7;
12841 inst.instruction |= inst.operands[1].reg << 12;
12842 inst.instruction |= (bcdebits & 3) << 5;
12843 inst.instruction |= (bcdebits >> 2) << 21;
12844 }
12845 break;
12846
12847 case NS_DRR: /* case 5 (fmdrr). */
12848 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
12849 _(BAD_FPU));
12850
12851 inst.instruction = 0xc400b10;
12852 do_vfp_cond_or_thumb ();
12853 inst.instruction |= LOW4 (inst.operands[0].reg);
12854 inst.instruction |= HI1 (inst.operands[0].reg) << 5;
12855 inst.instruction |= inst.operands[1].reg << 12;
12856 inst.instruction |= inst.operands[2].reg << 16;
12857 break;
12858
12859 case NS_RS: /* case 6. */
12860 {
12861 struct neon_type_el et = neon_check_type (2, NS_NULL,
12862 N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
12863 unsigned logsize = neon_logbits (et.size);
12864 unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
12865 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
12866 unsigned abcdebits = 0;
12867
12868 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
12869 _(BAD_FPU));
12870 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
12871 && et.size != 32, _(BAD_FPU));
12872 constraint (et.type == NT_invtype, _("bad type for scalar"));
12873 constraint (x >= 64 / et.size, _("scalar index out of range"));
12874
12875 switch (et.size)
12876 {
12877 case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
12878 case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
12879 case 32: abcdebits = 0x00; break;
12880 default: ;
12881 }
12882
12883 abcdebits |= x << logsize;
12884 inst.instruction = 0xe100b10;
12885 do_vfp_cond_or_thumb ();
12886 inst.instruction |= LOW4 (dn) << 16;
12887 inst.instruction |= HI1 (dn) << 7;
12888 inst.instruction |= inst.operands[0].reg << 12;
12889 inst.instruction |= (abcdebits & 3) << 5;
12890 inst.instruction |= (abcdebits >> 2) << 21;
12891 }
12892 break;
12893
12894 case NS_RRD: /* case 7 (fmrrd). */
12895 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
12896 _(BAD_FPU));
12897
12898 inst.instruction = 0xc500b10;
12899 do_vfp_cond_or_thumb ();
12900 inst.instruction |= inst.operands[0].reg << 12;
12901 inst.instruction |= inst.operands[1].reg << 16;
12902 inst.instruction |= LOW4 (inst.operands[2].reg);
12903 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
12904 break;
12905
12906 case NS_FF: /* case 8 (fcpys). */
12907 do_vfp_nsyn_opcode ("fcpys");
12908 break;
12909
12910 case NS_FI: /* case 10 (fconsts). */
12911 ldconst = "fconsts";
12912 encode_fconstd:
12913 if (is_quarter_float (inst.operands[1].imm))
12914 {
12915 inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
12916 do_vfp_nsyn_opcode (ldconst);
12917 }
12918 else
12919 first_error (_("immediate out of range"));
12920 break;
12921
12922 case NS_RF: /* case 12 (fmrs). */
12923 do_vfp_nsyn_opcode ("fmrs");
12924 break;
12925
12926 case NS_FR: /* case 13 (fmsr). */
12927 do_vfp_nsyn_opcode ("fmsr");
12928 break;
12929
12930 /* The encoders for the fmrrs and fmsrr instructions expect three operands
12931 (one of which is a list), but we have parsed four. Do some fiddling to
12932 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
12933 expect. */
12934 case NS_RRFF: /* case 14 (fmrrs). */
12935 constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
12936 _("VFP registers must be adjacent"));
12937 inst.operands[2].imm = 2;
12938 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
12939 do_vfp_nsyn_opcode ("fmrrs");
12940 break;
12941
12942 case NS_FFRR: /* case 15 (fmsrr). */
12943 constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
12944 _("VFP registers must be adjacent"));
12945 inst.operands[1] = inst.operands[2];
12946 inst.operands[2] = inst.operands[3];
12947 inst.operands[0].imm = 2;
12948 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
12949 do_vfp_nsyn_opcode ("fmsrr");
12950 break;
12951
12952 default:
12953 abort ();
12954 }
12955 }
12956
12957 static void
12958 do_neon_rshift_round_imm (void)
12959 {
12960 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12961 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
12962 int imm = inst.operands[2].imm;
12963
12964 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
12965 if (imm == 0)
12966 {
12967 inst.operands[2].present = 0;
12968 do_neon_mov ();
12969 return;
12970 }
12971
12972 constraint (imm < 1 || (unsigned)imm > et.size,
12973 _("immediate out of range for shift"));
12974 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
12975 et.size - imm);
12976 }
12977
12978 static void
12979 do_neon_movl (void)
12980 {
12981 struct neon_type_el et = neon_check_type (2, NS_QD,
12982 N_EQK | N_DBL, N_SU_32 | N_KEY);
12983 unsigned sizebits = et.size >> 3;
12984 inst.instruction |= sizebits << 19;
12985 neon_two_same (0, et.type == NT_unsigned, -1);
12986 }
12987
12988 static void
12989 do_neon_trn (void)
12990 {
12991 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12992 struct neon_type_el et = neon_check_type (2, rs,
12993 N_EQK, N_8 | N_16 | N_32 | N_KEY);
12994 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12995 neon_two_same (neon_quad (rs), 1, et.size);
12996 }
12997
12998 static void
12999 do_neon_zip_uzp (void)
13000 {
13001 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13002 struct neon_type_el et = neon_check_type (2, rs,
13003 N_EQK, N_8 | N_16 | N_32 | N_KEY);
13004 if (rs == NS_DD && et.size == 32)
13005 {
13006 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
13007 inst.instruction = N_MNEM_vtrn;
13008 do_neon_trn ();
13009 return;
13010 }
13011 neon_two_same (neon_quad (rs), 1, et.size);
13012 }
13013
13014 static void
13015 do_neon_sat_abs_neg (void)
13016 {
13017 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13018 struct neon_type_el et = neon_check_type (2, rs,
13019 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
13020 neon_two_same (neon_quad (rs), 1, et.size);
13021 }
13022
13023 static void
13024 do_neon_pair_long (void)
13025 {
13026 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13027 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
13028 /* Unsigned is encoded in the OP field (bit 7) for these instructions. */
13029 inst.instruction |= (et.type == NT_unsigned) << 7;
13030 neon_two_same (neon_quad (rs), 1, et.size);
13031 }
13032
13033 static void
13034 do_neon_recip_est (void)
13035 {
13036 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13037 struct neon_type_el et = neon_check_type (2, rs,
13038 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
13039 inst.instruction |= (et.type == NT_float) << 8;
13040 neon_two_same (neon_quad (rs), 1, et.size);
13041 }
13042
13043 static void
13044 do_neon_cls (void)
13045 {
13046 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13047 struct neon_type_el et = neon_check_type (2, rs,
13048 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
13049 neon_two_same (neon_quad (rs), 1, et.size);
13050 }
13051
13052 static void
13053 do_neon_clz (void)
13054 {
13055 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13056 struct neon_type_el et = neon_check_type (2, rs,
13057 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
13058 neon_two_same (neon_quad (rs), 1, et.size);
13059 }
13060
13061 static void
13062 do_neon_cnt (void)
13063 {
13064 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13065 struct neon_type_el et = neon_check_type (2, rs,
13066 N_EQK | N_INT, N_8 | N_KEY);
13067 neon_two_same (neon_quad (rs), 1, et.size);
13068 }
13069
13070 static void
13071 do_neon_swp (void)
13072 {
13073 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13074 neon_two_same (neon_quad (rs), 1, -1);
13075 }
13076
13077 static void
13078 do_neon_tbl_tbx (void)
13079 {
13080 unsigned listlenbits;
13081 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
13082
13083 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
13084 {
13085 first_error (_("bad list length for table lookup"));
13086 return;
13087 }
13088
13089 listlenbits = inst.operands[1].imm - 1;
13090 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13091 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13092 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13093 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13094 inst.instruction |= LOW4 (inst.operands[2].reg);
13095 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13096 inst.instruction |= listlenbits << 8;
13097
13098 inst.instruction = neon_dp_fixup (inst.instruction);
13099 }
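/* Example: "vtbl.8 d0, {d2-d4}, d6" has a list length of three, so
   listlenbits is 2; lists of fewer than one or more than four registers are
   rejected above.  */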
13100
13101 static void
13102 do_neon_ldm_stm (void)
13103 {
13104 /* P, U and L bits are part of bitmask. */
13105 int is_dbmode = (inst.instruction & (1 << 24)) != 0;
13106 unsigned offsetbits = inst.operands[1].imm * 2;
13107
13108 if (inst.operands[1].issingle)
13109 {
13110 do_vfp_nsyn_ldm_stm (is_dbmode);
13111 return;
13112 }
13113
13114 constraint (is_dbmode && !inst.operands[0].writeback,
13115 _("writeback (!) must be used for VLDMDB and VSTMDB"));
13116
13117 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
13118 _("register list must contain at least 1 and at most 16 "
13119 "registers"));
13120
13121 inst.instruction |= inst.operands[0].reg << 16;
13122 inst.instruction |= inst.operands[0].writeback << 21;
13123 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
13124 inst.instruction |= HI1 (inst.operands[1].reg) << 22;
13125
13126 inst.instruction |= offsetbits;
13127
13128 do_vfp_cond_or_thumb ();
13129 }
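/* Example of the offset field above: each D register occupies two words, so
   "vldmia r0!, {d0-d3}" (four registers) gives offsetbits = 8.  */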
13130
13131 static void
13132 do_neon_ldr_str (void)
13133 {
13134 int is_ldr = (inst.instruction & (1 << 20)) != 0;
13135
13136 if (inst.operands[0].issingle)
13137 {
13138 if (is_ldr)
13139 do_vfp_nsyn_opcode ("flds");
13140 else
13141 do_vfp_nsyn_opcode ("fsts");
13142 }
13143 else
13144 {
13145 if (is_ldr)
13146 do_vfp_nsyn_opcode ("fldd");
13147 else
13148 do_vfp_nsyn_opcode ("fstd");
13149 }
13150 }
13151
13152 /* "interleave" version also handles non-interleaving register VLD1/VST1
13153 instructions. */
13154
13155 static void
13156 do_neon_ld_st_interleave (void)
13157 {
13158 struct neon_type_el et = neon_check_type (1, NS_NULL,
13159 N_8 | N_16 | N_32 | N_64);
13160 unsigned alignbits = 0;
13161 unsigned idx;
13162 /* The bits in this table go:
13163 0: register stride of one (0) or two (1)
13164 1,2: register list length, minus one (1, 2, 3, 4).
13165 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
13166 We use -1 for invalid entries. */
13167 const int typetable[] =
13168 {
13169 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
13170 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
13171 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
13172 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
13173 };
13174 int typebits;
13175
13176 if (et.type == NT_invtype)
13177 return;
13178
13179 if (inst.operands[1].immisalign)
13180 switch (inst.operands[1].imm >> 8)
13181 {
13182 case 64: alignbits = 1; break;
13183 case 128:
13184 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
13185 goto bad_alignment;
13186 alignbits = 2;
13187 break;
13188 case 256:
13189 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
13190 goto bad_alignment;
13191 alignbits = 3;
13192 break;
13193 default:
13194 bad_alignment:
13195 first_error (_("bad alignment"));
13196 return;
13197 }
13198
13199 inst.instruction |= alignbits << 4;
13200 inst.instruction |= neon_logbits (et.size) << 6;
13201
13202 /* Bits [4:6] of the immediate in a list specifier encode register stride
13203 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
13204 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
13205 up the right value for "type" in a table based on this value and the given
13206 list style, then stick it back. */
13207 idx = ((inst.operands[0].imm >> 4) & 7)
13208 | (((inst.instruction >> 8) & 3) << 3);
13209
13210 typebits = typetable[idx];
13211
13212 constraint (typebits == -1, _("bad list type for instruction"));
13213
13214 inst.instruction &= ~0xf00;
13215 inst.instruction |= typebits << 8;
13216 }
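/* Worked example of the table lookup above: a VLD2 of two registers with
   stride one has stride bit 0 and length bits 01, so (imm >> 4) & 7 is 2;
   with <n> = 2 the top bits contribute 1 << 3, giving idx = 10 and
   typebits = typetable[10] = 0x8.  */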
13217
13218 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
13219 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
13220 otherwise. The variable arguments are a list of pairs of legal (size, align)
13221 values, terminated with -1. */
13222
13223 static int
13224 neon_alignment_bit (int size, int align, int *do_align, ...)
13225 {
13226 va_list ap;
13227 int result = FAIL, thissize, thisalign;
13228
13229 if (!inst.operands[1].immisalign)
13230 {
13231 *do_align = 0;
13232 return SUCCESS;
13233 }
13234
13235 va_start (ap, do_align);
13236
13237 do
13238 {
13239 thissize = va_arg (ap, int);
13240 if (thissize == -1)
13241 break;
13242 thisalign = va_arg (ap, int);
13243
13244 if (size == thissize && align == thisalign)
13245 result = SUCCESS;
13246 }
13247 while (result != SUCCESS);
13248
13249 va_end (ap);
13250
13251 if (result == SUCCESS)
13252 *do_align = 1;
13253 else
13254 first_error (_("unsupported alignment for instruction"));
13255
13256 return result;
13257 }
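/* Example call (as used for VLD1/VST1 lane accesses below):

     neon_alignment_bit (et.size, align, &do_align, 16, 16, 32, 32, -1);

   accepts an alignment of 16 for 16-bit elements and 32 for 32-bit elements,
   and reports "unsupported alignment" for any other combination.  */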
13258
13259 static void
13260 do_neon_ld_st_lane (void)
13261 {
13262 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
13263 int align_good, do_align = 0;
13264 int logsize = neon_logbits (et.size);
13265 int align = inst.operands[1].imm >> 8;
13266 int n = (inst.instruction >> 8) & 3;
13267 int max_el = 64 / et.size;
13268
13269 if (et.type == NT_invtype)
13270 return;
13271
13272 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
13273 _("bad list length"));
13274 constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
13275 _("scalar index out of range"));
13276 constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
13277 && et.size == 8,
13278 _("stride of 2 unavailable when element size is 8"));
13279
13280 switch (n)
13281 {
13282 case 0: /* VLD1 / VST1. */
13283 align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
13284 32, 32, -1);
13285 if (align_good == FAIL)
13286 return;
13287 if (do_align)
13288 {
13289 unsigned alignbits = 0;
13290 switch (et.size)
13291 {
13292 case 16: alignbits = 0x1; break;
13293 case 32: alignbits = 0x3; break;
13294 default: ;
13295 }
13296 inst.instruction |= alignbits << 4;
13297 }
13298 break;
13299
13300 case 1: /* VLD2 / VST2. */
13301 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
13302 32, 64, -1);
13303 if (align_good == FAIL)
13304 return;
13305 if (do_align)
13306 inst.instruction |= 1 << 4;
13307 break;
13308
13309 case 2: /* VLD3 / VST3. */
13310 constraint (inst.operands[1].immisalign,
13311 _("can't use alignment with this instruction"));
13312 break;
13313
13314 case 3: /* VLD4 / VST4. */
13315 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
13316 16, 64, 32, 64, 32, 128, -1);
13317 if (align_good == FAIL)
13318 return;
13319 if (do_align)
13320 {
13321 unsigned alignbits = 0;
13322 switch (et.size)
13323 {
13324 case 8: alignbits = 0x1; break;
13325 case 16: alignbits = 0x1; break;
13326 case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
13327 default: ;
13328 }
13329 inst.instruction |= alignbits << 4;
13330 }
13331 break;
13332
13333 default: ;
13334 }
13335
13336 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
13337 if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
13338 inst.instruction |= 1 << (4 + logsize);
13339
13340 inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
13341 inst.instruction |= logsize << 10;
13342 }
13343
13344 /* Encode single n-element structure to all lanes VLD<n> instructions. */
13345
13346 static void
13347 do_neon_ld_dup (void)
13348 {
13349 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
13350 int align_good, do_align = 0;
13351
13352 if (et.type == NT_invtype)
13353 return;
13354
13355 switch ((inst.instruction >> 8) & 3)
13356 {
13357 case 0: /* VLD1. */
13358 assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
13359 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
13360 &do_align, 16, 16, 32, 32, -1);
13361 if (align_good == FAIL)
13362 return;
13363 switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
13364 {
13365 case 1: break;
13366 case 2: inst.instruction |= 1 << 5; break;
13367 default: first_error (_("bad list length")); return;
13368 }
13369 inst.instruction |= neon_logbits (et.size) << 6;
13370 break;
13371
13372 case 1: /* VLD2. */
13373 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
13374 &do_align, 8, 16, 16, 32, 32, 64, -1);
13375 if (align_good == FAIL)
13376 return;
13377 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
13378 _("bad list length"));
13379 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
13380 inst.instruction |= 1 << 5;
13381 inst.instruction |= neon_logbits (et.size) << 6;
13382 break;
13383
13384 case 2: /* VLD3. */
13385 constraint (inst.operands[1].immisalign,
13386 _("can't use alignment with this instruction"));
13387 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
13388 _("bad list length"));
13389 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
13390 inst.instruction |= 1 << 5;
13391 inst.instruction |= neon_logbits (et.size) << 6;
13392 break;
13393
13394 case 3: /* VLD4. */
13395 {
13396 int align = inst.operands[1].imm >> 8;
13397 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
13398 16, 64, 32, 64, 32, 128, -1);
13399 if (align_good == FAIL)
13400 return;
13401 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
13402 _("bad list length"));
13403 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
13404 inst.instruction |= 1 << 5;
13405 if (et.size == 32 && align == 128)
13406 inst.instruction |= 0x3 << 6;
13407 else
13408 inst.instruction |= neon_logbits (et.size) << 6;
13409 }
13410 break;
13411
13412 default: ;
13413 }
13414
13415 inst.instruction |= do_align << 4;
13416 }
13417
13418 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
13419 apart from bits [11:4]). */
13420
13421 static void
13422 do_neon_ldx_stx (void)
13423 {
13424 switch (NEON_LANE (inst.operands[0].imm))
13425 {
13426 case NEON_INTERLEAVE_LANES:
13427 inst.instruction = NEON_ENC_INTERLV (inst.instruction);
13428 do_neon_ld_st_interleave ();
13429 break;
13430
13431 case NEON_ALL_LANES:
13432 inst.instruction = NEON_ENC_DUP (inst.instruction);
13433 do_neon_ld_dup ();
13434 break;
13435
13436 default:
13437 inst.instruction = NEON_ENC_LANE (inst.instruction);
13438 do_neon_ld_st_lane ();
13439 }
13440
13441 /* L bit comes from bit mask. */
13442 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13443 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13444 inst.instruction |= inst.operands[1].reg << 16;
13445
13446 if (inst.operands[1].postind)
13447 {
13448 int postreg = inst.operands[1].imm & 0xf;
13449 constraint (!inst.operands[1].immisreg,
13450 _("post-index must be a register"));
13451 constraint (postreg == 0xd || postreg == 0xf,
13452 _("bad register for post-index"));
13453 inst.instruction |= postreg;
13454 }
13455 else if (inst.operands[1].writeback)
13456 {
13457 inst.instruction |= 0xd;
13458 }
13459 else
13460 inst.instruction |= 0xf;
13461
13462 if (thumb_mode)
13463 inst.instruction |= 0xf9000000;
13464 else
13465 inst.instruction |= 0xf4000000;
13466 }
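/* Example of the Rm field handling above: "vld1.32 {d0}, [r1]" leaves Rm as
   0xf (no writeback), "vld1.32 {d0}, [r1]!" sets Rm to 0xd (writeback), and
   "vld1.32 {d0}, [r1], r2" puts r2 in Rm; r13 and r15 are rejected as
   post-index registers.  */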
13467
13468 \f
13469 /* Overall per-instruction processing. */
13470
13471 /* We need to be able to fix up arbitrary expressions in some statements.
13472 This is so that we can handle symbols that are an arbitrary distance from
13473 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
13474 which returns part of an address in a form which will be valid for
13475 a data instruction. We do this by pushing the expression into a symbol
13476 in the expr_section, and creating a fix for that. */
13477
13478 static void
13479 fix_new_arm (fragS * frag,
13480 int where,
13481 short int size,
13482 expressionS * exp,
13483 int pc_rel,
13484 int reloc)
13485 {
13486 fixS * new_fix;
13487
13488 switch (exp->X_op)
13489 {
13490 case O_constant:
13491 case O_symbol:
13492 case O_add:
13493 case O_subtract:
13494 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
13495 break;
13496
13497 default:
13498 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
13499 pc_rel, reloc);
13500 break;
13501 }
13502
13503 /* Mark whether the fix is to a THUMB instruction, or an ARM
13504 instruction. */
13505 new_fix->tc_fix_data = thumb_mode;
13506 }
13507
13508 /* Create a frag for an instruction requiring relaxation. */
13509 static void
13510 output_relax_insn (void)
13511 {
13512 char * to;
13513 symbolS *sym;
13514 int offset;
13515
13516 /* The size of the instruction is unknown, so tie the debug info to the
13517 start of the instruction. */
13518 dwarf2_emit_insn (0);
13519
13520 switch (inst.reloc.exp.X_op)
13521 {
13522 case O_symbol:
13523 sym = inst.reloc.exp.X_add_symbol;
13524 offset = inst.reloc.exp.X_add_number;
13525 break;
13526 case O_constant:
13527 sym = NULL;
13528 offset = inst.reloc.exp.X_add_number;
13529 break;
13530 default:
13531 sym = make_expr_symbol (&inst.reloc.exp);
13532 offset = 0;
13533 break;
13534 }
13535 to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
13536 inst.relax, sym, offset, NULL/*offset, opcode*/);
13537 md_number_to_chars (to, inst.instruction, THUMB_SIZE);
13538 }
13539
13540 /* Write a 32-bit thumb instruction to buf. */
13541 static void
13542 put_thumb32_insn (char * buf, unsigned long insn)
13543 {
13544 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
13545 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
13546 }
13547
13548 static void
13549 output_inst (const char * str)
13550 {
13551 char * to = NULL;
13552
13553 if (inst.error)
13554 {
13555 as_bad ("%s -- `%s'", inst.error, str);
13556 return;
13557 }
13558 if (inst.relax) {
13559 output_relax_insn();
13560 return;
13561 }
13562 if (inst.size == 0)
13563 return;
13564
13565 to = frag_more (inst.size);
13566
13567 if (thumb_mode && (inst.size > THUMB_SIZE))
13568 {
13569 assert (inst.size == (2 * THUMB_SIZE));
13570 put_thumb32_insn (to, inst.instruction);
13571 }
13572 else if (inst.size > INSN_SIZE)
13573 {
13574 assert (inst.size == (2 * INSN_SIZE));
13575 md_number_to_chars (to, inst.instruction, INSN_SIZE);
13576 md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
13577 }
13578 else
13579 md_number_to_chars (to, inst.instruction, inst.size);
13580
13581 if (inst.reloc.type != BFD_RELOC_UNUSED)
13582 fix_new_arm (frag_now, to - frag_now->fr_literal,
13583 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
13584 inst.reloc.type);
13585
13586 dwarf2_emit_insn (inst.size);
13587 }
13588
13589 /* Tag values used in struct asm_opcode's tag field. */
13590 enum opcode_tag
13591 {
13592 OT_unconditional, /* Instruction cannot be conditionalized.
13593 The ARM condition field is still 0xE. */
13594 OT_unconditionalF, /* Instruction cannot be conditionalized
13595 and carries 0xF in its ARM condition field. */
13596 OT_csuffix, /* Instruction takes a conditional suffix. */
13597 OT_csuffixF, /* Some forms of the instruction take a conditional
13598 suffix, others place 0xF where the condition field
13599 would be. */
13600 OT_cinfix3, /* Instruction takes a conditional infix,
13601 beginning at character index 3. (In
13602 unified mode, it becomes a suffix.) */
13603 OT_cinfix3_deprecated, /* The same as OT_cinfix3. This is used for
13604 tsts, cmps, cmns, and teqs. */
13605 OT_cinfix3_legacy, /* Legacy instruction takes a conditional infix at
13606 character index 3, even in unified mode. Used for
13607 legacy instructions where suffix and infix forms
13608 may be ambiguous. */
13609 OT_csuf_or_in3, /* Instruction takes either a conditional
13610 suffix or an infix at character index 3. */
13611 OT_odd_infix_unc, /* This is the unconditional variant of an
13612 instruction that takes a conditional infix
13613 at an unusual position. In unified mode,
13614 this variant will accept a suffix. */
13615 OT_odd_infix_0 /* Values greater than or equal to OT_odd_infix_0
13616 are the conditional variants of instructions that
13617 take conditional infixes in unusual positions.
13618 The infix appears at character index
13619 (tag - OT_odd_infix_0). These are not accepted
13620 in unified mode. */
13621 };
13622
13623 /* Subroutine of md_assemble, responsible for looking up the primary
13624 opcode from the mnemonic the user wrote. STR points to the
13625 beginning of the mnemonic.
13626
13627 This is not simply a hash table lookup, because of conditional
13628 variants. Most instructions have conditional variants, which are
13629 expressed with a _conditional affix_ to the mnemonic. If we were
13630 to encode each conditional variant as a literal string in the opcode
13631 table, it would have approximately 20,000 entries.
13632
13633 Most mnemonics take this affix as a suffix, and in unified syntax,
13634 'most' is upgraded to 'all'. However, in the divided syntax, some
13635 instructions take the affix as an infix, notably the s-variants of
13636 the arithmetic instructions. Of those instructions, all but six
13637 have the infix appear after the third character of the mnemonic.
13638
13639 Accordingly, the algorithm for looking up primary opcodes given
13640 an identifier is:
13641
13642 1. Look up the identifier in the opcode table.
13643 If we find a match, go to step U.
13644
13645 2. Look up the last two characters of the identifier in the
13646 conditions table. If we find a match, look up the first N-2
13647 characters of the identifier in the opcode table. If we
13648 find a match, go to step CE.
13649
13650 3. Look up the fourth and fifth characters of the identifier in
13651 the conditions table. If we find a match, extract those
13652 characters from the identifier, and look up the remaining
13653 characters in the opcode table. If we find a match, go
13654 to step CM.
13655
13656 4. Fail.
13657
13658 U. Examine the tag field of the opcode structure, in case this is
13659 one of the six instructions with its conditional infix in an
13660 unusual place. If it is, the tag tells us where to find the
13661 infix; look it up in the conditions table and set inst.cond
13662 accordingly. Otherwise, this is an unconditional instruction.
13663 Again set inst.cond accordingly. Return the opcode structure.
13664
13665 CE. Examine the tag field to make sure this is an instruction that
13666 should receive a conditional suffix. If it is not, fail.
13667 Otherwise, set inst.cond from the suffix we already looked up,
13668 and return the opcode structure.
13669
13670 CM. Examine the tag field to make sure this is an instruction that
13671 should receive a conditional infix after the third character.
13672 If it is not, fail. Otherwise, undo the edits to the current
13673 line of input and proceed as for case CE. */
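/* For example: "addseq" finds "eq" in the conditions table and "adds" in the
   opcode table, so it is handled by step CE; the divided-syntax form
   "addeqs" fails steps 1 and 2, but step 3 extracts "eq" from characters
   four and five, finds "adds", and proceeds via step CM.  */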
13674
13675 static const struct asm_opcode *
13676 opcode_lookup (char **str)
13677 {
13678 char *end, *base;
13679 char *affix;
13680 const struct asm_opcode *opcode;
13681 const struct asm_cond *cond;
13682 char save[2];
13683 bfd_boolean neon_supported;
13684
13685 neon_supported = ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1);
13686
13687 /* Scan up to the end of the mnemonic, which must end in white space,
13688 '.' (in unified mode, or for Neon instructions), or end of string. */
13689 for (base = end = *str; *end != '\0'; end++)
13690 if (*end == ' ' || ((unified_syntax || neon_supported) && *end == '.'))
13691 break;
13692
13693 if (end == base)
13694 return 0;
13695
13696 /* Handle a possible width suffix and/or Neon type suffix. */
13697 if (end[0] == '.')
13698 {
13699 int offset = 2;
13700
13701 /* The .w and .n suffixes are only valid if the unified syntax is in
13702 use. */
13703 if (unified_syntax && end[1] == 'w')
13704 inst.size_req = 4;
13705 else if (unified_syntax && end[1] == 'n')
13706 inst.size_req = 2;
13707 else
13708 offset = 0;
13709
13710 inst.vectype.elems = 0;
13711
13712 *str = end + offset;
13713
13714 if (end[offset] == '.')
13715 {
13716 /* See if we have a Neon type suffix (possible in either unified or
13717 non-unified ARM syntax mode). */
13718 if (parse_neon_type (&inst.vectype, str) == FAIL)
13719 return 0;
13720 }
13721 else if (end[offset] != '\0' && end[offset] != ' ')
13722 return 0;
13723 }
13724 else
13725 *str = end;
13726
13727 /* Look for unaffixed or special-case affixed mnemonic. */
13728 opcode = hash_find_n (arm_ops_hsh, base, end - base);
13729 if (opcode)
13730 {
13731 /* step U */
13732 if (opcode->tag < OT_odd_infix_0)
13733 {
13734 inst.cond = COND_ALWAYS;
13735 return opcode;
13736 }
13737
13738 if (unified_syntax)
13739 as_warn (_("conditional infixes are deprecated in unified syntax"));
13740 affix = base + (opcode->tag - OT_odd_infix_0);
13741 cond = hash_find_n (arm_cond_hsh, affix, 2);
13742 assert (cond);
13743
13744 inst.cond = cond->value;
13745 return opcode;
13746 }
13747
13748   /* Cannot have a conditional suffix on a mnemonic of less than three
13749      characters: two for the suffix plus at least one for the base.  */
13750 if (end - base < 3)
13751 return 0;
13752
13753 /* Look for suffixed mnemonic. */
13754 affix = end - 2;
13755 cond = hash_find_n (arm_cond_hsh, affix, 2);
13756 opcode = hash_find_n (arm_ops_hsh, base, affix - base);
13757 if (opcode && cond)
13758 {
13759 /* step CE */
13760 switch (opcode->tag)
13761 {
13762 case OT_cinfix3_legacy:
13763 	  /* Ignore conditional suffixes matched on infix-only mnemonics.  */
13764 break;
13765
13766 case OT_cinfix3:
13767 case OT_cinfix3_deprecated:
13768 case OT_odd_infix_unc:
13769 if (!unified_syntax)
13770 return 0;
13771 /* else fall through */
13772
13773 case OT_csuffix:
13774 case OT_csuffixF:
13775 case OT_csuf_or_in3:
13776 inst.cond = cond->value;
13777 return opcode;
13778
13779 case OT_unconditional:
13780 case OT_unconditionalF:
13781 if (thumb_mode)
13782 {
13783 inst.cond = cond->value;
13784 }
13785 else
13786 {
13787 	      /* Delayed diagnostic -- reported after the rest of the instruction is parsed.  */
13788 inst.error = BAD_COND;
13789 inst.cond = COND_ALWAYS;
13790 }
13791 return opcode;
13792
13793 default:
13794 return 0;
13795 }
13796 }
13797
13798 /* Cannot have a usual-position infix on a mnemonic of less than
13799 six characters (five would be a suffix). */
13800 if (end - base < 6)
13801 return 0;
13802
13803 /* Look for infixed mnemonic in the usual position. */
13804 affix = base + 3;
13805 cond = hash_find_n (arm_cond_hsh, affix, 2);
13806 if (!cond)
13807 return 0;
13808
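  /* Temporarily remove the two infix characters from the line, look up the
     remaining mnemonic, then restore the original spelling so the input is
     left unchanged whether or not the lookup succeeds.  */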
13809 memcpy (save, affix, 2);
13810 memmove (affix, affix + 2, (end - affix) - 2);
13811 opcode = hash_find_n (arm_ops_hsh, base, (end - base) - 2);
13812 memmove (affix + 2, affix, (end - affix) - 2);
13813 memcpy (affix, save, 2);
13814
13815 if (opcode
13816 && (opcode->tag == OT_cinfix3
13817 || opcode->tag == OT_cinfix3_deprecated
13818 || opcode->tag == OT_csuf_or_in3
13819 || opcode->tag == OT_cinfix3_legacy))
13820 {
13821 /* step CM */
13822 if (unified_syntax
13823 && (opcode->tag == OT_cinfix3
13824 || opcode->tag == OT_cinfix3_deprecated))
13825 as_warn (_("conditional infixes are deprecated in unified syntax"));
13826
13827 inst.cond = cond->value;
13828 return opcode;
13829 }
13830
13831 return 0;
13832 }
13833
13834 void
13835 md_assemble (char *str)
13836 {
13837 char *p = str;
13838 const struct asm_opcode * opcode;
13839
13840 /* Align the previous label if needed. */
13841 if (last_label_seen != NULL)
13842 {
13843 symbol_set_frag (last_label_seen, frag_now);
13844 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
13845 S_SET_SEGMENT (last_label_seen, now_seg);
13846 }
13847
13848 memset (&inst, '\0', sizeof (inst));
13849 inst.reloc.type = BFD_RELOC_UNUSED;
13850
13851 opcode = opcode_lookup (&p);
13852 if (!opcode)
13853 {
13854 /* It wasn't an instruction, but it might be a register alias of
13855 the form alias .req reg, or a Neon .dn/.qn directive. */
13856 if (!create_register_alias (str, p)
13857 && !create_neon_reg_alias (str, p))
13858 as_bad (_("bad instruction `%s'"), str);
13859
13860 return;
13861 }
13862
13863 if (opcode->tag == OT_cinfix3_deprecated)
13864 as_warn (_("s suffix on comparison instruction is deprecated"));
13865
13866 /* The value which unconditional instructions should have in place of the
13867 condition field. */
13868 inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;
13869
13870 if (thumb_mode)
13871 {
13872 arm_feature_set variant;
13873
13874 variant = cpu_variant;
13875 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
13876 if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
13877 ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
13878 /* Check that this instruction is supported for this CPU. */
13879 if (!opcode->tvariant
13880 || (thumb_mode == 1
13881 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
13882 {
13883 as_bad (_("selected processor does not support `%s'"), str);
13884 return;
13885 }
13886 if (inst.cond != COND_ALWAYS && !unified_syntax
13887 && opcode->tencode != do_t_branch)
13888 {
13889 as_bad (_("Thumb does not support conditional execution"));
13890 return;
13891 }
13892
13893 /* Check conditional suffixes. */
13894 if (current_it_mask)
13895 {
13896 int cond;
13897 cond = current_cc ^ ((current_it_mask >> 4) & 1) ^ 1;
13898 current_it_mask <<= 1;
13899 current_it_mask &= 0x1f;
13900 /* The BKPT instruction is unconditional even in an IT block. */
13901 if (!inst.error
13902 && cond != inst.cond && opcode->tencode != do_t_bkpt)
13903 {
13904 as_bad (_("incorrect condition in IT block"));
13905 return;
13906 }
13907 }
13908 else if (inst.cond != COND_ALWAYS && opcode->tencode != do_t_branch)
13909 {
13910 	      as_bad (_("thumb conditional instruction not in IT block"));
13911 return;
13912 }
13913
13914 mapping_state (MAP_THUMB);
13915 inst.instruction = opcode->tvalue;
13916
13917 if (!parse_operands (p, opcode->operands))
13918 opcode->tencode ();
13919
13920 /* Clear current_it_mask at the end of an IT block. */
13921 if (current_it_mask == 0x10)
13922 current_it_mask = 0;
13923
13924 if (!(inst.error || inst.relax))
13925 {
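	  /* A complete 16-bit Thumb encoding is below 0xe800; values in the
	     range 0xe800-0xffff are only ever the first halfword of a 32-bit
	     encoding, so by now inst.instruction must be either a 16-bit
	     encoding or a full 32-bit one -- hence the assert and the size
	     calculation below.  */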
13926 assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
13927 inst.size = (inst.instruction > 0xffff ? 4 : 2);
13928 if (inst.size_req && inst.size_req != inst.size)
13929 {
13930 as_bad (_("cannot honor width suffix -- `%s'"), str);
13931 return;
13932 }
13933 }
13934 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
13935 *opcode->tvariant);
13936 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
13937 	     set those bits when Thumb-2 32-bit instructions are seen, i.e.
13938 anything other than bl/blx.
13939 This is overly pessimistic for relaxable instructions. */
13940 if ((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
13941 || inst.relax)
13942 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
13943 arm_ext_v6t2);
13944 }
13945 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
13946 {
13947 /* Check that this instruction is supported for this CPU. */
13948 if (!opcode->avariant ||
13949 !ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
13950 {
13951 as_bad (_("selected processor does not support `%s'"), str);
13952 return;
13953 }
13954 if (inst.size_req)
13955 {
13956 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
13957 return;
13958 }
13959
13960 mapping_state (MAP_ARM);
13961 inst.instruction = opcode->avalue;
13962 if (opcode->tag == OT_unconditionalF)
13963 inst.instruction |= 0xF << 28;
13964 else
13965 inst.instruction |= inst.cond << 28;
13966 inst.size = INSN_SIZE;
13967 if (!parse_operands (p, opcode->operands))
13968 opcode->aencode ();
13969 /* Arm mode bx is marked as both v4T and v5 because it's still required
13970 on a hypothetical non-thumb v5 core. */
13971 if (ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v4t)
13972 || ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v5))
13973 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
13974 else
13975 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
13976 *opcode->avariant);
13977 }
13978 else
13979 {
13980 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
13981 "-- `%s'"), str);
13982 return;
13983 }
13984 output_inst (str);
13985 }
13986
13987 /* Various frobbings of labels and their addresses. */
13988
13989 void
13990 arm_start_line_hook (void)
13991 {
13992 last_label_seen = NULL;
13993 }
13994
13995 void
13996 arm_frob_label (symbolS * sym)
13997 {
13998 last_label_seen = sym;
13999
14000 ARM_SET_THUMB (sym, thumb_mode);
14001
14002 #if defined OBJ_COFF || defined OBJ_ELF
14003 ARM_SET_INTERWORK (sym, support_interwork);
14004 #endif
14005
14006 /* Note - do not allow local symbols (.Lxxx) to be labeled
14007 as Thumb functions. This is because these labels, whilst
14008 they exist inside Thumb code, are not the entry points for
14009 possible ARM->Thumb calls. Also, these labels can be used
14010      as part of a computed goto or switch statement.  For example, gcc
14011 can generate code that looks like this:
14012
14013 ldr r2, [pc, .Laaa]
14014 lsl r3, r3, #2
14015 ldr r2, [r3, r2]
14016 mov pc, r2
14017
14018 .Lbbb: .word .Lxxx
14019 .Lccc: .word .Lyyy
14020 ..etc...
14021 	     .Laaa:	.word	.Lbbb
14022
14023 The first instruction loads the address of the jump table.
14024 The second instruction converts a table index into a byte offset.
14025 The third instruction gets the jump address out of the table.
14026 The fourth instruction performs the jump.
14027
14028 If the address stored at .Laaa is that of a symbol which has the
14029 Thumb_Func bit set, then the linker will arrange for this address
14030 to have the bottom bit set, which in turn would mean that the
14031 address computation performed by the third instruction would end
14032 up with the bottom bit set. Since the ARM is capable of unaligned
14033 word loads, the instruction would then load the incorrect address
14034 out of the jump table, and chaos would ensue. */
14035 if (label_is_thumb_function_name
14036 && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
14037 && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
14038 {
14039 /* When the address of a Thumb function is taken the bottom
14040 bit of that address should be set. This will allow
14041 interworking between Arm and Thumb functions to work
14042 correctly. */
14043
14044 THUMB_SET_FUNC (sym, 1);
14045
14046 label_is_thumb_function_name = FALSE;
14047 }
14048
14049 dwarf2_emit_label (sym);
14050 }
14051
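/* The next two hooks deal with the "/data" marker that labels in Thumb code
   may carry to flag data embedded in the instruction stream:
   arm_data_in_code recognizes the marker when a label is read, and
   arm_canonicalize_symbol_name strips it from the symbol name again.  */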
14052 int
14053 arm_data_in_code (void)
14054 {
14055 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
14056 {
14057 *input_line_pointer = '/';
14058 input_line_pointer += 5;
14059 *input_line_pointer = 0;
14060 return 1;
14061 }
14062
14063 return 0;
14064 }
14065
14066 char *
14067 arm_canonicalize_symbol_name (char * name)
14068 {
14069 int len;
14070
14071 if (thumb_mode && (len = strlen (name)) > 5
14072 && streq (name + len - 5, "/data"))
14073 *(name + len - 5) = 0;
14074
14075 return name;
14076 }
14077 \f
14078 /* Table of all register names defined by default. The user can
14079 define additional names with .req. Note that all register names
14080 should appear in both upper and lowercase variants. Some registers
14081 also have mixed-case names. */
14082
14083 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
14084 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
14085 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
14086 #define REGSET(p,t) \
14087 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
14088 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
14089 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
14090 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
14091 #define REGSETH(p,t) \
14092 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
14093 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
14094 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
14095 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
14096 #define REGSET2(p,t) \
14097 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
14098 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
14099 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
14100 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
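/* Note that REGNUM2 records each register under twice its index; it is used
   below for the Neon quad registers, so that (for instance) q1 is stored as
   2, the number of the first of its two double-word halves.  */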
14101
14102 static const struct reg_entry reg_names[] =
14103 {
14104 /* ARM integer registers. */
14105 REGSET(r, RN), REGSET(R, RN),
14106
14107 /* ATPCS synonyms. */
14108 REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
14109 REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
14110 REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
14111
14112 REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
14113 REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
14114 REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
14115
14116 /* Well-known aliases. */
14117 REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
14118 REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
14119
14120 REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
14121 REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
14122
14123 /* Coprocessor numbers. */
14124 REGSET(p, CP), REGSET(P, CP),
14125
14126 /* Coprocessor register numbers. The "cr" variants are for backward
14127 compatibility. */
14128 REGSET(c, CN), REGSET(C, CN),
14129 REGSET(cr, CN), REGSET(CR, CN),
14130
14131 /* FPA registers. */
14132 REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
14133 REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
14134
14135 REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
14136 REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
14137
14138 /* VFP SP registers. */
14139 REGSET(s,VFS), REGSET(S,VFS),
14140 REGSETH(s,VFS), REGSETH(S,VFS),
14141
14142 /* VFP DP Registers. */
14143 REGSET(d,VFD), REGSET(D,VFD),
14144 /* Extra Neon DP registers. */
14145 REGSETH(d,VFD), REGSETH(D,VFD),
14146
14147 /* Neon QP registers. */
14148 REGSET2(q,NQ), REGSET2(Q,NQ),
14149
14150 /* VFP control registers. */
14151 REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
14152 REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
14153
14154 /* Maverick DSP coprocessor registers. */
14155 REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
14156 REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),
14157
14158 REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
14159 REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
14160 REGDEF(dspsc,0,DSPSC),
14161
14162 REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
14163 REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
14164 REGDEF(DSPSC,0,DSPSC),
14165
14166 /* iWMMXt data registers - p0, c0-15. */
14167 REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
14168
14169 /* iWMMXt control registers - p1, c0-3. */
14170 REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
14171 REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
14172 REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
14173 REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),
14174
14175 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
14176 REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
14177 REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
14178 REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
14179 REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),
14180
14181 /* XScale accumulator registers. */
14182 REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
14183 };
14184 #undef REGDEF
14185 #undef REGNUM
14186 #undef REGSET
14187
14188 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
14189 within psr_required_here. */
14190 static const struct asm_psr psrs[] =
14191 {
14192 /* Backward compatibility notation. Note that "all" is no longer
14193 truly all possible PSR bits. */
14194 {"all", PSR_c | PSR_f},
14195 {"flg", PSR_f},
14196 {"ctl", PSR_c},
14197
14198 /* Individual flags. */
14199 {"f", PSR_f},
14200 {"c", PSR_c},
14201 {"x", PSR_x},
14202 {"s", PSR_s},
14203 /* Combinations of flags. */
14204 {"fs", PSR_f | PSR_s},
14205 {"fx", PSR_f | PSR_x},
14206 {"fc", PSR_f | PSR_c},
14207 {"sf", PSR_s | PSR_f},
14208 {"sx", PSR_s | PSR_x},
14209 {"sc", PSR_s | PSR_c},
14210 {"xf", PSR_x | PSR_f},
14211 {"xs", PSR_x | PSR_s},
14212 {"xc", PSR_x | PSR_c},
14213 {"cf", PSR_c | PSR_f},
14214 {"cs", PSR_c | PSR_s},
14215 {"cx", PSR_c | PSR_x},
14216 {"fsx", PSR_f | PSR_s | PSR_x},
14217 {"fsc", PSR_f | PSR_s | PSR_c},
14218 {"fxs", PSR_f | PSR_x | PSR_s},
14219 {"fxc", PSR_f | PSR_x | PSR_c},
14220 {"fcs", PSR_f | PSR_c | PSR_s},
14221 {"fcx", PSR_f | PSR_c | PSR_x},
14222 {"sfx", PSR_s | PSR_f | PSR_x},
14223 {"sfc", PSR_s | PSR_f | PSR_c},
14224 {"sxf", PSR_s | PSR_x | PSR_f},
14225 {"sxc", PSR_s | PSR_x | PSR_c},
14226 {"scf", PSR_s | PSR_c | PSR_f},
14227 {"scx", PSR_s | PSR_c | PSR_x},
14228 {"xfs", PSR_x | PSR_f | PSR_s},
14229 {"xfc", PSR_x | PSR_f | PSR_c},
14230 {"xsf", PSR_x | PSR_s | PSR_f},
14231 {"xsc", PSR_x | PSR_s | PSR_c},
14232 {"xcf", PSR_x | PSR_c | PSR_f},
14233 {"xcs", PSR_x | PSR_c | PSR_s},
14234 {"cfs", PSR_c | PSR_f | PSR_s},
14235 {"cfx", PSR_c | PSR_f | PSR_x},
14236 {"csf", PSR_c | PSR_s | PSR_f},
14237 {"csx", PSR_c | PSR_s | PSR_x},
14238 {"cxf", PSR_c | PSR_x | PSR_f},
14239 {"cxs", PSR_c | PSR_x | PSR_s},
14240 {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
14241 {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
14242 {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
14243 {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
14244 {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
14245 {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
14246 {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
14247 {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
14248 {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
14249 {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
14250 {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
14251 {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
14252 {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
14253 {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
14254 {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
14255 {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
14256 {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
14257 {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
14258 {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
14259 {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
14260 {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
14261 {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
14262 {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
14263 {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
14264 };
14265
14266 /* Table of V7M psr names. */
14267 static const struct asm_psr v7m_psrs[] =
14268 {
14269 {"apsr", 0 },
14270 {"iapsr", 1 },
14271 {"eapsr", 2 },
14272 {"psr", 3 },
14273 {"ipsr", 5 },
14274 {"epsr", 6 },
14275 {"iepsr", 7 },
14276 {"msp", 8 },
14277 {"psp", 9 },
14278 {"primask", 16},
14279 {"basepri", 17},
14280 {"basepri_max", 18},
14281 {"faultmask", 19},
14282 {"control", 20}
14283 };
14284
14285 /* Table of all shift-in-operand names. */
14286 static const struct asm_shift_name shift_names [] =
14287 {
14288 { "asl", SHIFT_LSL }, { "ASL", SHIFT_LSL },
14289 { "lsl", SHIFT_LSL }, { "LSL", SHIFT_LSL },
14290 { "lsr", SHIFT_LSR }, { "LSR", SHIFT_LSR },
14291 { "asr", SHIFT_ASR }, { "ASR", SHIFT_ASR },
14292 { "ror", SHIFT_ROR }, { "ROR", SHIFT_ROR },
14293 { "rrx", SHIFT_RRX }, { "RRX", SHIFT_RRX }
14294 };
14295
14296 /* Table of all explicit relocation names. */
14297 #ifdef OBJ_ELF
14298 static struct reloc_entry reloc_names[] =
14299 {
14300 { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
14301 { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
14302 { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
14303 { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
14304 { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
14305 { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
14306 { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
14307 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
14308 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
14309 { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
14310 { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32}
14311 };
14312 #endif
14313
14314 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
14315 static const struct asm_cond conds[] =
14316 {
14317 {"eq", 0x0},
14318 {"ne", 0x1},
14319 {"cs", 0x2}, {"hs", 0x2},
14320 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
14321 {"mi", 0x4},
14322 {"pl", 0x5},
14323 {"vs", 0x6},
14324 {"vc", 0x7},
14325 {"hi", 0x8},
14326 {"ls", 0x9},
14327 {"ge", 0xa},
14328 {"lt", 0xb},
14329 {"gt", 0xc},
14330 {"le", 0xd},
14331 {"al", 0xe}
14332 };
14333
14334 static struct asm_barrier_opt barrier_opt_names[] =
14335 {
14336 { "sy", 0xf },
14337 { "un", 0x7 },
14338 { "st", 0xe },
14339 { "unst", 0x6 }
14340 };
14341
14342 /* Table of ARM-format instructions. */
14343
14344 /* Macros for gluing together operand strings. N.B. In all cases
14345 other than OPS0, the trailing OP_stop comes from default
14346 zero-initialization of the unspecified elements of the array. */
14347 #define OPS0() { OP_stop, }
14348 #define OPS1(a) { OP_##a, }
14349 #define OPS2(a,b) { OP_##a,OP_##b, }
14350 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
14351 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
14352 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
14353 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
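/* For example, OPS3 (RR, oRR, SH) expands to { OP_RR, OP_oRR, OP_SH, };
   the remaining elements of the operand array default to OP_stop.  */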
14354
14355 /* These macros abstract out the exact format of the mnemonic table and
14356 save some repeated characters. */
14357
14358 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
14359 #define TxCE(mnem, op, top, nops, ops, ae, te) \
14360 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
14361 THUMB_VARIANT, do_##ae, do_##te }
14362
14363 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
14364 a T_MNEM_xyz enumerator. */
14365 #define TCE(mnem, aop, top, nops, ops, ae, te) \
14366 TxCE(mnem, aop, 0x##top, nops, ops, ae, te)
14367 #define tCE(mnem, aop, top, nops, ops, ae, te) \
14368 TxCE(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
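/* For instance, TCE (swi, f000000, df00, ...) below yields ARM encoding
   0xf000000 and Thumb encoding 0xdf00, whereas tCE (add, 0800000, add, ...)
   yields ARM encoding 0x0800000 and the enumerator T_MNEM_add, which the
   Thumb encoding functions map to a concrete 16- or 32-bit encoding.  */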
14369
14370 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
14371 infix after the third character. */
14372 #define TxC3(mnem, op, top, nops, ops, ae, te) \
14373 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
14374 THUMB_VARIANT, do_##ae, do_##te }
14375 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
14376 { #mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
14377 THUMB_VARIANT, do_##ae, do_##te }
14378 #define TC3(mnem, aop, top, nops, ops, ae, te) \
14379 TxC3(mnem, aop, 0x##top, nops, ops, ae, te)
14380 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
14381 TxC3w(mnem, aop, 0x##top, nops, ops, ae, te)
14382 #define tC3(mnem, aop, top, nops, ops, ae, te) \
14383 TxC3(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
14384 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
14385 TxC3w(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
14386
14387 /* Mnemonic with a conditional infix in an unusual place.  Each and every
14388    variant has to appear in the opcode table.  */
14389 #define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
14390 { #m1 #m2 #m3, OPS##nops ops, sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
14391 0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }
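/* For example, TxCM_ (ld, eq, sh, ...) produces an entry named "ldeqsh"
   whose tag, OT_odd_infix_0 + 2, records that the infix starts after the
   second character; an empty infix gives the tag OT_odd_infix_unc.  The
   ARM-only xCM_ macro further down expands in the same way.  */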
14392
14393 #define TxCM(m1, m2, op, top, nops, ops, ae, te) \
14394 TxCM_(m1, , m2, op, top, nops, ops, ae, te), \
14395 TxCM_(m1, eq, m2, op, top, nops, ops, ae, te), \
14396 TxCM_(m1, ne, m2, op, top, nops, ops, ae, te), \
14397 TxCM_(m1, cs, m2, op, top, nops, ops, ae, te), \
14398 TxCM_(m1, hs, m2, op, top, nops, ops, ae, te), \
14399 TxCM_(m1, cc, m2, op, top, nops, ops, ae, te), \
14400 TxCM_(m1, ul, m2, op, top, nops, ops, ae, te), \
14401 TxCM_(m1, lo, m2, op, top, nops, ops, ae, te), \
14402 TxCM_(m1, mi, m2, op, top, nops, ops, ae, te), \
14403 TxCM_(m1, pl, m2, op, top, nops, ops, ae, te), \
14404 TxCM_(m1, vs, m2, op, top, nops, ops, ae, te), \
14405 TxCM_(m1, vc, m2, op, top, nops, ops, ae, te), \
14406 TxCM_(m1, hi, m2, op, top, nops, ops, ae, te), \
14407 TxCM_(m1, ls, m2, op, top, nops, ops, ae, te), \
14408 TxCM_(m1, ge, m2, op, top, nops, ops, ae, te), \
14409 TxCM_(m1, lt, m2, op, top, nops, ops, ae, te), \
14410 TxCM_(m1, gt, m2, op, top, nops, ops, ae, te), \
14411 TxCM_(m1, le, m2, op, top, nops, ops, ae, te), \
14412 TxCM_(m1, al, m2, op, top, nops, ops, ae, te)
14413
14414 #define TCM(m1,m2, aop, top, nops, ops, ae, te) \
14415 TxCM(m1,m2, aop, 0x##top, nops, ops, ae, te)
14416 #define tCM(m1,m2, aop, top, nops, ops, ae, te) \
14417 TxCM(m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)
14418
14419 /* Mnemonic that cannot be conditionalized. The ARM condition-code
14420 field is still 0xE. Many of the Thumb variants can be executed
14421 conditionally, so this is checked separately. */
14422 #define TUE(mnem, op, top, nops, ops, ae, te) \
14423 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
14424 THUMB_VARIANT, do_##ae, do_##te }
14425
14426 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
14427 condition code field. */
14428 #define TUF(mnem, op, top, nops, ops, ae, te) \
14429 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
14430 THUMB_VARIANT, do_##ae, do_##te }
14431
14432 /* ARM-only variants of all the above. */
14433 #define CE(mnem, op, nops, ops, ae) \
14434 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14435
14436 #define C3(mnem, op, nops, ops, ae) \
14437 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14438
14439 /* Legacy mnemonics that always have conditional infix after the third
14440 character. */
14441 #define CL(mnem, op, nops, ops, ae) \
14442 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
14443 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14444
14445 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
14446 #define cCE(mnem, op, nops, ops, ae) \
14447 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
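/* The Thumb-2 value is simply the ARM value with 0xe in the top nibble;
   the 32-bit Thumb-2 coprocessor encodings share the bit pattern of the
   corresponding ARM encodings with an always (0xE) condition.  */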
14448
14449 /* Legacy coprocessor instructions where conditional infix and conditional
14450 suffix are ambiguous. For consistency this includes all FPA instructions,
14451 not just the potentially ambiguous ones. */
14452 #define cCL(mnem, op, nops, ops, ae) \
14453 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
14454 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
14455
14456 /* Coprocessor, takes either a suffix or a position-3 infix
14457 (for an FPA corner case). */
14458 #define C3E(mnem, op, nops, ops, ae) \
14459 { #mnem, OPS##nops ops, OT_csuf_or_in3, \
14460 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
14461
14462 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
14463 { #m1 #m2 #m3, OPS##nops ops, \
14464 sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
14465 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14466
14467 #define CM(m1, m2, op, nops, ops, ae) \
14468 xCM_(m1, , m2, op, nops, ops, ae), \
14469 xCM_(m1, eq, m2, op, nops, ops, ae), \
14470 xCM_(m1, ne, m2, op, nops, ops, ae), \
14471 xCM_(m1, cs, m2, op, nops, ops, ae), \
14472 xCM_(m1, hs, m2, op, nops, ops, ae), \
14473 xCM_(m1, cc, m2, op, nops, ops, ae), \
14474 xCM_(m1, ul, m2, op, nops, ops, ae), \
14475 xCM_(m1, lo, m2, op, nops, ops, ae), \
14476 xCM_(m1, mi, m2, op, nops, ops, ae), \
14477 xCM_(m1, pl, m2, op, nops, ops, ae), \
14478 xCM_(m1, vs, m2, op, nops, ops, ae), \
14479 xCM_(m1, vc, m2, op, nops, ops, ae), \
14480 xCM_(m1, hi, m2, op, nops, ops, ae), \
14481 xCM_(m1, ls, m2, op, nops, ops, ae), \
14482 xCM_(m1, ge, m2, op, nops, ops, ae), \
14483 xCM_(m1, lt, m2, op, nops, ops, ae), \
14484 xCM_(m1, gt, m2, op, nops, ops, ae), \
14485 xCM_(m1, le, m2, op, nops, ops, ae), \
14486 xCM_(m1, al, m2, op, nops, ops, ae)
14487
14488 #define UE(mnem, op, nops, ops, ae) \
14489 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
14490
14491 #define UF(mnem, op, nops, ops, ae) \
14492 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
14493
14494 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
14495 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
14496 use the same encoding function for each. */
14497 #define NUF(mnem, op, nops, ops, enc) \
14498 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
14499 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
14500
14501 /* Neon data processing, version which indirects through neon_enc_tab for
14502 the various overloaded versions of opcodes. */
14503 #define nUF(mnem, op, nops, ops, enc) \
14504 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op, \
14505 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
14506
14507 /* Neon insn with conditional suffix for the ARM version, non-overloaded
14508 version. */
14509 #define NCE_tag(mnem, op, nops, ops, enc, tag) \
14510 { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
14511 THUMB_VARIANT, do_##enc, do_##enc }
14512
14513 #define NCE(mnem, op, nops, ops, enc) \
14514 NCE_tag(mnem, op, nops, ops, enc, OT_csuffix)
14515
14516 #define NCEF(mnem, op, nops, ops, enc) \
14517 NCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)
14518
14519 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
14520 #define nCE_tag(mnem, op, nops, ops, enc, tag) \
14521 { #mnem, OPS##nops ops, tag, N_MNEM_##op, N_MNEM_##op, \
14522 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
14523
14524 #define nCE(mnem, op, nops, ops, enc) \
14525 nCE_tag(mnem, op, nops, ops, enc, OT_csuffix)
14526
14527 #define nCEF(mnem, op, nops, ops, enc) \
14528 nCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)
14529
14530 #define do_0 0
14531
14532 /* Thumb-only, unconditional. */
14533 #define UT(mnem, op, nops, ops, te) TUE(mnem, 0, op, nops, ops, 0, te)
14534
14535 static const struct asm_opcode insns[] =
14536 {
14537 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
14538 #define THUMB_VARIANT &arm_ext_v4t
14539 tCE(and, 0000000, and, 3, (RR, oRR, SH), arit, t_arit3c),
14540 tC3(ands, 0100000, ands, 3, (RR, oRR, SH), arit, t_arit3c),
14541 tCE(eor, 0200000, eor, 3, (RR, oRR, SH), arit, t_arit3c),
14542 tC3(eors, 0300000, eors, 3, (RR, oRR, SH), arit, t_arit3c),
14543 tCE(sub, 0400000, sub, 3, (RR, oRR, SH), arit, t_add_sub),
14544 tC3(subs, 0500000, subs, 3, (RR, oRR, SH), arit, t_add_sub),
14545 tCE(add, 0800000, add, 3, (RR, oRR, SHG), arit, t_add_sub),
14546 tC3(adds, 0900000, adds, 3, (RR, oRR, SHG), arit, t_add_sub),
14547 tCE(adc, 0a00000, adc, 3, (RR, oRR, SH), arit, t_arit3c),
14548 tC3(adcs, 0b00000, adcs, 3, (RR, oRR, SH), arit, t_arit3c),
14549 tCE(sbc, 0c00000, sbc, 3, (RR, oRR, SH), arit, t_arit3),
14550 tC3(sbcs, 0d00000, sbcs, 3, (RR, oRR, SH), arit, t_arit3),
14551 tCE(orr, 1800000, orr, 3, (RR, oRR, SH), arit, t_arit3c),
14552 tC3(orrs, 1900000, orrs, 3, (RR, oRR, SH), arit, t_arit3c),
14553 tCE(bic, 1c00000, bic, 3, (RR, oRR, SH), arit, t_arit3),
14554 tC3(bics, 1d00000, bics, 3, (RR, oRR, SH), arit, t_arit3),
14555
14556 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
14557 for setting PSR flag bits. They are obsolete in V6 and do not
14558 have Thumb equivalents. */
14559 tCE(tst, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
14560 tC3w(tsts, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
14561 CL(tstp, 110f000, 2, (RR, SH), cmp),
14562 tCE(cmp, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
14563 tC3w(cmps, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
14564 CL(cmpp, 150f000, 2, (RR, SH), cmp),
14565 tCE(cmn, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
14566 tC3w(cmns, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
14567 CL(cmnp, 170f000, 2, (RR, SH), cmp),
14568
14569 tCE(mov, 1a00000, mov, 2, (RR, SH), mov, t_mov_cmp),
14570 tC3(movs, 1b00000, movs, 2, (RR, SH), mov, t_mov_cmp),
14571 tCE(mvn, 1e00000, mvn, 2, (RR, SH), mov, t_mvn_tst),
14572 tC3(mvns, 1f00000, mvns, 2, (RR, SH), mov, t_mvn_tst),
14573
14574 tCE(ldr, 4100000, ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
14575 tC3(ldrb, 4500000, ldrb, 2, (RR, ADDRGLDR),ldst, t_ldst),
14576 tCE(str, 4000000, str, 2, (RR, ADDRGLDR),ldst, t_ldst),
14577 tC3(strb, 4400000, strb, 2, (RR, ADDRGLDR),ldst, t_ldst),
14578
14579 tCE(stm, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14580 tC3(stmia, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14581 tC3(stmea, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14582 tCE(ldm, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14583 tC3(ldmia, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14584 tC3(ldmfd, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14585
14586 TCE(swi, f000000, df00, 1, (EXPi), swi, t_swi),
14587 TCE(svc, f000000, df00, 1, (EXPi), swi, t_swi),
14588 tCE(b, a000000, b, 1, (EXPr), branch, t_branch),
14589 TCE(bl, b000000, f000f800, 1, (EXPr), bl, t_branch23),
14590
14591 /* Pseudo ops. */
14592 tCE(adr, 28f0000, adr, 2, (RR, EXP), adr, t_adr),
14593 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
14594 tCE(nop, 1a00000, nop, 1, (oI255c), nop, t_nop),
14595
14596 /* Thumb-compatibility pseudo ops. */
14597 tCE(lsl, 1a00000, lsl, 3, (RR, oRR, SH), shift, t_shift),
14598 tC3(lsls, 1b00000, lsls, 3, (RR, oRR, SH), shift, t_shift),
14599 tCE(lsr, 1a00020, lsr, 3, (RR, oRR, SH), shift, t_shift),
14600 tC3(lsrs, 1b00020, lsrs, 3, (RR, oRR, SH), shift, t_shift),
14601 tCE(asr, 1a00040, asr, 3, (RR, oRR, SH), shift, t_shift),
14602 tC3(asrs, 1b00040, asrs, 3, (RR, oRR, SH), shift, t_shift),
14603 tCE(ror, 1a00060, ror, 3, (RR, oRR, SH), shift, t_shift),
14604 tC3(rors, 1b00060, rors, 3, (RR, oRR, SH), shift, t_shift),
14605 tCE(neg, 2600000, neg, 2, (RR, RR), rd_rn, t_neg),
14606 tC3(negs, 2700000, negs, 2, (RR, RR), rd_rn, t_neg),
14607 tCE(push, 92d0000, push, 1, (REGLST), push_pop, t_push_pop),
14608 tCE(pop, 8bd0000, pop, 1, (REGLST), push_pop, t_push_pop),
14609
14610 #undef THUMB_VARIANT
14611 #define THUMB_VARIANT &arm_ext_v6
14612 TCE(cpy, 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
14613
14614 /* V1 instructions with no Thumb analogue prior to V6T2. */
14615 #undef THUMB_VARIANT
14616 #define THUMB_VARIANT &arm_ext_v6t2
14617 TCE(rsb, 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
14618 TC3(rsbs, 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
14619 TCE(teq, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
14620 TC3w(teqs, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
14621 CL(teqp, 130f000, 2, (RR, SH), cmp),
14622
14623 TC3(ldrt, 4300000, f8500e00, 2, (RR, ADDR), ldstt, t_ldstt),
14624 TC3(ldrbt, 4700000, f8100e00, 2, (RR, ADDR), ldstt, t_ldstt),
14625 TC3(strt, 4200000, f8400e00, 2, (RR, ADDR), ldstt, t_ldstt),
14626 TC3(strbt, 4600000, f8000e00, 2, (RR, ADDR), ldstt, t_ldstt),
14627
14628 TC3(stmdb, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14629 TC3(stmfd, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14630
14631 TC3(ldmdb, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14632 TC3(ldmea, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14633
14634 /* V1 instructions with no Thumb analogue at all. */
14635 CE(rsc, 0e00000, 3, (RR, oRR, SH), arit),
14636 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
14637
14638 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
14639 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
14640 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
14641 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
14642 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
14643 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
14644 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
14645 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
14646
14647 #undef ARM_VARIANT
14648 #define ARM_VARIANT &arm_ext_v2 /* ARM 2 - multiplies. */
14649 #undef THUMB_VARIANT
14650 #define THUMB_VARIANT &arm_ext_v4t
14651 tCE(mul, 0000090, mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
14652 tC3(muls, 0100090, muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
14653
14654 #undef THUMB_VARIANT
14655 #define THUMB_VARIANT &arm_ext_v6t2
14656 TCE(mla, 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
14657 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
14658
14659 /* Generic coprocessor instructions. */
14660 TCE(cdp, e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
14661 TCE(ldc, c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14662 TC3(ldcl, c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14663 TCE(stc, c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14664 TC3(stcl, c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14665 TCE(mcr, e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14666 TCE(mrc, e100010, ee100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14667
14668 #undef ARM_VARIANT
14669 #define ARM_VARIANT &arm_ext_v2s /* ARM 3 - swp instructions. */
14670 CE(swp, 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
14671 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
14672
14673 #undef ARM_VARIANT
14674 #define ARM_VARIANT &arm_ext_v3 /* ARM 6 Status register instructions. */
14675 TCE(mrs, 10f0000, f3ef8000, 2, (APSR_RR, RVC_PSR), mrs, t_mrs),
14676 TCE(msr, 120f000, f3808000, 2, (RVC_PSR, RR_EXi), msr, t_msr),
14677
14678 #undef ARM_VARIANT
14679 #define ARM_VARIANT &arm_ext_v3m /* ARM 7M long multiplies. */
14680 TCE(smull, 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14681 CM(smull,s, 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14682 TCE(umull, 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14683 CM(umull,s, 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14684 TCE(smlal, 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14685 CM(smlal,s, 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14686 TCE(umlal, 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14687 CM(umlal,s, 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14688
14689 #undef ARM_VARIANT
14690 #define ARM_VARIANT &arm_ext_v4 /* ARM Architecture 4. */
14691 #undef THUMB_VARIANT
14692 #define THUMB_VARIANT &arm_ext_v4t
14693 tC3(ldrh, 01000b0, ldrh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14694 tC3(strh, 00000b0, strh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14695 tC3(ldrsh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14696 tC3(ldrsb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14697 tCM(ld,sh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14698 tCM(ld,sb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14699
14700 #undef ARM_VARIANT
14701 #define ARM_VARIANT &arm_ext_v4t_5
14702 /* ARM Architecture 4T. */
14703 /* Note: bx (and blx) are required on V5, even if the processor does
14704 not support Thumb. */
14705 TCE(bx, 12fff10, 4700, 1, (RR), bx, t_bx),
14706
14707 #undef ARM_VARIANT
14708 #define ARM_VARIANT &arm_ext_v5 /* ARM Architecture 5T. */
14709 #undef THUMB_VARIANT
14710 #define THUMB_VARIANT &arm_ext_v5t
14711 /* Note: blx has 2 variants; the .value coded here is for
14712 BLX(2). Only this variant has conditional execution. */
14713 TCE(blx, 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
14714 TUE(bkpt, 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
14715
14716 #undef THUMB_VARIANT
14717 #define THUMB_VARIANT &arm_ext_v6t2
14718 TCE(clz, 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
14719 TUF(ldc2, c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14720 TUF(ldc2l, c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14721 TUF(stc2, c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14722 TUF(stc2l, c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14723 TUF(cdp2, e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
14724 TUF(mcr2, e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14725 TUF(mrc2, e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14726
14727 #undef ARM_VARIANT
14728 #define ARM_VARIANT &arm_ext_v5exp /* ARM Architecture 5TExP. */
14729 TCE(smlabb, 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14730 TCE(smlatb, 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14731 TCE(smlabt, 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14732 TCE(smlatt, 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14733
14734 TCE(smlawb, 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14735 TCE(smlawt, 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14736
14737 TCE(smlalbb, 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
14738 TCE(smlaltb, 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
14739 TCE(smlalbt, 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
14740 TCE(smlaltt, 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
14741
14742 TCE(smulbb, 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14743 TCE(smultb, 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14744 TCE(smulbt, 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14745 TCE(smultt, 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14746
14747 TCE(smulwb, 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14748 TCE(smulwt, 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14749
14750 TCE(qadd, 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
14751 TCE(qdadd, 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
14752 TCE(qsub, 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
14753 TCE(qdsub, 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
14754
14755 #undef ARM_VARIANT
14756 #define ARM_VARIANT &arm_ext_v5e /* ARM Architecture 5TE. */
14757 TUF(pld, 450f000, f810f000, 1, (ADDR), pld, t_pld),
14758 TC3(ldrd, 00000d0, e9500000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),
14759 TC3(strd, 00000f0, e9400000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),
14760
14761 TCE(mcrr, c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
14762 TCE(mrrc, c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
14763
14764 #undef ARM_VARIANT
14765 #define ARM_VARIANT &arm_ext_v5j /* ARM Architecture 5TEJ. */
14766 TCE(bxj, 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
14767
14768 #undef ARM_VARIANT
14769 #define ARM_VARIANT &arm_ext_v6 /* ARM V6. */
14770 #undef THUMB_VARIANT
14771 #define THUMB_VARIANT &arm_ext_v6
14772 TUF(cpsie, 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
14773 TUF(cpsid, 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
14774 tCE(rev, 6bf0f30, rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
14775 tCE(rev16, 6bf0fb0, rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
14776 tCE(revsh, 6ff0fb0, revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
14777 tCE(sxth, 6bf0070, sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
14778 tCE(uxth, 6ff0070, uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
14779 tCE(sxtb, 6af0070, sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
14780 tCE(uxtb, 6ef0070, uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
14781 TUF(setend, 1010000, b650, 1, (ENDI), setend, t_setend),
14782
14783 #undef THUMB_VARIANT
14784 #define THUMB_VARIANT &arm_ext_v6t2
14785 TCE(ldrex, 1900f9f, e8500f00, 2, (RRnpc, ADDR), ldrex, t_ldrex),
14786 TUF(mcrr2, c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
14787 TUF(mrrc2, c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
14788
14789 TCE(ssat, 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
14790 TCE(usat, 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
14791
14792 /* ARM V6 not included in V7M (e.g. integer SIMD).  */
14793 #undef THUMB_VARIANT
14794 #define THUMB_VARIANT &arm_ext_v6_notm
14795 TUF(cps, 1020000, f3af8100, 1, (I31b), imm0, t_cps),
14796 TCE(pkhbt, 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
14797 TCE(pkhtb, 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
14798 TCE(qadd16, 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14799 TCE(qadd8, 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14800 TCE(qaddsubx, 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14801 TCE(qsub16, 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14802 TCE(qsub8, 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14803 TCE(qsubaddx, 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14804 TCE(sadd16, 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14805 TCE(sadd8, 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14806 TCE(saddsubx, 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14807 TCE(shadd16, 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14808 TCE(shadd8, 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14809 TCE(shaddsubx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14810 TCE(shsub16, 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14811 TCE(shsub8, 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14812 TCE(shsubaddx, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14813 TCE(ssub16, 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14814 TCE(ssub8, 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14815 TCE(ssubaddx, 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14816 TCE(uadd16, 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14817 TCE(uadd8, 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14818 TCE(uaddsubx, 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14819 TCE(uhadd16, 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14820 TCE(uhadd8, 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14821 TCE(uhaddsubx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14822 TCE(uhsub16, 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14823 TCE(uhsub8, 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14824 TCE(uhsubaddx, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14825 TCE(uqadd16, 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14826 TCE(uqadd8, 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14827 TCE(uqaddsubx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14828 TCE(uqsub16, 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14829 TCE(uqsub8, 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14830 TCE(uqsubaddx, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14831 TCE(usub16, 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14832 TCE(usub8, 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14833 TCE(usubaddx, 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14834 TUF(rfeia, 8900a00, e990c000, 1, (RRw), rfe, rfe),
14835 UF(rfeib, 9900a00, 1, (RRw), rfe),
14836 UF(rfeda, 8100a00, 1, (RRw), rfe),
14837 TUF(rfedb, 9100a00, e810c000, 1, (RRw), rfe, rfe),
14838 TUF(rfefd, 8900a00, e990c000, 1, (RRw), rfe, rfe),
14839 UF(rfefa, 9900a00, 1, (RRw), rfe),
14840 UF(rfeea, 8100a00, 1, (RRw), rfe),
14841 TUF(rfeed, 9100a00, e810c000, 1, (RRw), rfe, rfe),
14842 TCE(sxtah, 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
14843 TCE(sxtab16, 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
14844 TCE(sxtab, 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
14845 TCE(sxtb16, 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
14846 TCE(uxtah, 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
14847 TCE(uxtab16, 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
14848 TCE(uxtab, 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
14849 TCE(uxtb16, 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
14850 TCE(sel, 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14851 TCE(smlad, 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14852 TCE(smladx, 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14853 TCE(smlald, 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
14854 TCE(smlaldx, 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
14855 TCE(smlsd, 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14856 TCE(smlsdx, 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14857 TCE(smlsld, 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
14858 TCE(smlsldx, 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
14859 TCE(smmla, 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14860 TCE(smmlar, 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14861 TCE(smmls, 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14862 TCE(smmlsr, 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14863 TCE(smmul, 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14864 TCE(smmulr, 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14865 TCE(smuad, 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14866 TCE(smuadx, 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14867 TCE(smusd, 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14868 TCE(smusdx, 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14869 TUF(srsia, 8cd0500, e980c000, 1, (I31w), srs, srs),
14870 UF(srsib, 9cd0500, 1, (I31w), srs),
14871 UF(srsda, 84d0500, 1, (I31w), srs),
14872 TUF(srsdb, 94d0500, e800c000, 1, (I31w), srs, srs),
14873 TCE(ssat16, 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
14874 TCE(strex, 1800f90, e8400000, 3, (RRnpc, RRnpc, ADDR), strex, t_strex),
14875 TCE(umaal, 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
14876 TCE(usad8, 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14877 TCE(usada8, 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14878 TCE(usat16, 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
14879
14880 #undef ARM_VARIANT
14881 #define ARM_VARIANT &arm_ext_v6k
14882 #undef THUMB_VARIANT
14883 #define THUMB_VARIANT &arm_ext_v6k
14884 tCE(yield, 320f001, yield, 0, (), noargs, t_hint),
14885 tCE(wfe, 320f002, wfe, 0, (), noargs, t_hint),
14886 tCE(wfi, 320f003, wfi, 0, (), noargs, t_hint),
14887 tCE(sev, 320f004, sev, 0, (), noargs, t_hint),
14888
14889 #undef THUMB_VARIANT
14890 #define THUMB_VARIANT &arm_ext_v6_notm
14891 TCE(ldrexd, 1b00f9f, e8d0007f, 3, (RRnpc, oRRnpc, RRnpcb), ldrexd, t_ldrexd),
14892 TCE(strexd, 1a00f90, e8c00070, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb), strexd, t_strexd),
14893
14894 #undef THUMB_VARIANT
14895 #define THUMB_VARIANT &arm_ext_v6t2
14896 TCE(ldrexb, 1d00f9f, e8d00f4f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
14897 TCE(ldrexh, 1f00f9f, e8d00f5f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
14898 TCE(strexb, 1c00f90, e8c00f40, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
14899 TCE(strexh, 1e00f90, e8c00f50, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
14900 TUF(clrex, 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
14901
14902 #undef ARM_VARIANT
14903 #define ARM_VARIANT &arm_ext_v6z
14904 TCE(smc, 1600070, f7f08000, 1, (EXPi), smc, t_smc),
14905
14906 #undef ARM_VARIANT
14907 #define ARM_VARIANT &arm_ext_v6t2
14908 TCE(bfc, 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
14909 TCE(bfi, 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
14910 TCE(sbfx, 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
14911 TCE(ubfx, 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
14912
14913 TCE(mls, 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
14914 TCE(movw, 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
14915 TCE(movt, 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
14916 TCE(rbit, 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
14917
14918 TC3(ldrht, 03000b0, f8300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
14919 TC3(ldrsht, 03000f0, f9300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
14920 TC3(ldrsbt, 03000d0, f9100e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
14921 TC3(strht, 02000b0, f8200e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
14922
14923 UT(cbnz, b900, 2, (RR, EXP), t_czb),
14924 UT(cbz, b100, 2, (RR, EXP), t_czb),
14925 /* ARM does not really have an IT instruction, so always allow it. */
14926 #undef ARM_VARIANT
14927 #define ARM_VARIANT &arm_ext_v1
14928 TUE(it, 0, bf08, 1, (COND), it, t_it),
14929 TUE(itt, 0, bf0c, 1, (COND), it, t_it),
14930 TUE(ite, 0, bf04, 1, (COND), it, t_it),
14931 TUE(ittt, 0, bf0e, 1, (COND), it, t_it),
14932 TUE(itet, 0, bf06, 1, (COND), it, t_it),
14933 TUE(itte, 0, bf0a, 1, (COND), it, t_it),
14934 TUE(itee, 0, bf02, 1, (COND), it, t_it),
14935 TUE(itttt, 0, bf0f, 1, (COND), it, t_it),
14936 TUE(itett, 0, bf07, 1, (COND), it, t_it),
14937 TUE(ittet, 0, bf0b, 1, (COND), it, t_it),
14938 TUE(iteet, 0, bf03, 1, (COND), it, t_it),
14939 TUE(ittte, 0, bf0d, 1, (COND), it, t_it),
14940 TUE(itete, 0, bf05, 1, (COND), it, t_it),
14941 TUE(ittee, 0, bf09, 1, (COND), it, t_it),
14942 TUE(iteee, 0, bf01, 1, (COND), it, t_it),
14943
14944 /* Thumb2 only instructions. */
14945 #undef ARM_VARIANT
14946 #define ARM_VARIANT NULL
14947
14948 TCE(addw, 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
14949 TCE(subw, 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
14950 TCE(tbb, 0, e8d0f000, 1, (TB), 0, t_tb),
14951 TCE(tbh, 0, e8d0f010, 1, (TB), 0, t_tb),
14952
14953 /* Thumb-2 hardware division instructions (R and M profiles only). */
14954 #undef THUMB_VARIANT
14955 #define THUMB_VARIANT &arm_ext_div
14956 TCE(sdiv, 0, fb90f0f0, 3, (RR, oRR, RR), 0, t_div),
14957 TCE(udiv, 0, fbb0f0f0, 3, (RR, oRR, RR), 0, t_div),
14958
14959 /* ARM V7 instructions. */
14960 #undef ARM_VARIANT
14961 #define ARM_VARIANT &arm_ext_v7
14962 #undef THUMB_VARIANT
14963 #define THUMB_VARIANT &arm_ext_v7
14964 TUF(pli, 450f000, f910f000, 1, (ADDR), pli, t_pld),
14965 TCE(dbg, 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
14966 TUF(dmb, 57ff050, f3bf8f50, 1, (oBARRIER), barrier, t_barrier),
14967 TUF(dsb, 57ff040, f3bf8f40, 1, (oBARRIER), barrier, t_barrier),
14968 TUF(isb, 57ff060, f3bf8f60, 1, (oBARRIER), barrier, t_barrier),
14969
14970 #undef ARM_VARIANT
14971 #define ARM_VARIANT &fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
14972 cCE(wfs, e200110, 1, (RR), rd),
14973 cCE(rfs, e300110, 1, (RR), rd),
14974 cCE(wfc, e400110, 1, (RR), rd),
14975 cCE(rfc, e500110, 1, (RR), rd),
14976
14977 cCL(ldfs, c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
14978 cCL(ldfd, c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
14979 cCL(ldfe, c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
14980 cCL(ldfp, c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
14981
14982 cCL(stfs, c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
14983 cCL(stfd, c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
14984 cCL(stfe, c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
14985 cCL(stfp, c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
14986
14987 cCL(mvfs, e008100, 2, (RF, RF_IF), rd_rm),
14988 cCL(mvfsp, e008120, 2, (RF, RF_IF), rd_rm),
14989 cCL(mvfsm, e008140, 2, (RF, RF_IF), rd_rm),
14990 cCL(mvfsz, e008160, 2, (RF, RF_IF), rd_rm),
14991 cCL(mvfd, e008180, 2, (RF, RF_IF), rd_rm),
14992 cCL(mvfdp, e0081a0, 2, (RF, RF_IF), rd_rm),
14993 cCL(mvfdm, e0081c0, 2, (RF, RF_IF), rd_rm),
14994 cCL(mvfdz, e0081e0, 2, (RF, RF_IF), rd_rm),
14995 cCL(mvfe, e088100, 2, (RF, RF_IF), rd_rm),
14996 cCL(mvfep, e088120, 2, (RF, RF_IF), rd_rm),
14997 cCL(mvfem, e088140, 2, (RF, RF_IF), rd_rm),
14998 cCL(mvfez, e088160, 2, (RF, RF_IF), rd_rm),
14999
15000 cCL(mnfs, e108100, 2, (RF, RF_IF), rd_rm),
15001 cCL(mnfsp, e108120, 2, (RF, RF_IF), rd_rm),
15002 cCL(mnfsm, e108140, 2, (RF, RF_IF), rd_rm),
15003 cCL(mnfsz, e108160, 2, (RF, RF_IF), rd_rm),
15004 cCL(mnfd, e108180, 2, (RF, RF_IF), rd_rm),
15005 cCL(mnfdp, e1081a0, 2, (RF, RF_IF), rd_rm),
15006 cCL(mnfdm, e1081c0, 2, (RF, RF_IF), rd_rm),
15007 cCL(mnfdz, e1081e0, 2, (RF, RF_IF), rd_rm),
15008 cCL(mnfe, e188100, 2, (RF, RF_IF), rd_rm),
15009 cCL(mnfep, e188120, 2, (RF, RF_IF), rd_rm),
15010 cCL(mnfem, e188140, 2, (RF, RF_IF), rd_rm),
15011 cCL(mnfez, e188160, 2, (RF, RF_IF), rd_rm),
15012
15013 cCL(abss, e208100, 2, (RF, RF_IF), rd_rm),
15014 cCL(abssp, e208120, 2, (RF, RF_IF), rd_rm),
15015 cCL(abssm, e208140, 2, (RF, RF_IF), rd_rm),
15016 cCL(abssz, e208160, 2, (RF, RF_IF), rd_rm),
15017 cCL(absd, e208180, 2, (RF, RF_IF), rd_rm),
15018 cCL(absdp, e2081a0, 2, (RF, RF_IF), rd_rm),
15019 cCL(absdm, e2081c0, 2, (RF, RF_IF), rd_rm),
15020 cCL(absdz, e2081e0, 2, (RF, RF_IF), rd_rm),
15021 cCL(abse, e288100, 2, (RF, RF_IF), rd_rm),
15022 cCL(absep, e288120, 2, (RF, RF_IF), rd_rm),
15023 cCL(absem, e288140, 2, (RF, RF_IF), rd_rm),
15024 cCL(absez, e288160, 2, (RF, RF_IF), rd_rm),
15025
15026 cCL(rnds, e308100, 2, (RF, RF_IF), rd_rm),
15027 cCL(rndsp, e308120, 2, (RF, RF_IF), rd_rm),
15028 cCL(rndsm, e308140, 2, (RF, RF_IF), rd_rm),
15029 cCL(rndsz, e308160, 2, (RF, RF_IF), rd_rm),
15030 cCL(rndd, e308180, 2, (RF, RF_IF), rd_rm),
15031 cCL(rnddp, e3081a0, 2, (RF, RF_IF), rd_rm),
15032 cCL(rnddm, e3081c0, 2, (RF, RF_IF), rd_rm),
15033 cCL(rnddz, e3081e0, 2, (RF, RF_IF), rd_rm),
15034 cCL(rnde, e388100, 2, (RF, RF_IF), rd_rm),
15035 cCL(rndep, e388120, 2, (RF, RF_IF), rd_rm),
15036 cCL(rndem, e388140, 2, (RF, RF_IF), rd_rm),
15037 cCL(rndez, e388160, 2, (RF, RF_IF), rd_rm),
15038
15039 cCL(sqts, e408100, 2, (RF, RF_IF), rd_rm),
15040 cCL(sqtsp, e408120, 2, (RF, RF_IF), rd_rm),
15041 cCL(sqtsm, e408140, 2, (RF, RF_IF), rd_rm),
15042 cCL(sqtsz, e408160, 2, (RF, RF_IF), rd_rm),
15043 cCL(sqtd, e408180, 2, (RF, RF_IF), rd_rm),
15044 cCL(sqtdp, e4081a0, 2, (RF, RF_IF), rd_rm),
15045 cCL(sqtdm, e4081c0, 2, (RF, RF_IF), rd_rm),
15046 cCL(sqtdz, e4081e0, 2, (RF, RF_IF), rd_rm),
15047 cCL(sqte, e488100, 2, (RF, RF_IF), rd_rm),
15048 cCL(sqtep, e488120, 2, (RF, RF_IF), rd_rm),
15049 cCL(sqtem, e488140, 2, (RF, RF_IF), rd_rm),
15050 cCL(sqtez, e488160, 2, (RF, RF_IF), rd_rm),
15051
15052 cCL(logs, e508100, 2, (RF, RF_IF), rd_rm),
15053 cCL(logsp, e508120, 2, (RF, RF_IF), rd_rm),
15054 cCL(logsm, e508140, 2, (RF, RF_IF), rd_rm),
15055 cCL(logsz, e508160, 2, (RF, RF_IF), rd_rm),
15056 cCL(logd, e508180, 2, (RF, RF_IF), rd_rm),
15057 cCL(logdp, e5081a0, 2, (RF, RF_IF), rd_rm),
15058 cCL(logdm, e5081c0, 2, (RF, RF_IF), rd_rm),
15059 cCL(logdz, e5081e0, 2, (RF, RF_IF), rd_rm),
15060 cCL(loge, e588100, 2, (RF, RF_IF), rd_rm),
15061 cCL(logep, e588120, 2, (RF, RF_IF), rd_rm),
15062 cCL(logem, e588140, 2, (RF, RF_IF), rd_rm),
15063 cCL(logez, e588160, 2, (RF, RF_IF), rd_rm),
15064
15065 cCL(lgns, e608100, 2, (RF, RF_IF), rd_rm),
15066 cCL(lgnsp, e608120, 2, (RF, RF_IF), rd_rm),
15067 cCL(lgnsm, e608140, 2, (RF, RF_IF), rd_rm),
15068 cCL(lgnsz, e608160, 2, (RF, RF_IF), rd_rm),
15069 cCL(lgnd, e608180, 2, (RF, RF_IF), rd_rm),
15070 cCL(lgndp, e6081a0, 2, (RF, RF_IF), rd_rm),
15071 cCL(lgndm, e6081c0, 2, (RF, RF_IF), rd_rm),
15072 cCL(lgndz, e6081e0, 2, (RF, RF_IF), rd_rm),
15073 cCL(lgne, e688100, 2, (RF, RF_IF), rd_rm),
15074 cCL(lgnep, e688120, 2, (RF, RF_IF), rd_rm),
15075 cCL(lgnem, e688140, 2, (RF, RF_IF), rd_rm),
15076 cCL(lgnez, e688160, 2, (RF, RF_IF), rd_rm),
15077
15078 cCL(exps, e708100, 2, (RF, RF_IF), rd_rm),
15079 cCL(expsp, e708120, 2, (RF, RF_IF), rd_rm),
15080 cCL(expsm, e708140, 2, (RF, RF_IF), rd_rm),
15081 cCL(expsz, e708160, 2, (RF, RF_IF), rd_rm),
15082 cCL(expd, e708180, 2, (RF, RF_IF), rd_rm),
15083 cCL(expdp, e7081a0, 2, (RF, RF_IF), rd_rm),
15084 cCL(expdm, e7081c0, 2, (RF, RF_IF), rd_rm),
15085 cCL(expdz, e7081e0, 2, (RF, RF_IF), rd_rm),
15086 cCL(expe, e788100, 2, (RF, RF_IF), rd_rm),
15087 cCL(expep, e788120, 2, (RF, RF_IF), rd_rm),
15088 cCL(expem, e788140, 2, (RF, RF_IF), rd_rm),
15089 cCL(expez, e788160, 2, (RF, RF_IF), rd_rm),
15090
15091 cCL(sins, e808100, 2, (RF, RF_IF), rd_rm),
15092 cCL(sinsp, e808120, 2, (RF, RF_IF), rd_rm),
15093 cCL(sinsm, e808140, 2, (RF, RF_IF), rd_rm),
15094 cCL(sinsz, e808160, 2, (RF, RF_IF), rd_rm),
15095 cCL(sind, e808180, 2, (RF, RF_IF), rd_rm),
15096 cCL(sindp, e8081a0, 2, (RF, RF_IF), rd_rm),
15097 cCL(sindm, e8081c0, 2, (RF, RF_IF), rd_rm),
15098 cCL(sindz, e8081e0, 2, (RF, RF_IF), rd_rm),
15099 cCL(sine, e888100, 2, (RF, RF_IF), rd_rm),
15100 cCL(sinep, e888120, 2, (RF, RF_IF), rd_rm),
15101 cCL(sinem, e888140, 2, (RF, RF_IF), rd_rm),
15102 cCL(sinez, e888160, 2, (RF, RF_IF), rd_rm),
15103
15104 cCL(coss, e908100, 2, (RF, RF_IF), rd_rm),
15105 cCL(cossp, e908120, 2, (RF, RF_IF), rd_rm),
15106 cCL(cossm, e908140, 2, (RF, RF_IF), rd_rm),
15107 cCL(cossz, e908160, 2, (RF, RF_IF), rd_rm),
15108 cCL(cosd, e908180, 2, (RF, RF_IF), rd_rm),
15109 cCL(cosdp, e9081a0, 2, (RF, RF_IF), rd_rm),
15110 cCL(cosdm, e9081c0, 2, (RF, RF_IF), rd_rm),
15111 cCL(cosdz, e9081e0, 2, (RF, RF_IF), rd_rm),
15112 cCL(cose, e988100, 2, (RF, RF_IF), rd_rm),
15113 cCL(cosep, e988120, 2, (RF, RF_IF), rd_rm),
15114 cCL(cosem, e988140, 2, (RF, RF_IF), rd_rm),
15115 cCL(cosez, e988160, 2, (RF, RF_IF), rd_rm),
15116
15117 cCL(tans, ea08100, 2, (RF, RF_IF), rd_rm),
15118 cCL(tansp, ea08120, 2, (RF, RF_IF), rd_rm),
15119 cCL(tansm, ea08140, 2, (RF, RF_IF), rd_rm),
15120 cCL(tansz, ea08160, 2, (RF, RF_IF), rd_rm),
15121 cCL(tand, ea08180, 2, (RF, RF_IF), rd_rm),
15122 cCL(tandp, ea081a0, 2, (RF, RF_IF), rd_rm),
15123 cCL(tandm, ea081c0, 2, (RF, RF_IF), rd_rm),
15124 cCL(tandz, ea081e0, 2, (RF, RF_IF), rd_rm),
15125 cCL(tane, ea88100, 2, (RF, RF_IF), rd_rm),
15126 cCL(tanep, ea88120, 2, (RF, RF_IF), rd_rm),
15127 cCL(tanem, ea88140, 2, (RF, RF_IF), rd_rm),
15128 cCL(tanez, ea88160, 2, (RF, RF_IF), rd_rm),
15129
15130 cCL(asns, eb08100, 2, (RF, RF_IF), rd_rm),
15131 cCL(asnsp, eb08120, 2, (RF, RF_IF), rd_rm),
15132 cCL(asnsm, eb08140, 2, (RF, RF_IF), rd_rm),
15133 cCL(asnsz, eb08160, 2, (RF, RF_IF), rd_rm),
15134 cCL(asnd, eb08180, 2, (RF, RF_IF), rd_rm),
15135 cCL(asndp, eb081a0, 2, (RF, RF_IF), rd_rm),
15136 cCL(asndm, eb081c0, 2, (RF, RF_IF), rd_rm),
15137 cCL(asndz, eb081e0, 2, (RF, RF_IF), rd_rm),
15138 cCL(asne, eb88100, 2, (RF, RF_IF), rd_rm),
15139 cCL(asnep, eb88120, 2, (RF, RF_IF), rd_rm),
15140 cCL(asnem, eb88140, 2, (RF, RF_IF), rd_rm),
15141 cCL(asnez, eb88160, 2, (RF, RF_IF), rd_rm),
15142
15143 cCL(acss, ec08100, 2, (RF, RF_IF), rd_rm),
15144 cCL(acssp, ec08120, 2, (RF, RF_IF), rd_rm),
15145 cCL(acssm, ec08140, 2, (RF, RF_IF), rd_rm),
15146 cCL(acssz, ec08160, 2, (RF, RF_IF), rd_rm),
15147 cCL(acsd, ec08180, 2, (RF, RF_IF), rd_rm),
15148 cCL(acsdp, ec081a0, 2, (RF, RF_IF), rd_rm),
15149 cCL(acsdm, ec081c0, 2, (RF, RF_IF), rd_rm),
15150 cCL(acsdz, ec081e0, 2, (RF, RF_IF), rd_rm),
15151 cCL(acse, ec88100, 2, (RF, RF_IF), rd_rm),
15152 cCL(acsep, ec88120, 2, (RF, RF_IF), rd_rm),
15153 cCL(acsem, ec88140, 2, (RF, RF_IF), rd_rm),
15154 cCL(acsez, ec88160, 2, (RF, RF_IF), rd_rm),
15155
15156 cCL(atns, ed08100, 2, (RF, RF_IF), rd_rm),
15157 cCL(atnsp, ed08120, 2, (RF, RF_IF), rd_rm),
15158 cCL(atnsm, ed08140, 2, (RF, RF_IF), rd_rm),
15159 cCL(atnsz, ed08160, 2, (RF, RF_IF), rd_rm),
15160 cCL(atnd, ed08180, 2, (RF, RF_IF), rd_rm),
15161 cCL(atndp, ed081a0, 2, (RF, RF_IF), rd_rm),
15162 cCL(atndm, ed081c0, 2, (RF, RF_IF), rd_rm),
15163 cCL(atndz, ed081e0, 2, (RF, RF_IF), rd_rm),
15164 cCL(atne, ed88100, 2, (RF, RF_IF), rd_rm),
15165 cCL(atnep, ed88120, 2, (RF, RF_IF), rd_rm),
15166 cCL(atnem, ed88140, 2, (RF, RF_IF), rd_rm),
15167 cCL(atnez, ed88160, 2, (RF, RF_IF), rd_rm),
15168
15169 cCL(urds, ee08100, 2, (RF, RF_IF), rd_rm),
15170 cCL(urdsp, ee08120, 2, (RF, RF_IF), rd_rm),
15171 cCL(urdsm, ee08140, 2, (RF, RF_IF), rd_rm),
15172 cCL(urdsz, ee08160, 2, (RF, RF_IF), rd_rm),
15173 cCL(urdd, ee08180, 2, (RF, RF_IF), rd_rm),
15174 cCL(urddp, ee081a0, 2, (RF, RF_IF), rd_rm),
15175 cCL(urddm, ee081c0, 2, (RF, RF_IF), rd_rm),
15176 cCL(urddz, ee081e0, 2, (RF, RF_IF), rd_rm),
15177 cCL(urde, ee88100, 2, (RF, RF_IF), rd_rm),
15178 cCL(urdep, ee88120, 2, (RF, RF_IF), rd_rm),
15179 cCL(urdem, ee88140, 2, (RF, RF_IF), rd_rm),
15180 cCL(urdez, ee88160, 2, (RF, RF_IF), rd_rm),
15181
15182 cCL(nrms, ef08100, 2, (RF, RF_IF), rd_rm),
15183 cCL(nrmsp, ef08120, 2, (RF, RF_IF), rd_rm),
15184 cCL(nrmsm, ef08140, 2, (RF, RF_IF), rd_rm),
15185 cCL(nrmsz, ef08160, 2, (RF, RF_IF), rd_rm),
15186 cCL(nrmd, ef08180, 2, (RF, RF_IF), rd_rm),
15187 cCL(nrmdp, ef081a0, 2, (RF, RF_IF), rd_rm),
15188 cCL(nrmdm, ef081c0, 2, (RF, RF_IF), rd_rm),
15189 cCL(nrmdz, ef081e0, 2, (RF, RF_IF), rd_rm),
15190 cCL(nrme, ef88100, 2, (RF, RF_IF), rd_rm),
15191 cCL(nrmep, ef88120, 2, (RF, RF_IF), rd_rm),
15192 cCL(nrmem, ef88140, 2, (RF, RF_IF), rd_rm),
15193 cCL(nrmez, ef88160, 2, (RF, RF_IF), rd_rm),
15194
15195 cCL(adfs, e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
15196 cCL(adfsp, e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
15197 cCL(adfsm, e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
15198 cCL(adfsz, e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
15199 cCL(adfd, e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
15200 cCL(adfdp, e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15201 cCL(adfdm, e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15202 cCL(adfdz, e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15203 cCL(adfe, e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
15204 cCL(adfep, e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
15205 cCL(adfem, e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
15206 cCL(adfez, e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
15207
15208 cCL(sufs, e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
15209 cCL(sufsp, e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
15210 cCL(sufsm, e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
15211 cCL(sufsz, e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
15212 cCL(sufd, e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
15213 cCL(sufdp, e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15214 cCL(sufdm, e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15215 cCL(sufdz, e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15216 cCL(sufe, e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
15217 cCL(sufep, e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
15218 cCL(sufem, e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
15219 cCL(sufez, e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
15220
15221 cCL(rsfs, e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
15222 cCL(rsfsp, e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
15223 cCL(rsfsm, e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
15224 cCL(rsfsz, e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
15225 cCL(rsfd, e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
15226 cCL(rsfdp, e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15227 cCL(rsfdm, e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15228 cCL(rsfdz, e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15229 cCL(rsfe, e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
15230 cCL(rsfep, e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
15231 cCL(rsfem, e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
15232 cCL(rsfez, e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
15233
15234 cCL(mufs, e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
15235 cCL(mufsp, e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
15236 cCL(mufsm, e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
15237 cCL(mufsz, e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
15238 cCL(mufd, e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
15239 cCL(mufdp, e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15240 cCL(mufdm, e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15241 cCL(mufdz, e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15242 cCL(mufe, e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
15243 cCL(mufep, e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
15244 cCL(mufem, e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
15245 cCL(mufez, e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
15246
15247 cCL(dvfs, e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
15248 cCL(dvfsp, e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
15249 cCL(dvfsm, e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
15250 cCL(dvfsz, e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
15251 cCL(dvfd, e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
15252 cCL(dvfdp, e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15253 cCL(dvfdm, e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15254 cCL(dvfdz, e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15255 cCL(dvfe, e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
15256 cCL(dvfep, e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
15257 cCL(dvfem, e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
15258 cCL(dvfez, e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
15259
15260 cCL(rdfs, e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
15261 cCL(rdfsp, e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
15262 cCL(rdfsm, e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
15263 cCL(rdfsz, e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
15264 cCL(rdfd, e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
15265 cCL(rdfdp, e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15266 cCL(rdfdm, e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15267 cCL(rdfdz, e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15268 cCL(rdfe, e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
15269 cCL(rdfep, e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
15270 cCL(rdfem, e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
15271 cCL(rdfez, e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
15272
15273 cCL(pows, e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
15274 cCL(powsp, e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
15275 cCL(powsm, e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
15276 cCL(powsz, e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
15277 cCL(powd, e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
15278 cCL(powdp, e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15279 cCL(powdm, e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15280 cCL(powdz, e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15281 cCL(powe, e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
15282 cCL(powep, e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
15283 cCL(powem, e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
15284 cCL(powez, e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
15285
15286 cCL(rpws, e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
15287 cCL(rpwsp, e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
15288 cCL(rpwsm, e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
15289 cCL(rpwsz, e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
15290 cCL(rpwd, e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
15291 cCL(rpwdp, e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15292 cCL(rpwdm, e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15293 cCL(rpwdz, e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15294 cCL(rpwe, e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
15295 cCL(rpwep, e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
15296 cCL(rpwem, e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
15297 cCL(rpwez, e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
15298
15299 cCL(rmfs, e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
15300 cCL(rmfsp, e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
15301 cCL(rmfsm, e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
15302 cCL(rmfsz, e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
15303 cCL(rmfd, e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
15304 cCL(rmfdp, e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15305 cCL(rmfdm, e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15306 cCL(rmfdz, e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15307 cCL(rmfe, e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
15308 cCL(rmfep, e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
15309 cCL(rmfem, e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
15310 cCL(rmfez, e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
15311
15312 cCL(fmls, e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
15313 cCL(fmlsp, e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
15314 cCL(fmlsm, e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
15315 cCL(fmlsz, e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
15316 cCL(fmld, e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
15317 cCL(fmldp, e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15318 cCL(fmldm, e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15319 cCL(fmldz, e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15320 cCL(fmle, e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
15321 cCL(fmlep, e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
15322 cCL(fmlem, e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
15323 cCL(fmlez, e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
15324
15325 cCL(fdvs, ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
15326 cCL(fdvsp, ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
15327 cCL(fdvsm, ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
15328 cCL(fdvsz, ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
15329 cCL(fdvd, ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
15330 cCL(fdvdp, ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15331 cCL(fdvdm, ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15332 cCL(fdvdz, ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15333 cCL(fdve, ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
15334 cCL(fdvep, ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
15335 cCL(fdvem, ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
15336 cCL(fdvez, ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
15337
15338 cCL(frds, eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
15339 cCL(frdsp, eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
15340 cCL(frdsm, eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
15341 cCL(frdsz, eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
15342 cCL(frdd, eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
15343 cCL(frddp, eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15344 cCL(frddm, eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15345 cCL(frddz, eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15346 cCL(frde, eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
15347 cCL(frdep, eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
15348 cCL(frdem, eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
15349 cCL(frdez, eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
15350
15351 cCL(pols, ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
15352 cCL(polsp, ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
15353 cCL(polsm, ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
15354 cCL(polsz, ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
15355 cCL(pold, ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
15356 cCL(poldp, ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15357 cCL(poldm, ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15358 cCL(poldz, ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15359 cCL(pole, ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
15360 cCL(polep, ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
15361 cCL(polem, ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
15362 cCL(polez, ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
15363
15364 cCE(cmf, e90f110, 2, (RF, RF_IF), fpa_cmp),
15365 C3E(cmfe, ed0f110, 2, (RF, RF_IF), fpa_cmp),
15366 cCE(cnf, eb0f110, 2, (RF, RF_IF), fpa_cmp),
15367 C3E(cnfe, ef0f110, 2, (RF, RF_IF), fpa_cmp),
15368
15369 cCL(flts, e000110, 2, (RF, RR), rn_rd),
15370 cCL(fltsp, e000130, 2, (RF, RR), rn_rd),
15371 cCL(fltsm, e000150, 2, (RF, RR), rn_rd),
15372 cCL(fltsz, e000170, 2, (RF, RR), rn_rd),
15373 cCL(fltd, e000190, 2, (RF, RR), rn_rd),
15374 cCL(fltdp, e0001b0, 2, (RF, RR), rn_rd),
15375 cCL(fltdm, e0001d0, 2, (RF, RR), rn_rd),
15376 cCL(fltdz, e0001f0, 2, (RF, RR), rn_rd),
15377 cCL(flte, e080110, 2, (RF, RR), rn_rd),
15378 cCL(fltep, e080130, 2, (RF, RR), rn_rd),
15379 cCL(fltem, e080150, 2, (RF, RR), rn_rd),
15380 cCL(fltez, e080170, 2, (RF, RR), rn_rd),
15381
15382 /* The implementation of the FIX instruction is broken on some
15383 assemblers, in that it accepts a precision specifier as well as a
15384 rounding specifier, despite the fact that this is meaningless.
15385 To be more compatible, we accept it as well, though of course it
15386 does not set any bits. */
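	/* Example (illustrative only; operand choices are arbitrary): because the
	   precision letter sets no bits, "fixsp r0, f1", "fixdp r0, f1" and
	   "fixep r0, f1" should all assemble to the same encoding as
	   "fixp r0, f1" (base value e100130 below).  */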
15387 cCE(fix, e100110, 2, (RR, RF), rd_rm),
15388 cCL(fixp, e100130, 2, (RR, RF), rd_rm),
15389 cCL(fixm, e100150, 2, (RR, RF), rd_rm),
15390 cCL(fixz, e100170, 2, (RR, RF), rd_rm),
15391 cCL(fixsp, e100130, 2, (RR, RF), rd_rm),
15392 cCL(fixsm, e100150, 2, (RR, RF), rd_rm),
15393 cCL(fixsz, e100170, 2, (RR, RF), rd_rm),
15394 cCL(fixdp, e100130, 2, (RR, RF), rd_rm),
15395 cCL(fixdm, e100150, 2, (RR, RF), rd_rm),
15396 cCL(fixdz, e100170, 2, (RR, RF), rd_rm),
15397 cCL(fixep, e100130, 2, (RR, RF), rd_rm),
15398 cCL(fixem, e100150, 2, (RR, RF), rd_rm),
15399 cCL(fixez, e100170, 2, (RR, RF), rd_rm),
15400
15401 /* Instructions that were new with the real FPA; call them V2. */
15402 #undef ARM_VARIANT
15403 #define ARM_VARIANT &fpu_fpa_ext_v2
15404 cCE(lfm, c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15405 cCL(lfmfd, c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15406 cCL(lfmea, d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15407 cCE(sfm, c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15408 cCL(sfmfd, d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15409 cCL(sfmea, c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15410
15411 #undef ARM_VARIANT
15412 #define ARM_VARIANT &fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
15413 /* Moves and type conversions. */
15414 cCE(fcpys, eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
15415 cCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp),
15416 cCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg),
15417 cCE(fmstat, ef1fa10, 0, (), noargs),
15418 cCE(fsitos, eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
15419 cCE(fuitos, eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
15420 cCE(ftosis, ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
15421 cCE(ftosizs, ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
15422 cCE(ftouis, ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
15423 cCE(ftouizs, ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
15424 cCE(fmrx, ef00a10, 2, (RR, RVC), rd_rn),
15425 cCE(fmxr, ee00a10, 2, (RVC, RR), rn_rd),
15426
15427 /* Memory operations. */
15428 cCE(flds, d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
15429 cCE(fsts, d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
15430 cCE(fldmias, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15431 cCE(fldmfds, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15432 cCE(fldmdbs, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15433 cCE(fldmeas, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15434 cCE(fldmiax, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15435 cCE(fldmfdx, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15436 cCE(fldmdbx, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15437 cCE(fldmeax, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15438 cCE(fstmias, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15439 cCE(fstmeas, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15440 cCE(fstmdbs, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15441 cCE(fstmfds, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15442 cCE(fstmiax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15443 cCE(fstmeax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15444 cCE(fstmdbx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15445 cCE(fstmfdx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15446
15447 /* Monadic operations. */
15448 cCE(fabss, eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
15449 cCE(fnegs, eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
15450 cCE(fsqrts, eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
15451
15452 /* Dyadic operations. */
15453 cCE(fadds, e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15454 cCE(fsubs, e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15455 cCE(fmuls, e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15456 cCE(fdivs, e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15457 cCE(fmacs, e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15458 cCE(fmscs, e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15459 cCE(fnmuls, e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15460 cCE(fnmacs, e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15461 cCE(fnmscs, e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15462
15463 /* Comparisons. */
15464 cCE(fcmps, eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
15465 cCE(fcmpzs, eb50a40, 1, (RVS), vfp_sp_compare_z),
15466 cCE(fcmpes, eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
15467 cCE(fcmpezs, eb50ac0, 1, (RVS), vfp_sp_compare_z),
15468
15469 #undef ARM_VARIANT
15470 #define ARM_VARIANT &fpu_vfp_ext_v1 /* VFP V1 (double precision). */
15471 /* Moves and type conversions. */
15472 cCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
15473 cCE(fcvtds, eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
15474 cCE(fcvtsd, eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
15475 cCE(fmdhr, e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
15476 cCE(fmdlr, e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
15477 cCE(fmrdh, e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
15478 cCE(fmrdl, e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
15479 cCE(fsitod, eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
15480 cCE(fuitod, eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
15481 cCE(ftosid, ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
15482 cCE(ftosizd, ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
15483 cCE(ftouid, ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
15484 cCE(ftouizd, ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
15485
15486 /* Memory operations. */
15487 cCE(fldd, d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
15488 cCE(fstd, d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
15489 cCE(fldmiad, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15490 cCE(fldmfdd, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15491 cCE(fldmdbd, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15492 cCE(fldmead, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15493 cCE(fstmiad, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15494 cCE(fstmead, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15495 cCE(fstmdbd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15496 cCE(fstmfdd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15497
15498 /* Monadic operations. */
15499 cCE(fabsd, eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
15500 cCE(fnegd, eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
15501 cCE(fsqrtd, eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
15502
15503 /* Dyadic operations. */
15504 cCE(faddd, e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15505 cCE(fsubd, e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15506 cCE(fmuld, e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15507 cCE(fdivd, e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15508 cCE(fmacd, e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15509 cCE(fmscd, e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15510 cCE(fnmuld, e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15511 cCE(fnmacd, e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15512 cCE(fnmscd, e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15513
15514 /* Comparisons. */
15515 cCE(fcmpd, eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
15516 cCE(fcmpzd, eb50b40, 1, (RVD), vfp_dp_rd),
15517 cCE(fcmped, eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
15518 cCE(fcmpezd, eb50bc0, 1, (RVD), vfp_dp_rd),
15519
15520 #undef ARM_VARIANT
15521 #define ARM_VARIANT &fpu_vfp_ext_v2
15522 cCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
15523 cCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
15524 cCE(fmdrr, c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
15525 cCE(fmrrd, c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
15526
15527 /* Instructions which may belong to either the Neon or VFP instruction sets.
15528 Individual encoder functions perform additional architecture checks. */
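	/* Example (illustrative only; operand choices are arbitrary): an entry such
	   as vadd below would be expected to emit the VFP encoding for
	   "vadd.f32 s0, s1, s2" but the Neon encoding for "vadd.i32 q0, q1, q2";
	   the encoder function named in each entry makes that choice.  */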
15529 #undef ARM_VARIANT
15530 #define ARM_VARIANT &fpu_vfp_ext_v1xd
15531 #undef THUMB_VARIANT
15532 #define THUMB_VARIANT &fpu_vfp_ext_v1xd
15533 /* These mnemonics are unique to VFP. */
15534 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
15535 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
15536 nCE(vnmul, vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
15537 nCE(vnmla, vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
15538 nCE(vnmls, vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
15539 nCE(vcmp, vcmp, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
15540 nCE(vcmpe, vcmpe, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
15541 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
15542 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
15543 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
15544
15545 /* Mnemonics shared by Neon and VFP. */
15546 nCEF(vmul, vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
15547 nCEF(vmla, vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
15548 nCEF(vmls, vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
15549
15550 nCEF(vadd, vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
15551 nCEF(vsub, vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
15552
15553 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
15554 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
15555
15556 NCE(vldm, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15557 NCE(vldmia, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15558 NCE(vldmdb, d100b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15559 NCE(vstm, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15560 NCE(vstmia, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15561 NCE(vstmdb, d000b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15562 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
15563 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
15564
15565 nCEF(vcvt, vcvt, 3, (RNSDQ, RNSDQ, oI32b), neon_cvt),
15566
15567 /* NOTE: All VMOV encoding is special-cased! */
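	/* Example (illustrative only): the single entry below must cover forms as
	   different as "vmov d0, d1", "vmov.i32 q0, #1", "vmov r0, s1" and
	   "vmov r0, r1, d2", hence the dedicated neon_mov worker.  */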
15568 NCE(vmov, 0, 1, (VMOV), neon_mov),
15569 NCE(vmovq, 0, 1, (VMOV), neon_mov),
15570
15571 #undef THUMB_VARIANT
15572 #define THUMB_VARIANT &fpu_neon_ext_v1
15573 #undef ARM_VARIANT
15574 #define ARM_VARIANT &fpu_neon_ext_v1
15575 /* Data processing with three registers of the same length. */
15576 /* Integer ops, valid types S8 S16 S32 U8 U16 U32. */
15577 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
15578 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
15579 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
15580 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
15581 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
15582 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
15583 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
15584 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
15585 /* Integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
15586 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
15587 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
15588 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
15589 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
15590 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
15591 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
15592 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
15593 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
15594 /* If not immediate, fall back to neon_dyadic_i64_su.
15595 shl_imm should accept I8 I16 I32 I64,
15596 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
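	/* Example (illustrative only; operand choices are arbitrary):
	   "vshl.i32 d0, d1, #3" should take the immediate path, while
	   "vshl.s32 d0, d1, d2" has no immediate and should fall back as
	   described above.  */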
15597 nUF(vshl, vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
15598 nUF(vshlq, vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
15599 nUF(vqshl, vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
15600 nUF(vqshlq, vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
15601 /* Logic ops, types optional & ignored. */
15602 nUF(vand, vand, 2, (RNDQ, NILO), neon_logic),
15603 nUF(vandq, vand, 2, (RNQ, NILO), neon_logic),
15604 nUF(vbic, vbic, 2, (RNDQ, NILO), neon_logic),
15605 nUF(vbicq, vbic, 2, (RNQ, NILO), neon_logic),
15606 nUF(vorr, vorr, 2, (RNDQ, NILO), neon_logic),
15607 nUF(vorrq, vorr, 2, (RNQ, NILO), neon_logic),
15608 nUF(vorn, vorn, 2, (RNDQ, NILO), neon_logic),
15609 nUF(vornq, vorn, 2, (RNQ, NILO), neon_logic),
15610 nUF(veor, veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
15611 nUF(veorq, veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
15612 /* Bitfield ops, untyped. */
15613 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
15614 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
15615 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
15616 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
15617 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
15618 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
15619 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
15620 nUF(vabd, vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
15621 nUF(vabdq, vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
15622 nUF(vmax, vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
15623 nUF(vmaxq, vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
15624 nUF(vmin, vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
15625 nUF(vminq, vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
15626 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
15627 back to neon_dyadic_if_su. */
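	/* Example (illustrative only; operand choices are arbitrary):
	   "vcge.s32 d0, d1, #0" should use the compare-with-zero form, while
	   "vcge.s32 d0, d1, d2" should fall back to neon_dyadic_if_su.  */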
15628 nUF(vcge, vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
15629 nUF(vcgeq, vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
15630 nUF(vcgt, vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
15631 nUF(vcgtq, vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
15632 nUF(vclt, vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
15633 nUF(vcltq, vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
15634 nUF(vcle, vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
15635 nUF(vcleq, vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
15636 /* Comparison. Type I8 I16 I32 F32. */
15637 nUF(vceq, vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
15638 nUF(vceqq, vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
15639 /* As above, D registers only. */
15640 nUF(vpmax, vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
15641 nUF(vpmin, vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
15642 /* Int and float variants, signedness unimportant. */
15643 nUF(vmlaq, vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
15644 nUF(vmlsq, vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
15645 nUF(vpadd, vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
15646 /* Add/sub take types I8 I16 I32 I64 F32. */
15647 nUF(vaddq, vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
15648 nUF(vsubq, vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
15649 /* vtst takes sizes 8, 16, 32. */
15650 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
15651 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
15652 /* VMUL takes I8 I16 I32 F32 P8. */
15653 nUF(vmulq, vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
15654 /* VQD{R}MULH takes S16 S32. */
15655 nUF(vqdmulh, vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
15656 nUF(vqdmulhq, vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
15657 nUF(vqrdmulh, vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
15658 nUF(vqrdmulhq, vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
15659 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
15660 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
15661 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
15662 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
15663 NUF(vaclt, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
15664 NUF(vacltq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
15665 NUF(vacle, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
15666 NUF(vacleq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
15667 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
15668 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
15669 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
15670 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
15671
15672 /* Two address, int/float. Types S8 S16 S32 F32. */
15673 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
15674 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
15675
15676 /* Data processing with two registers and a shift amount. */
15677 /* Right shifts, and variants with rounding.
15678 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
15679 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
15680 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
15681 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
15682 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
15683 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
15684 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
15685 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
15686 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
15687 /* Shift and insert. Sizes accepted 8 16 32 64. */
15688 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
15689 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
15690 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
15691 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
15692 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
15693 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
15694 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
15695 /* Right shift immediate, saturating & narrowing, with rounding variants.
15696 Types accepted S16 S32 S64 U16 U32 U64. */
15697 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
15698 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
15699 /* As above, unsigned. Types accepted S16 S32 S64. */
15700 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
15701 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
15702 /* Right shift narrowing. Types accepted I16 I32 I64. */
15703 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
15704 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
15705 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
15706 nUF(vshll, vshll, 3, (RNQ, RND, I32), neon_shll),
15707 /* CVT with optional immediate for fixed-point variant. */
15708 nUF(vcvtq, vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
15709
15710 nUF(vmvn, vmvn, 2, (RNDQ, RNDQ_IMVNb), neon_mvn),
15711 nUF(vmvnq, vmvn, 2, (RNQ, RNDQ_IMVNb), neon_mvn),
15712
15713 /* Data processing, three registers of different lengths. */
15714 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
15715 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
15716 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
15717 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
15718 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
15719 /* If not scalar, fall back to neon_dyadic_long.
15720 Vector types as above, scalar types S16 S32 U16 U32. */
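	/* Example (illustrative only; operand choices are arbitrary):
	   "vmlal.s16 q0, d1, d2[0]" should select the scalar variant, while
	   "vmlal.s16 q0, d1, d2" should fall back to the plain long form.  */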
15721 nUF(vmlal, vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
15722 nUF(vmlsl, vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
15723 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
15724 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
15725 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
15726 /* Dyadic, narrowing insns. Types I16 I32 I64. */
15727 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15728 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15729 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15730 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15731 /* Saturating doubling multiplies. Types S16 S32. */
15732 nUF(vqdmlal, vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
15733 nUF(vqdmlsl, vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
15734 nUF(vqdmull, vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
15735 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
15736 S16 S32 U16 U32. */
15737 nUF(vmull, vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
15738
15739 /* Extract. Size 8. */
15740 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I7), neon_ext),
15741 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I7), neon_ext),
15742
15743 /* Two registers, miscellaneous. */
15744 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
15745 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
15746 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
15747 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
15748 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
15749 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
15750 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
15751 /* Vector replicate. Sizes 8 16 32. */
15752 nCE(vdup, vdup, 2, (RNDQ, RR_RNSC), neon_dup),
15753 nCE(vdupq, vdup, 2, (RNQ, RR_RNSC), neon_dup),
15754 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
15755 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
15756 /* VMOVN. Types I16 I32 I64. */
15757 nUF(vmovn, vmovn, 2, (RND, RNQ), neon_movn),
15758 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
15759 nUF(vqmovn, vqmovn, 2, (RND, RNQ), neon_qmovn),
15760 /* VQMOVUN. Types S16 S32 S64. */
15761 nUF(vqmovun, vqmovun, 2, (RND, RNQ), neon_qmovun),
15762 /* VZIP / VUZP. Sizes 8 16 32. */
15763 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
15764 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
15765 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
15766 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
15767 /* VQABS / VQNEG. Types S8 S16 S32. */
15768 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
15769 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
15770 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
15771 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
15772 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
15773 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
15774 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
15775 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
15776 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
15777 /* Reciprocal estimates. Types U32 F32. */
15778 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
15779 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
15780 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
15781 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
15782 /* VCLS. Types S8 S16 S32. */
15783 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
15784 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
15785 /* VCLZ. Types I8 I16 I32. */
15786 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
15787 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
15788 /* VCNT. Size 8. */
15789 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
15790 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
15791 /* Two address, untyped. */
15792 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
15793 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
15794 /* VTRN. Sizes 8 16 32. */
15795 nUF(vtrn, vtrn, 2, (RNDQ, RNDQ), neon_trn),
15796 nUF(vtrnq, vtrn, 2, (RNQ, RNQ), neon_trn),
15797
15798 /* Table lookup. Size 8. */
15799 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
15800 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
15801
15802 #undef THUMB_VARIANT
15803 #define THUMB_VARIANT &fpu_vfp_v3_or_neon_ext
15804 #undef ARM_VARIANT
15805 #define ARM_VARIANT &fpu_vfp_v3_or_neon_ext
15806 /* Neon element/structure load/store. */
15807 nUF(vld1, vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
15808 nUF(vst1, vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
15809 nUF(vld2, vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
15810 nUF(vst2, vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
15811 nUF(vld3, vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
15812 nUF(vst3, vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
15813 nUF(vld4, vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
15814 nUF(vst4, vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
15815
15816 #undef THUMB_VARIANT
15817 #define THUMB_VARIANT &fpu_vfp_ext_v3
15818 #undef ARM_VARIANT
15819 #define ARM_VARIANT &fpu_vfp_ext_v3
15820 cCE(fconsts, eb00a00, 2, (RVS, I255), vfp_sp_const),
15821 cCE(fconstd, eb00b00, 2, (RVD, I255), vfp_dp_const),
15822 cCE(fshtos, eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
15823 cCE(fshtod, eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
15824 cCE(fsltos, eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
15825 cCE(fsltod, eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
15826 cCE(fuhtos, ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
15827 cCE(fuhtod, ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
15828 cCE(fultos, ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
15829 cCE(fultod, ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
15830 cCE(ftoshs, ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
15831 cCE(ftoshd, ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
15832 cCE(ftosls, ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
15833 cCE(ftosld, ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
15834 cCE(ftouhs, ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
15835 cCE(ftouhd, ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
15836 cCE(ftouls, ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
15837 cCE(ftould, ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
15838
15839 #undef THUMB_VARIANT
15840 #undef ARM_VARIANT
15841 #define ARM_VARIANT &arm_cext_xscale /* Intel XScale extensions. */
15842 cCE(mia, e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
15843 cCE(miaph, e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
15844 cCE(miabb, e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
15845 cCE(miabt, e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
15846 cCE(miatb, e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
15847 cCE(miatt, e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
15848 cCE(mar, c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
15849 cCE(mra, c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
15850
15851 #undef ARM_VARIANT
15852 #define ARM_VARIANT &arm_cext_iwmmxt /* Intel Wireless MMX technology. */
15853 cCE(tandcb, e13f130, 1, (RR), iwmmxt_tandorc),
15854 cCE(tandch, e53f130, 1, (RR), iwmmxt_tandorc),
15855 cCE(tandcw, e93f130, 1, (RR), iwmmxt_tandorc),
15856 cCE(tbcstb, e400010, 2, (RIWR, RR), rn_rd),
15857 cCE(tbcsth, e400050, 2, (RIWR, RR), rn_rd),
15858 cCE(tbcstw, e400090, 2, (RIWR, RR), rn_rd),
15859 cCE(textrcb, e130170, 2, (RR, I7), iwmmxt_textrc),
15860 cCE(textrch, e530170, 2, (RR, I7), iwmmxt_textrc),
15861 cCE(textrcw, e930170, 2, (RR, I7), iwmmxt_textrc),
15862 cCE(textrmub, e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
15863 cCE(textrmuh, e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
15864 cCE(textrmuw, e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
15865 cCE(textrmsb, e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
15866 cCE(textrmsh, e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
15867 cCE(textrmsw, e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
15868 cCE(tinsrb, e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
15869 cCE(tinsrh, e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
15870 cCE(tinsrw, e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
15871 cCE(tmcr, e000110, 2, (RIWC_RIWG, RR), rn_rd),
15872 cCE(tmcrr, c400000, 3, (RIWR, RR, RR), rm_rd_rn),
15873 cCE(tmia, e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
15874 cCE(tmiaph, e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
15875 cCE(tmiabb, e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
15876 cCE(tmiabt, e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
15877 cCE(tmiatb, e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
15878 cCE(tmiatt, e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
15879 cCE(tmovmskb, e100030, 2, (RR, RIWR), rd_rn),
15880 cCE(tmovmskh, e500030, 2, (RR, RIWR), rd_rn),
15881 cCE(tmovmskw, e900030, 2, (RR, RIWR), rd_rn),
15882 cCE(tmrc, e100110, 2, (RR, RIWC_RIWG), rd_rn),
15883 cCE(tmrrc, c500000, 3, (RR, RR, RIWR), rd_rn_rm),
15884 cCE(torcb, e13f150, 1, (RR), iwmmxt_tandorc),
15885 cCE(torch, e53f150, 1, (RR), iwmmxt_tandorc),
15886 cCE(torcw, e93f150, 1, (RR), iwmmxt_tandorc),
15887 cCE(waccb, e0001c0, 2, (RIWR, RIWR), rd_rn),
15888 cCE(wacch, e4001c0, 2, (RIWR, RIWR), rd_rn),
15889 cCE(waccw, e8001c0, 2, (RIWR, RIWR), rd_rn),
15890 cCE(waddbss, e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15891 cCE(waddb, e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15892 cCE(waddbus, e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15893 cCE(waddhss, e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15894 cCE(waddh, e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15895 cCE(waddhus, e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15896 cCE(waddwss, eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15897 cCE(waddw, e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15898 cCE(waddwus, e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15899 cCE(waligni, e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
15900 cCE(walignr0, e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15901 cCE(walignr1, e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15902 cCE(walignr2, ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15903 cCE(walignr3, eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15904 cCE(wand, e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15905 cCE(wandn, e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15906 cCE(wavg2b, e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15907 cCE(wavg2br, e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15908 cCE(wavg2h, ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15909 cCE(wavg2hr, ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15910 cCE(wcmpeqb, e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15911 cCE(wcmpeqh, e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15912 cCE(wcmpeqw, e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15913 cCE(wcmpgtub, e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15914 cCE(wcmpgtuh, e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15915 cCE(wcmpgtuw, e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15916 cCE(wcmpgtsb, e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15917 cCE(wcmpgtsh, e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15918 cCE(wcmpgtsw, eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15919 cCE(wldrb, c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
15920 cCE(wldrh, c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
15921 cCE(wldrw, c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
15922 cCE(wldrd, c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
15923 cCE(wmacs, e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15924 cCE(wmacsz, e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15925 cCE(wmacu, e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15926 cCE(wmacuz, e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15927 cCE(wmadds, ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15928 cCE(wmaddu, e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15929 cCE(wmaxsb, e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15930 cCE(wmaxsh, e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15931 cCE(wmaxsw, ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15932 cCE(wmaxub, e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15933 cCE(wmaxuh, e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15934 cCE(wmaxuw, e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15935 cCE(wminsb, e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15936 cCE(wminsh, e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15937 cCE(wminsw, eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15938 cCE(wminub, e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15939 cCE(wminuh, e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15940 cCE(wminuw, e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15941 cCE(wmov, e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
15942 cCE(wmulsm, e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15943 cCE(wmulsl, e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15944 cCE(wmulum, e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15945 cCE(wmulul, e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15946 cCE(wor, e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15947 cCE(wpackhss, e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15948 cCE(wpackhus, e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15949 cCE(wpackwss, eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15950 cCE(wpackwus, e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15951 cCE(wpackdss, ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15952 cCE(wpackdus, ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15953 cCE(wrorh, e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
15954 cCE(wrorhg, e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
15955 cCE(wrorw, eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
15956 cCE(wrorwg, eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
15957 cCE(wrord, ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
15958 cCE(wrordg, ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
15959 cCE(wsadb, e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15960 cCE(wsadbz, e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15961 cCE(wsadh, e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15962 cCE(wsadhz, e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15963 cCE(wshufh, e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
15964 cCE(wsllh, e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
15965 cCE(wsllhg, e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
15966 cCE(wsllw, e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
15967 cCE(wsllwg, e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
15968 cCE(wslld, ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
15969 cCE(wslldg, ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
15970 cCE(wsrah, e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
15971 cCE(wsrahg, e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
15972 cCE(wsraw, e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
15973 cCE(wsrawg, e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
15974 cCE(wsrad, ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
15975 cCE(wsradg, ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
15976 cCE(wsrlh, e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
15977 cCE(wsrlhg, e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
15978 cCE(wsrlw, ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
15979 cCE(wsrlwg, ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
15980 cCE(wsrld, ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
15981 cCE(wsrldg, ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
15982 cCE(wstrb, c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
15983 cCE(wstrh, c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
15984 cCE(wstrw, c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
15985 cCE(wstrd, c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
15986 cCE(wsubbss, e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15987 cCE(wsubb, e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15988 cCE(wsubbus, e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15989 cCE(wsubhss, e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15990 cCE(wsubh, e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15991 cCE(wsubhus, e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15992 cCE(wsubwss, eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15993 cCE(wsubw, e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15994 cCE(wsubwus, e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15995 cCE(wunpckehub,e0000c0, 2, (RIWR, RIWR), rd_rn),
15996 cCE(wunpckehuh,e4000c0, 2, (RIWR, RIWR), rd_rn),
15997 cCE(wunpckehuw,e8000c0, 2, (RIWR, RIWR), rd_rn),
15998 cCE(wunpckehsb,e2000c0, 2, (RIWR, RIWR), rd_rn),
15999 cCE(wunpckehsh,e6000c0, 2, (RIWR, RIWR), rd_rn),
16000 cCE(wunpckehsw,ea000c0, 2, (RIWR, RIWR), rd_rn),
16001 cCE(wunpckihb, e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16002 cCE(wunpckihh, e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16003 cCE(wunpckihw, e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16004 cCE(wunpckelub,e0000e0, 2, (RIWR, RIWR), rd_rn),
16005 cCE(wunpckeluh,e4000e0, 2, (RIWR, RIWR), rd_rn),
16006 cCE(wunpckeluw,e8000e0, 2, (RIWR, RIWR), rd_rn),
16007 cCE(wunpckelsb,e2000e0, 2, (RIWR, RIWR), rd_rn),
16008 cCE(wunpckelsh,e6000e0, 2, (RIWR, RIWR), rd_rn),
16009 cCE(wunpckelsw,ea000e0, 2, (RIWR, RIWR), rd_rn),
16010 cCE(wunpckilb, e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16011 cCE(wunpckilh, e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16012 cCE(wunpckilw, e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16013 cCE(wxor, e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16014 cCE(wzero, e300000, 1, (RIWR), iwmmxt_wzero),
16015
16016 #undef ARM_VARIANT
16017 #define ARM_VARIANT &arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
16018 cCE(torvscb, e13f190, 1, (RR), iwmmxt_tandorc),
16019 cCE(torvsch, e53f190, 1, (RR), iwmmxt_tandorc),
16020 cCE(torvscw, e93f190, 1, (RR), iwmmxt_tandorc),
16021 cCE(wabsb, e2001c0, 2, (RIWR, RIWR), rd_rn),
16022 cCE(wabsh, e6001c0, 2, (RIWR, RIWR), rd_rn),
16023 cCE(wabsw, ea001c0, 2, (RIWR, RIWR), rd_rn),
16024 cCE(wabsdiffb, e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16025 cCE(wabsdiffh, e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16026 cCE(wabsdiffw, e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16027 cCE(waddbhusl, e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16028 cCE(waddbhusm, e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16029 cCE(waddhc, e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16030 cCE(waddwc, ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16031 cCE(waddsubhx, ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16032 cCE(wavg4, e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16033 cCE(wavg4r, e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16034 cCE(wmaddsn, ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16035 cCE(wmaddsx, eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16036 cCE(wmaddun, ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16037 cCE(wmaddux, e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16038 cCE(wmerge, e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
16039 cCE(wmiabb, e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16040 cCE(wmiabt, e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16041 cCE(wmiatb, e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16042 cCE(wmiatt, e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16043 cCE(wmiabbn, e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16044 cCE(wmiabtn, e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16045 cCE(wmiatbn, e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16046 cCE(wmiattn, e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16047 cCE(wmiawbb, e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16048 cCE(wmiawbt, e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16049 cCE(wmiawtb, ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16050 cCE(wmiawtt, eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16051 cCE(wmiawbbn, ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16052 cCE(wmiawbtn, ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16053 cCE(wmiawtbn, ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16054 cCE(wmiawttn, ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16055 cCE(wmulsmr, ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16056 cCE(wmulumr, ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16057 cCE(wmulwumr, ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16058 cCE(wmulwsmr, ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16059 cCE(wmulwum, ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16060 cCE(wmulwsm, ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16061 cCE(wmulwl, eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16062 cCE(wqmiabb, e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16063 cCE(wqmiabt, e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16064 cCE(wqmiatb, ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16065 cCE(wqmiatt, eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16066 cCE(wqmiabbn, ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16067 cCE(wqmiabtn, ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16068 cCE(wqmiatbn, ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16069 cCE(wqmiattn, ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16070 cCE(wqmulm, e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16071 cCE(wqmulmr, e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16072 cCE(wqmulwm, ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16073 cCE(wqmulwmr, ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16074 cCE(wsubaddhx, ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16075
16076 #undef ARM_VARIANT
16077 #define ARM_VARIANT &arm_cext_maverick /* Cirrus Maverick instructions. */
16078 cCE(cfldrs, c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
16079 cCE(cfldrd, c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
16080 cCE(cfldr32, c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
16081 cCE(cfldr64, c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
16082 cCE(cfstrs, c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
16083 cCE(cfstrd, c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
16084 cCE(cfstr32, c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
16085 cCE(cfstr64, c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
16086 cCE(cfmvsr, e000450, 2, (RMF, RR), rn_rd),
16087 cCE(cfmvrs, e100450, 2, (RR, RMF), rd_rn),
16088 cCE(cfmvdlr, e000410, 2, (RMD, RR), rn_rd),
16089 cCE(cfmvrdl, e100410, 2, (RR, RMD), rd_rn),
16090 cCE(cfmvdhr, e000430, 2, (RMD, RR), rn_rd),
16091 cCE(cfmvrdh, e100430, 2, (RR, RMD), rd_rn),
16092 cCE(cfmv64lr, e000510, 2, (RMDX, RR), rn_rd),
16093 cCE(cfmvr64l, e100510, 2, (RR, RMDX), rd_rn),
16094 cCE(cfmv64hr, e000530, 2, (RMDX, RR), rn_rd),
16095 cCE(cfmvr64h, e100530, 2, (RR, RMDX), rd_rn),
16096 cCE(cfmval32, e200440, 2, (RMAX, RMFX), rd_rn),
16097 cCE(cfmv32al, e100440, 2, (RMFX, RMAX), rd_rn),
16098 cCE(cfmvam32, e200460, 2, (RMAX, RMFX), rd_rn),
16099 cCE(cfmv32am, e100460, 2, (RMFX, RMAX), rd_rn),
16100 cCE(cfmvah32, e200480, 2, (RMAX, RMFX), rd_rn),
16101 cCE(cfmv32ah, e100480, 2, (RMFX, RMAX), rd_rn),
16102 cCE(cfmva32, e2004a0, 2, (RMAX, RMFX), rd_rn),
16103 cCE(cfmv32a, e1004a0, 2, (RMFX, RMAX), rd_rn),
16104 cCE(cfmva64, e2004c0, 2, (RMAX, RMDX), rd_rn),
16105 cCE(cfmv64a, e1004c0, 2, (RMDX, RMAX), rd_rn),
16106 cCE(cfmvsc32, e2004e0, 2, (RMDS, RMDX), mav_dspsc),
16107 cCE(cfmv32sc, e1004e0, 2, (RMDX, RMDS), rd),
16108 cCE(cfcpys, e000400, 2, (RMF, RMF), rd_rn),
16109 cCE(cfcpyd, e000420, 2, (RMD, RMD), rd_rn),
16110 cCE(cfcvtsd, e000460, 2, (RMD, RMF), rd_rn),
16111 cCE(cfcvtds, e000440, 2, (RMF, RMD), rd_rn),
16112 cCE(cfcvt32s, e000480, 2, (RMF, RMFX), rd_rn),
16113 cCE(cfcvt32d, e0004a0, 2, (RMD, RMFX), rd_rn),
16114 cCE(cfcvt64s, e0004c0, 2, (RMF, RMDX), rd_rn),
16115 cCE(cfcvt64d, e0004e0, 2, (RMD, RMDX), rd_rn),
16116 cCE(cfcvts32, e100580, 2, (RMFX, RMF), rd_rn),
16117 cCE(cfcvtd32, e1005a0, 2, (RMFX, RMD), rd_rn),
16118 cCE(cftruncs32,e1005c0, 2, (RMFX, RMF), rd_rn),
16119 cCE(cftruncd32,e1005e0, 2, (RMFX, RMD), rd_rn),
16120 cCE(cfrshl32, e000550, 3, (RMFX, RMFX, RR), mav_triple),
16121 cCE(cfrshl64, e000570, 3, (RMDX, RMDX, RR), mav_triple),
16122 cCE(cfsh32, e000500, 3, (RMFX, RMFX, I63s), mav_shift),
16123 cCE(cfsh64, e200500, 3, (RMDX, RMDX, I63s), mav_shift),
16124 cCE(cfcmps, e100490, 3, (RR, RMF, RMF), rd_rn_rm),
16125 cCE(cfcmpd, e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
16126 cCE(cfcmp32, e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
16127 cCE(cfcmp64, e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
16128 cCE(cfabss, e300400, 2, (RMF, RMF), rd_rn),
16129 cCE(cfabsd, e300420, 2, (RMD, RMD), rd_rn),
16130 cCE(cfnegs, e300440, 2, (RMF, RMF), rd_rn),
16131 cCE(cfnegd, e300460, 2, (RMD, RMD), rd_rn),
16132 cCE(cfadds, e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
16133 cCE(cfaddd, e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
16134 cCE(cfsubs, e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
16135 cCE(cfsubd, e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
16136 cCE(cfmuls, e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
16137 cCE(cfmuld, e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
16138 cCE(cfabs32, e300500, 2, (RMFX, RMFX), rd_rn),
16139 cCE(cfabs64, e300520, 2, (RMDX, RMDX), rd_rn),
16140 cCE(cfneg32, e300540, 2, (RMFX, RMFX), rd_rn),
16141 cCE(cfneg64, e300560, 2, (RMDX, RMDX), rd_rn),
16142 cCE(cfadd32, e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16143 cCE(cfadd64, e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
16144 cCE(cfsub32, e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16145 cCE(cfsub64, e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
16146 cCE(cfmul32, e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16147 cCE(cfmul64, e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
16148 cCE(cfmac32, e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16149 cCE(cfmsc32, e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16150 cCE(cfmadd32, e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
16151 cCE(cfmsub32, e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
16152 cCE(cfmadda32, e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
16153 cCE(cfmsuba32, e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
16154 };
16155 #undef ARM_VARIANT
16156 #undef THUMB_VARIANT
16157 #undef TCE
16158 #undef TCM
16159 #undef TUE
16160 #undef TUF
16161 #undef TCC
16162 #undef cCE
16163 #undef cCL
16164 #undef C3E
16165 #undef CE
16166 #undef CM
16167 #undef UE
16168 #undef UF
16169 #undef UT
16170 #undef NUF
16171 #undef nUF
16172 #undef NCE
16173 #undef nCE
16174 #undef OPS0
16175 #undef OPS1
16176 #undef OPS2
16177 #undef OPS3
16178 #undef OPS4
16179 #undef OPS5
16180 #undef OPS6
16181 #undef do_0
16182 \f
16183 /* MD interface: bits in the object file. */
16184
16185 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
16186 for use in the a.out file, and store them in the array pointed to by buf.
16187 This knows about the endian-ness of the target machine and does
16188 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte),
16189 2 (short) and 4 (long). Floating point numbers are put out as a series of
16190 LITTLENUMS (shorts, here at least). */
16191
16192 void
16193 md_number_to_chars (char * buf, valueT val, int n)
16194 {
16195 if (target_big_endian)
16196 number_to_chars_bigendian (buf, val, n);
16197 else
16198 number_to_chars_littleendian (buf, val, n);
16199 }
16200
16201 static valueT
16202 md_chars_to_number (char * buf, int n)
16203 {
16204 valueT result = 0;
16205 unsigned char * where = (unsigned char *) buf;
16206
16207 if (target_big_endian)
16208 {
16209 while (n--)
16210 {
16211 result <<= 8;
16212 result |= (*where++ & 255);
16213 }
16214 }
16215 else
16216 {
16217 while (n--)
16218 {
16219 result <<= 8;
16220 result |= (where[n] & 255);
16221 }
16222 }
16223
16224 return result;
16225 }
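
/* For illustration (an editorial addition, not part of the original
   source): on a little-endian target the byte sequence
   { 0x00, 0x00, 0xa0, 0xe1 } -- the ARM nop pattern used by
   arm_handle_align below -- is read back by md_chars_to_number as

     result = where[3];                    ->  0xe1
     result = (result << 8) | where[2];    ->  0xe1a0
     result = (result << 8) | where[1];    ->  0xe1a000
     result = (result << 8) | where[0];    ->  0xe1a00000   (mov r0, r0)

   With target_big_endian set the bytes are consumed in ascending order,
   so the same value would have been stored as { 0xe1, 0xa0, 0x00, 0x00 }.  */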
16226
16227 /* MD interface: Sections. */
16228
16229 /* Estimate the size of a frag before relaxing. Assume everything fits in
16230 2 bytes. */
16231
16232 int
16233 md_estimate_size_before_relax (fragS * fragp,
16234 segT segtype ATTRIBUTE_UNUSED)
16235 {
16236 fragp->fr_var = 2;
16237 return 2;
16238 }
16239
16240 /* Convert a machine dependent frag. */
16241
16242 void
16243 md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
16244 {
16245 unsigned long insn;
16246 unsigned long old_op;
16247 char *buf;
16248 expressionS exp;
16249 fixS *fixp;
16250 int reloc_type;
16251 int pc_rel;
16252 int opcode;
16253
16254 buf = fragp->fr_literal + fragp->fr_fix;
16255
16256 old_op = bfd_get_16(abfd, buf);
16257 if (fragp->fr_symbol) {
16258 exp.X_op = O_symbol;
16259 exp.X_add_symbol = fragp->fr_symbol;
16260 } else {
16261 exp.X_op = O_constant;
16262 }
16263 exp.X_add_number = fragp->fr_offset;
16264 opcode = fragp->fr_subtype;
16265 switch (opcode)
16266 {
16267 case T_MNEM_ldr_pc:
16268 case T_MNEM_ldr_pc2:
16269 case T_MNEM_ldr_sp:
16270 case T_MNEM_str_sp:
16271 case T_MNEM_ldr:
16272 case T_MNEM_ldrb:
16273 case T_MNEM_ldrh:
16274 case T_MNEM_str:
16275 case T_MNEM_strb:
16276 case T_MNEM_strh:
16277 if (fragp->fr_var == 4)
16278 {
16279 insn = THUMB_OP32(opcode);
16280 if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
16281 {
16282 insn |= (old_op & 0x700) << 4;
16283 }
16284 else
16285 {
16286 insn |= (old_op & 7) << 12;
16287 insn |= (old_op & 0x38) << 13;
16288 }
16289 insn |= 0x00000c00;
16290 put_thumb32_insn (buf, insn);
16291 reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
16292 }
16293 else
16294 {
16295 reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
16296 }
16297 pc_rel = (opcode == T_MNEM_ldr_pc2);
16298 break;
16299 case T_MNEM_adr:
16300 if (fragp->fr_var == 4)
16301 {
16302 insn = THUMB_OP32 (opcode);
16303 insn |= (old_op & 0xf0) << 4;
16304 put_thumb32_insn (buf, insn);
16305 reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
16306 }
16307 else
16308 {
16309 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
16310 exp.X_add_number -= 4;
16311 }
16312 pc_rel = 1;
16313 break;
16314 case T_MNEM_mov:
16315 case T_MNEM_movs:
16316 case T_MNEM_cmp:
16317 case T_MNEM_cmn:
16318 if (fragp->fr_var == 4)
16319 {
16320 int r0off = (opcode == T_MNEM_mov
16321 || opcode == T_MNEM_movs) ? 0 : 8;
16322 insn = THUMB_OP32 (opcode);
16323 insn = (insn & 0xe1ffffff) | 0x10000000;
16324 insn |= (old_op & 0x700) << r0off;
16325 put_thumb32_insn (buf, insn);
16326 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
16327 }
16328 else
16329 {
16330 reloc_type = BFD_RELOC_ARM_THUMB_IMM;
16331 }
16332 pc_rel = 0;
16333 break;
16334 case T_MNEM_b:
16335 if (fragp->fr_var == 4)
16336 {
16337 insn = THUMB_OP32(opcode);
16338 put_thumb32_insn (buf, insn);
16339 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
16340 }
16341 else
16342 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
16343 pc_rel = 1;
16344 break;
16345 case T_MNEM_bcond:
16346 if (fragp->fr_var == 4)
16347 {
16348 insn = THUMB_OP32(opcode);
16349 insn |= (old_op & 0xf00) << 14;
16350 put_thumb32_insn (buf, insn);
16351 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
16352 }
16353 else
16354 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
16355 pc_rel = 1;
16356 break;
16357 case T_MNEM_add_sp:
16358 case T_MNEM_add_pc:
16359 case T_MNEM_inc_sp:
16360 case T_MNEM_dec_sp:
16361 if (fragp->fr_var == 4)
16362 {
16363 /* ??? Choose between add and addw. */
16364 insn = THUMB_OP32 (opcode);
16365 insn |= (old_op & 0xf0) << 4;
16366 put_thumb32_insn (buf, insn);
16367 if (opcode == T_MNEM_add_pc)
16368 reloc_type = BFD_RELOC_ARM_T32_IMM12;
16369 else
16370 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
16371 }
16372 else
16373 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
16374 pc_rel = 0;
16375 break;
16376
16377 case T_MNEM_addi:
16378 case T_MNEM_addis:
16379 case T_MNEM_subi:
16380 case T_MNEM_subis:
16381 if (fragp->fr_var == 4)
16382 {
16383 insn = THUMB_OP32 (opcode);
16384 insn |= (old_op & 0xf0) << 4;
16385 insn |= (old_op & 0xf) << 16;
16386 put_thumb32_insn (buf, insn);
16387 if (insn & (1 << 20))
16388 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
16389 else
16390 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
16391 }
16392 else
16393 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
16394 pc_rel = 0;
16395 break;
16396 default:
16397 abort();
16398 }
16399 fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
16400 reloc_type);
16401 fixp->fx_file = fragp->fr_file;
16402 fixp->fx_line = fragp->fr_line;
16403 fragp->fr_fix += fragp->fr_var;
16404 }
16405
16406 /* Return the size of a relaxable immediate operand instruction.
16407 SHIFT and SIZE specify the form of the allowable immediate. */
16408 static int
16409 relax_immediate (fragS *fragp, int size, int shift)
16410 {
16411 offsetT offset;
16412 offsetT mask;
16413 offsetT low;
16414
16415 /* ??? Should be able to do better than this. */
16416 if (fragp->fr_symbol)
16417 return 4;
16418
16419 low = (1 << shift) - 1;
16420 mask = (1 << (shift + size)) - (1 << shift);
16421 offset = fragp->fr_offset;
16422 /* Force misaligned offsets to 32-bit variant. */
16423 if (offset & low)
16424 return -4;
16425 if (offset & ~mask)
16426 return 4;
16427 return 2;
16428 }
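
/* A worked example (added for clarity, not in the original source): for
   the narrow "ldr Rd, [Rn, #imm]" case arm_relax_frag below calls
   relax_immediate (fragp, 5, 2), so low = 3 and mask = 0x7c.  A known
   offset of 124 (0x7c) passes both tests and the 16-bit encoding is kept
   (return 2); 128 sets a bit outside the mask and needs the 32-bit form
   (return 4); 6 is not a multiple of 4, so -4 pins the frag to the
   32-bit form permanently.  */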
16429
16430 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
16431 load. */
16432 static int
16433 relax_adr (fragS *fragp, asection *sec)
16434 {
16435 addressT addr;
16436 offsetT val;
16437
16438 /* Assume worst case for symbols not known to be in the same section. */
16439 if (!S_IS_DEFINED(fragp->fr_symbol)
16440 || sec != S_GET_SEGMENT (fragp->fr_symbol))
16441 return 4;
16442
16443 val = S_GET_VALUE(fragp->fr_symbol) + fragp->fr_offset;
16444 addr = fragp->fr_address + fragp->fr_fix;
16445 addr = (addr + 4) & ~3;
16446 /* Fix the insn as the 4-byte version if the target address is not
16447 sufficiently aligned. This prevents an infinite loop when two
16448 instructions have contradictory range/alignment requirements. */
16449 if (val & 3)
16450 return -4;
16451 val -= addr;
16452 if (val < 0 || val > 1020)
16453 return 4;
16454 return 2;
16455 }
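
/* Illustrative numbers (an addition, not from the original source): for a
   16-bit "adr" at address 0x102 the base is (0x102 + 4) & ~3 = 0x104.  A
   word-aligned target of 0x200 gives val = 0xfc, within 0..1020, so the
   2-byte form survives; a target of 0x600 gives 0x4fc and forces the
   4-byte form; an unaligned target such as 0x203 returns -4, fixing the
   frag at 4 bytes so relaxation cannot oscillate.  */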
16456
16457 /* Return the size of a relaxable add/sub immediate instruction. */
16458 static int
16459 relax_addsub (fragS *fragp, asection *sec)
16460 {
16461 char *buf;
16462 int op;
16463
16464 buf = fragp->fr_literal + fragp->fr_fix;
16465 op = bfd_get_16(sec->owner, buf);
16466 if ((op & 0xf) == ((op >> 4) & 0xf))
16467 return relax_immediate (fragp, 8, 0);
16468 else
16469 return relax_immediate (fragp, 3, 0);
16470 }
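
/* Editorial note (an assumption based on how md_convert_frag above
   unpacks the placeholder): the low two nibbles of the 16-bit placeholder
   hold the source and destination registers, so when they match the
   8-bit immediate form "adds Rd, #imm8" is still a candidate (size 8,
   shift 0), otherwise only the 3-bit form "adds Rd, Rn, #imm3" remains.  */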
16471
16472
16473 /* Return the size of a relaxable branch instruction. BITS is the
16474 size of the offset field in the narrow instruction. */
16475
16476 static int
16477 relax_branch (fragS *fragp, asection *sec, int bits)
16478 {
16479 addressT addr;
16480 offsetT val;
16481 offsetT limit;
16482
16483 /* Assume worst case for symbols not known to be in the same section. */
16484 if (!S_IS_DEFINED(fragp->fr_symbol)
16485 || sec != S_GET_SEGMENT (fragp->fr_symbol))
16486 return 4;
16487
16488 val = S_GET_VALUE(fragp->fr_symbol) + fragp->fr_offset;
16489 addr = fragp->fr_address + fragp->fr_fix + 4;
16490 val -= addr;
16491
16492 /* The instruction stores the offset as a signed halfword count (value / 2), so BITS bits of field give roughly a +/- (1 << BITS) byte reach. */
16493 limit = 1 << bits;
16494 if (val >= limit || val < -limit)
16495 return 4;
16496 return 2;
16497 }
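
/* Rough numbers (added as an illustration): the narrow unconditional
   branch is checked with BITS = 11, so limit = 2048 and any displacement
   from (insn address + 4) outside [-2048, 2047] bytes forces the 32-bit
   encoding; the conditional form uses BITS = 8, i.e. roughly a +/-256
   byte reach before it has to be widened.  */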
16498
16499
16500 /* Relax a machine dependent frag. This returns the amount by which
16501 the current size of the frag should change. */
16502
16503 int
16504 arm_relax_frag (asection *sec, fragS *fragp, long stretch ATTRIBUTE_UNUSED)
16505 {
16506 int oldsize;
16507 int newsize;
16508
16509 oldsize = fragp->fr_var;
16510 switch (fragp->fr_subtype)
16511 {
16512 case T_MNEM_ldr_pc2:
16513 newsize = relax_adr(fragp, sec);
16514 break;
16515 case T_MNEM_ldr_pc:
16516 case T_MNEM_ldr_sp:
16517 case T_MNEM_str_sp:
16518 newsize = relax_immediate(fragp, 8, 2);
16519 break;
16520 case T_MNEM_ldr:
16521 case T_MNEM_str:
16522 newsize = relax_immediate(fragp, 5, 2);
16523 break;
16524 case T_MNEM_ldrh:
16525 case T_MNEM_strh:
16526 newsize = relax_immediate(fragp, 5, 1);
16527 break;
16528 case T_MNEM_ldrb:
16529 case T_MNEM_strb:
16530 newsize = relax_immediate(fragp, 5, 0);
16531 break;
16532 case T_MNEM_adr:
16533 newsize = relax_adr(fragp, sec);
16534 break;
16535 case T_MNEM_mov:
16536 case T_MNEM_movs:
16537 case T_MNEM_cmp:
16538 case T_MNEM_cmn:
16539 newsize = relax_immediate(fragp, 8, 0);
16540 break;
16541 case T_MNEM_b:
16542 newsize = relax_branch(fragp, sec, 11);
16543 break;
16544 case T_MNEM_bcond:
16545 newsize = relax_branch(fragp, sec, 8);
16546 break;
16547 case T_MNEM_add_sp:
16548 case T_MNEM_add_pc:
16549 newsize = relax_immediate (fragp, 8, 2);
16550 break;
16551 case T_MNEM_inc_sp:
16552 case T_MNEM_dec_sp:
16553 newsize = relax_immediate (fragp, 7, 2);
16554 break;
16555 case T_MNEM_addi:
16556 case T_MNEM_addis:
16557 case T_MNEM_subi:
16558 case T_MNEM_subis:
16559 newsize = relax_addsub (fragp, sec);
16560 break;
16561 default:
16562 abort();
16563 }
16564 if (newsize < 0)
16565 {
16566 fragp->fr_var = -newsize;
16567 md_convert_frag (sec->owner, sec, fragp);
16568 frag_wane(fragp);
16569 return -(newsize + oldsize);
16570 }
16571 fragp->fr_var = newsize;
16572 return newsize - oldsize;
16573 }
16574
16575 /* Round up a section size to the appropriate boundary. */
16576
16577 valueT
16578 md_section_align (segT segment ATTRIBUTE_UNUSED,
16579 valueT size)
16580 {
16581 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
16582 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
16583 {
16584 /* For a.out, force the section size to be aligned. If we don't do
16585 this, BFD will align it for us, but it will not write out the
16586 final bytes of the section. This may be a bug in BFD, but it is
16587 easier to fix it here since that is how the other a.out targets
16588 work. */
16589 int align;
16590
16591 align = bfd_get_section_alignment (stdoutput, segment);
16592 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
16593 }
16594 #endif
16595
16596 return size;
16597 }
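
/* For instance (illustrative): in the a.out case, with a section
   alignment of 2 (4-byte alignment) a 5 byte section is rounded up to
   (5 + 3) & ~3 = 8 bytes here, so BFD writes out the padded tail rather
   than dropping it.  */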
16598
16599 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
16600 of an rs_align_code fragment. */
16601
16602 void
16603 arm_handle_align (fragS * fragP)
16604 {
16605 static char const arm_noop[4] = { 0x00, 0x00, 0xa0, 0xe1 };
16606 static char const thumb_noop[2] = { 0xc0, 0x46 };
16607 static char const arm_bigend_noop[4] = { 0xe1, 0xa0, 0x00, 0x00 };
16608 static char const thumb_bigend_noop[2] = { 0x46, 0xc0 };
16609
16610 int bytes, fix, noop_size;
16611 char * p;
16612 const char * noop;
16613
16614 if (fragP->fr_type != rs_align_code)
16615 return;
16616
16617 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
16618 p = fragP->fr_literal + fragP->fr_fix;
16619 fix = 0;
16620
16621 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
16622 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
16623
16624 if (fragP->tc_frag_data)
16625 {
16626 if (target_big_endian)
16627 noop = thumb_bigend_noop;
16628 else
16629 noop = thumb_noop;
16630 noop_size = sizeof (thumb_noop);
16631 }
16632 else
16633 {
16634 if (target_big_endian)
16635 noop = arm_bigend_noop;
16636 else
16637 noop = arm_noop;
16638 noop_size = sizeof (arm_noop);
16639 }
16640
16641 if (bytes & (noop_size - 1))
16642 {
16643 fix = bytes & (noop_size - 1);
16644 memset (p, 0, fix);
16645 p += fix;
16646 bytes -= fix;
16647 }
16648
16649 while (bytes >= noop_size)
16650 {
16651 memcpy (p, noop, noop_size);
16652 p += noop_size;
16653 bytes -= noop_size;
16654 fix += noop_size;
16655 }
16656
16657 fragP->fr_fix += fix;
16658 fragP->fr_var = noop_size;
16659 }
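
/* By way of example (an editorial addition): if an ARM-state frag needs
   10 bytes of padding, noop_size is 4, so the first 10 & 3 = 2 bytes are
   zero filled and the remaining 8 bytes become two "mov r0, r0" nops;
   fr_fix grows by the full 10 bytes and fr_var is left holding the nop
   size.  */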
16660
16661 /* Called from md_do_align. Used to create an alignment
16662 frag in a code section. */
16663
16664 void
16665 arm_frag_align_code (int n, int max)
16666 {
16667 char * p;
16668
16669 /* We assume that there will never be a requirement
16670 to support alignments greater than 32 bytes. */
16671 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
16672 as_fatal (_("alignments greater than 32 bytes not supported in .text sections."));
16673
16674 p = frag_var (rs_align_code,
16675 MAX_MEM_FOR_RS_ALIGN_CODE,
16676 1,
16677 (relax_substateT) max,
16678 (symbolS *) NULL,
16679 (offsetT) n,
16680 (char *) NULL);
16681 *p = 0;
16682 }
16683
16684 /* Perform target specific initialisation of a frag. */
16685
16686 void
16687 arm_init_frag (fragS * fragP)
16688 {
16689 /* Record whether this frag is in an ARM or a THUMB area. */
16690 fragP->tc_frag_data = thumb_mode;
16691 }
16692
16693 #ifdef OBJ_ELF
16694 /* When we change sections we need to issue a new mapping symbol. */
16695
16696 void
16697 arm_elf_change_section (void)
16698 {
16699 flagword flags;
16700 segment_info_type *seginfo;
16701
16702 /* Link an unlinked unwind index table section to the .text section. */
16703 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
16704 && elf_linked_to_section (now_seg) == NULL)
16705 elf_linked_to_section (now_seg) = text_section;
16706
16707 if (!SEG_NORMAL (now_seg))
16708 return;
16709
16710 flags = bfd_get_section_flags (stdoutput, now_seg);
16711
16712 /* We can ignore sections that only contain debug info. */
16713 if ((flags & SEC_ALLOC) == 0)
16714 return;
16715
16716 seginfo = seg_info (now_seg);
16717 mapstate = seginfo->tc_segment_info_data.mapstate;
16718 marked_pr_dependency = seginfo->tc_segment_info_data.marked_pr_dependency;
16719 }
16720
16721 int
16722 arm_elf_section_type (const char * str, size_t len)
16723 {
16724 if (len == 5 && strncmp (str, "exidx", 5) == 0)
16725 return SHT_ARM_EXIDX;
16726
16727 return -1;
16728 }
16729 \f
16730 /* Code to deal with unwinding tables. */
16731
16732 static void add_unwind_adjustsp (offsetT);
16733
16734 /* Generate any deferred unwind frame offset. */
16735
16736 static void
16737 flush_pending_unwind (void)
16738 {
16739 offsetT offset;
16740
16741 offset = unwind.pending_offset;
16742 unwind.pending_offset = 0;
16743 if (offset != 0)
16744 add_unwind_adjustsp (offset);
16745 }
16746
16747 /* Add an opcode to this list for this function. Two-byte opcodes should
16748 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
16749 order. */
16750
16751 static void
16752 add_unwind_opcode (valueT op, int length)
16753 {
16754 /* Add any deferred stack adjustment. */
16755 if (unwind.pending_offset)
16756 flush_pending_unwind ();
16757
16758 unwind.sp_restored = 0;
16759
16760 if (unwind.opcode_count + length > unwind.opcode_alloc)
16761 {
16762 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
16763 if (unwind.opcodes)
16764 unwind.opcodes = xrealloc (unwind.opcodes,
16765 unwind.opcode_alloc);
16766 else
16767 unwind.opcodes = xmalloc (unwind.opcode_alloc);
16768 }
16769 while (length > 0)
16770 {
16771 length--;
16772 unwind.opcodes[unwind.opcode_count] = op & 0xff;
16773 op >>= 8;
16774 unwind.opcode_count++;
16775 }
16776 }
16777
16778 /* Add unwind opcodes to adjust the stack pointer. */
16779
16780 static void
16781 add_unwind_adjustsp (offsetT offset)
16782 {
16783 valueT op;
16784
16785 if (offset > 0x200)
16786 {
16787 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
16788 char bytes[5];
16789 int n;
16790 valueT o;
16791
16792 /* Long form: 0xb2, uleb128. */
16793 /* This might not fit in a word so add the individual bytes,
16794 remembering the list is built in reverse order. */
16795 o = (valueT) ((offset - 0x204) >> 2);
16796 if (o == 0)
16797 add_unwind_opcode (0, 1);
16798
16799 /* Calculate the uleb128 encoding of the offset. */
16800 n = 0;
16801 while (o)
16802 {
16803 bytes[n] = o & 0x7f;
16804 o >>= 7;
16805 if (o)
16806 bytes[n] |= 0x80;
16807 n++;
16808 }
16809 /* Add the insn. */
16810 for (; n; n--)
16811 add_unwind_opcode (bytes[n - 1], 1);
16812 add_unwind_opcode (0xb2, 1);
16813 }
16814 else if (offset > 0x100)
16815 {
16816 /* Two short opcodes. */
16817 add_unwind_opcode (0x3f, 1);
16818 op = (offset - 0x104) >> 2;
16819 add_unwind_opcode (op, 1);
16820 }
16821 else if (offset > 0)
16822 {
16823 /* Short opcode. */
16824 op = (offset - 4) >> 2;
16825 add_unwind_opcode (op, 1);
16826 }
16827 else if (offset < 0)
16828 {
16829 offset = -offset;
16830 while (offset > 0x100)
16831 {
16832 add_unwind_opcode (0x7f, 1);
16833 offset -= 0x100;
16834 }
16835 op = ((offset - 4) >> 2) | 0x40;
16836 add_unwind_opcode (op, 1);
16837 }
16838 }
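
/* Worked examples (added; the opcode meanings follow the ARM EHABI, the
   numbers are only illustrative): an adjustment of 16 fits the short form
   as the single opcode 0x03 (vsp += (3 << 2) + 4); 0x180 needs two short
   opcodes, 0x3f (+0x100) and 0x1f (+0x80); 0x300 takes the long form,
   emitted as 0xb2 followed by the uleb128 byte 0x3f, meaning
   vsp += 0x204 + (0x3f << 2).  Since the list is built in reverse, the
   0xb2 prefix is added last here but appears first in the final table.  */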
16839
16840 /* Finish the list of unwind opcodes for this function. */
16841 static void
16842 finish_unwind_opcodes (void)
16843 {
16844 valueT op;
16845
16846 if (unwind.fp_used)
16847 {
16848 /* Adjust sp as necessary. */
16849 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
16850 flush_pending_unwind ();
16851
16852 /* After restoring sp from the frame pointer. */
16853 op = 0x90 | unwind.fp_reg;
16854 add_unwind_opcode (op, 1);
16855 }
16856 else
16857 flush_pending_unwind ();
16858 }
16859
16860
16861 /* Start an exception table entry. If idx is nonzero this is an index table
16862 entry. */
16863
16864 static void
16865 start_unwind_section (const segT text_seg, int idx)
16866 {
16867 const char * text_name;
16868 const char * prefix;
16869 const char * prefix_once;
16870 const char * group_name;
16871 size_t prefix_len;
16872 size_t text_len;
16873 char * sec_name;
16874 size_t sec_name_len;
16875 int type;
16876 int flags;
16877 int linkonce;
16878
16879 if (idx)
16880 {
16881 prefix = ELF_STRING_ARM_unwind;
16882 prefix_once = ELF_STRING_ARM_unwind_once;
16883 type = SHT_ARM_EXIDX;
16884 }
16885 else
16886 {
16887 prefix = ELF_STRING_ARM_unwind_info;
16888 prefix_once = ELF_STRING_ARM_unwind_info_once;
16889 type = SHT_PROGBITS;
16890 }
16891
16892 text_name = segment_name (text_seg);
16893 if (streq (text_name, ".text"))
16894 text_name = "";
16895
16896 if (strncmp (text_name, ".gnu.linkonce.t.",
16897 strlen (".gnu.linkonce.t.")) == 0)
16898 {
16899 prefix = prefix_once;
16900 text_name += strlen (".gnu.linkonce.t.");
16901 }
16902
16903 prefix_len = strlen (prefix);
16904 text_len = strlen (text_name);
16905 sec_name_len = prefix_len + text_len;
16906 sec_name = xmalloc (sec_name_len + 1);
16907 memcpy (sec_name, prefix, prefix_len);
16908 memcpy (sec_name + prefix_len, text_name, text_len);
16909 sec_name[prefix_len + text_len] = '\0';
16910
16911 flags = SHF_ALLOC;
16912 linkonce = 0;
16913 group_name = 0;
16914
16915 /* Handle COMDAT group. */
16916 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
16917 {
16918 group_name = elf_group_name (text_seg);
16919 if (group_name == NULL)
16920 {
16921 as_bad ("Group section `%s' has no group signature",
16922 segment_name (text_seg));
16923 ignore_rest_of_line ();
16924 return;
16925 }
16926 flags |= SHF_GROUP;
16927 linkonce = 1;
16928 }
16929
16930 obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
16931
16932 /* Set the section link for index tables. */
16933 if (idx)
16934 elf_linked_to_section (now_seg) = text_seg;
16935 }
16936
16937
16938 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
16939 personality routine data. Returns zero, or the index table value for
16940 an inline entry. */
16941
16942 static valueT
16943 create_unwind_entry (int have_data)
16944 {
16945 int size;
16946 addressT where;
16947 char *ptr;
16948 /* The current word of data. */
16949 valueT data;
16950 /* The number of bytes left in this word. */
16951 int n;
16952
16953 finish_unwind_opcodes ();
16954
16955 /* Remember the current text section. */
16956 unwind.saved_seg = now_seg;
16957 unwind.saved_subseg = now_subseg;
16958
16959 start_unwind_section (now_seg, 0);
16960
16961 if (unwind.personality_routine == NULL)
16962 {
16963 if (unwind.personality_index == -2)
16964 {
16965 if (have_data)
16966 as_bad (_("handlerdata in cantunwind frame"));
16967 return 1; /* EXIDX_CANTUNWIND. */
16968 }
16969
16970 /* Use a default personality routine if none is specified. */
16971 if (unwind.personality_index == -1)
16972 {
16973 if (unwind.opcode_count > 3)
16974 unwind.personality_index = 1;
16975 else
16976 unwind.personality_index = 0;
16977 }
16978
16979 /* Space for the personality routine entry. */
16980 if (unwind.personality_index == 0)
16981 {
16982 if (unwind.opcode_count > 3)
16983 as_bad (_("too many unwind opcodes for personality routine 0"));
16984
16985 if (!have_data)
16986 {
16987 /* All the data is inline in the index table. */
16988 data = 0x80;
16989 n = 3;
16990 while (unwind.opcode_count > 0)
16991 {
16992 unwind.opcode_count--;
16993 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
16994 n--;
16995 }
16996
16997 /* Pad with "finish" opcodes. */
16998 while (n--)
16999 data = (data << 8) | 0xb0;
17000
17001 return data;
17002 }
17003 size = 0;
17004 }
17005 else
17006 /* We get two opcodes "free" in the first word. */
17007 size = unwind.opcode_count - 2;
17008 }
17009 else
17010 /* An extra byte is required for the opcode count. */
17011 size = unwind.opcode_count + 1;
17012
17013 size = (size + 3) >> 2;
17014 if (size > 0xff)
17015 as_bad (_("too many unwind opcodes"));
17016
17017 frag_align (2, 0, 0);
17018 record_alignment (now_seg, 2);
17019 unwind.table_entry = expr_build_dot ();
17020
17021 /* Allocate the table entry. */
17022 ptr = frag_more ((size << 2) + 4);
17023 where = frag_now_fix () - ((size << 2) + 4);
17024
17025 switch (unwind.personality_index)
17026 {
17027 case -1:
17028 /* ??? Should this be a PLT generating relocation? */
17029 /* Custom personality routine. */
17030 fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
17031 BFD_RELOC_ARM_PREL31);
17032
17033 where += 4;
17034 ptr += 4;
17035
17036 /* Set the first byte to the number of additional words. */
17037 data = size - 1;
17038 n = 3;
17039 break;
17040
17041 /* ABI defined personality routines. */
17042 case 0:
17043 /* Three opcode bytes are packed into the first word. */
17044 data = 0x80;
17045 n = 3;
17046 break;
17047
17048 case 1:
17049 case 2:
17050 /* The size and first two opcode bytes go in the first word. */
17051 data = ((0x80 + unwind.personality_index) << 8) | size;
17052 n = 2;
17053 break;
17054
17055 default:
17056 /* Should never happen. */
17057 abort ();
17058 }
17059
17060 /* Pack the opcodes into words (MSB first), reversing the list at the same
17061 time. */
17062 while (unwind.opcode_count > 0)
17063 {
17064 if (n == 0)
17065 {
17066 md_number_to_chars (ptr, data, 4);
17067 ptr += 4;
17068 n = 4;
17069 data = 0;
17070 }
17071 unwind.opcode_count--;
17072 n--;
17073 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
17074 }
17075
17076 /* Finish off the last word. */
17077 if (n < 4)
17078 {
17079 /* Pad with "finish" opcodes. */
17080 while (n--)
17081 data = (data << 8) | 0xb0;
17082
17083 md_number_to_chars (ptr, data, 4);
17084 }
17085
17086 if (!have_data)
17087 {
17088 /* Add an empty descriptor if there is no user-specified data. */
17089 ptr = frag_more (4);
17090 md_number_to_chars (ptr, 0, 4);
17091 }
17092
17093 return 0;
17094 }
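
/* A small illustration (added; X and Y stand for arbitrary opcode bytes):
   with personality routine 0, no handler data and two recorded opcodes X
   then Y, the whole entry collapses to the single inline word
   0x80:Y:X:0xb0 -- 0x80 in the top byte marks a compact entry using
   personality routine 0, the opcodes appear most recently added first,
   and 0xb0 ("finish") pads the unused byte.  */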
17095
17096
17097 /* Initialize the DWARF-2 unwind information for this procedure. */
17098
17099 void
17100 tc_arm_frame_initial_instructions (void)
17101 {
17102 cfi_add_CFA_def_cfa (REG_SP, 0);
17103 }
17104 #endif /* OBJ_ELF */
17105
17106 /* Convert REGNAME to a DWARF-2 register number. */
17107
17108 int
17109 tc_arm_regname_to_dw2regnum (char *regname)
17110 {
17111 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
17112
17113 if (reg == FAIL)
17114 return -1;
17115
17116 return reg;
17117 }
17118
17119 #ifdef TE_PE
17120 void
17121 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
17122 {
17123 expressionS expr;
17124
17125 expr.X_op = O_secrel;
17126 expr.X_add_symbol = symbol;
17127 expr.X_add_number = 0;
17128 emit_expr (&expr, size);
17129 }
17130 #endif
17131
17132 /* MD interface: Symbol and relocation handling. */
17133
17134 /* Return the address within the segment that a PC-relative fixup is
17135 relative to. For ARM, PC-relative fixups applied to instructions
17136 are generally relative to the location of the fixup plus 8 bytes.
17137 Thumb branches are offset by 4, and Thumb loads relative to PC
17138 require special handling. */
17139
17140 long
17141 md_pcrel_from_section (fixS * fixP, segT seg)
17142 {
17143 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
17144
17145 /* If this is pc-relative and we are going to emit a relocation
17146 then we just want to put out any pipeline compensation that the linker
17147 will need. Otherwise we want to use the calculated base.
17148 For WinCE we skip the bias for externals as well, since this
17149 is how the MS ARM-CE assembler behaves and we want to be compatible. */
17150 if (fixP->fx_pcrel
17151 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
17152 || (arm_force_relocation (fixP)
17153 #ifdef TE_WINCE
17154 && !S_IS_EXTERNAL (fixP->fx_addsy)
17155 #endif
17156 )))
17157 base = 0;
17158
17159 switch (fixP->fx_r_type)
17160 {
17161 /* PC relative addressing on the Thumb is slightly odd as the
17162 bottom two bits of the PC are forced to zero for the
17163 calculation. This happens *after* application of the
17164 pipeline offset. However, Thumb adrl already adjusts for
17165 this, so we need not do it again. */
17166 case BFD_RELOC_ARM_THUMB_ADD:
17167 return base & ~3;
17168
17169 case BFD_RELOC_ARM_THUMB_OFFSET:
17170 case BFD_RELOC_ARM_T32_OFFSET_IMM:
17171 case BFD_RELOC_ARM_T32_ADD_PC12:
17172 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
17173 return (base + 4) & ~3;
17174
17175 /* Thumb branches are simply offset by +4. */
17176 case BFD_RELOC_THUMB_PCREL_BRANCH7:
17177 case BFD_RELOC_THUMB_PCREL_BRANCH9:
17178 case BFD_RELOC_THUMB_PCREL_BRANCH12:
17179 case BFD_RELOC_THUMB_PCREL_BRANCH20:
17180 case BFD_RELOC_THUMB_PCREL_BRANCH23:
17181 case BFD_RELOC_THUMB_PCREL_BRANCH25:
17182 case BFD_RELOC_THUMB_PCREL_BLX:
17183 return base + 4;
17184
17185 /* ARM mode branches are offset by +8. However, the Windows CE
17186 loader expects the relocation not to take this into account. */
17187 case BFD_RELOC_ARM_PCREL_BRANCH:
17188 case BFD_RELOC_ARM_PCREL_CALL:
17189 case BFD_RELOC_ARM_PCREL_JUMP:
17190 case BFD_RELOC_ARM_PCREL_BLX:
17191 case BFD_RELOC_ARM_PLT32:
17192 #ifdef TE_WINCE
17193 /* When handling fixups immediately, because we have already
17194 discovered the value of a symbol, or the address of the frag involved,
17195 we must account for the offset of +8, as the OS loader will never see the reloc;
17196 see fixup_segment() in write.c.
17197 The S_IS_EXTERNAL test handles the case of global symbols.
17198 Those need the calculated base, not just the pipeline compensation the linker will need. */
17199 if (fixP->fx_pcrel
17200 && fixP->fx_addsy != NULL
17201 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
17202 && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
17203 return base + 8;
17204 return base;
17205 #else
17206 return base + 8;
17207 #endif
17208
17209 /* ARM mode loads relative to PC are also offset by +8. Unlike
17210 branches, the Windows CE loader *does* expect the relocation
17211 to take this into account. */
17212 case BFD_RELOC_ARM_OFFSET_IMM:
17213 case BFD_RELOC_ARM_OFFSET_IMM8:
17214 case BFD_RELOC_ARM_HWLITERAL:
17215 case BFD_RELOC_ARM_LITERAL:
17216 case BFD_RELOC_ARM_CP_OFF_IMM:
17217 return base + 8;
17218
17219
17220 /* Other PC-relative relocations are un-offset. */
17221 default:
17222 return base;
17223 }
17224 }
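
/* Concrete numbers (illustrative, not part of the original comments): for
   a locally resolved ARM "b" at address 0x1000 targeting 0x1010 the base
   returned here is 0x1008, so md_apply_fix sees a value of 8 and encodes
   8 >> 2 = 2 in the 24-bit branch field; the Thumb branch cases use a +4
   bias instead, matching what each pipeline exposes as PC.  */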
17225
17226 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
17227 Otherwise we have no need to default values of symbols. */
17228
17229 symbolS *
17230 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
17231 {
17232 #ifdef OBJ_ELF
17233 if (name[0] == '_' && name[1] == 'G'
17234 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
17235 {
17236 if (!GOT_symbol)
17237 {
17238 if (symbol_find (name))
17239 as_bad ("GOT already in the symbol table");
17240
17241 GOT_symbol = symbol_new (name, undefined_section,
17242 (valueT) 0, & zero_address_frag);
17243 }
17244
17245 return GOT_symbol;
17246 }
17247 #endif
17248
17249 return 0;
17250 }
17251
17252 /* Subroutine of md_apply_fix. Check to see if an immediate can be
17253 computed as two separate immediate values, added together. We
17254 already know that this value cannot be computed by just one ARM
17255 instruction. */
17256
17257 static unsigned int
17258 validate_immediate_twopart (unsigned int val,
17259 unsigned int * highpart)
17260 {
17261 unsigned int a;
17262 unsigned int i;
17263
17264 for (i = 0; i < 32; i += 2)
17265 if (((a = rotate_left (val, i)) & 0xff) != 0)
17266 {
17267 if (a & 0xff00)
17268 {
17269 if (a & ~ 0xffff)
17270 continue;
17271 * highpart = (a >> 8) | ((i + 24) << 7);
17272 }
17273 else if (a & 0xff0000)
17274 {
17275 if (a & 0xff000000)
17276 continue;
17277 * highpart = (a >> 16) | ((i + 16) << 7);
17278 }
17279 else
17280 {
17281 assert (a & 0xff000000);
17282 * highpart = (a >> 24) | ((i + 8) << 7);
17283 }
17284
17285 return (a & 0xff) | (i << 7);
17286 }
17287
17288 return FAIL;
17289 }
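
/* For example (an added illustration): 0xffff is not a single rotated
   8-bit immediate, but it splits as 0xff plus 0xff00: the return value is
   0x0ff (imm 0xff, no rotation) and *highpart becomes 0xcff (imm 0xff
   rotated right by 24, i.e. 0xff00), ready to be merged into the two
   instructions of an ADRL-style ADD/ADD pair.  */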
17290
17291 static int
17292 validate_offset_imm (unsigned int val, int hwse)
17293 {
17294 if ((hwse && val > 255) || val > 4095)
17295 return FAIL;
17296 return val;
17297 }
17298
17299 /* Subroutine of md_apply_fix. Do those data_ops which can take a
17300 negative immediate constant by altering the instruction. A bit of
17301 a hack really.
17302 MOV <-> MVN
17303 AND <-> BIC
17304 ADC <-> SBC
17305 by inverting the second operand, and
17306 ADD <-> SUB
17307 CMP <-> CMN
17308 by negating the second operand. */
17309
17310 static int
17311 negate_data_op (unsigned long * instruction,
17312 unsigned long value)
17313 {
17314 int op, new_inst;
17315 unsigned long negated, inverted;
17316
17317 negated = encode_arm_immediate (-value);
17318 inverted = encode_arm_immediate (~value);
17319
17320 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
17321 switch (op)
17322 {
17323 /* First negates. */
17324 case OPCODE_SUB: /* ADD <-> SUB */
17325 new_inst = OPCODE_ADD;
17326 value = negated;
17327 break;
17328
17329 case OPCODE_ADD:
17330 new_inst = OPCODE_SUB;
17331 value = negated;
17332 break;
17333
17334 case OPCODE_CMP: /* CMP <-> CMN */
17335 new_inst = OPCODE_CMN;
17336 value = negated;
17337 break;
17338
17339 case OPCODE_CMN:
17340 new_inst = OPCODE_CMP;
17341 value = negated;
17342 break;
17343
17344 /* Now Inverted ops. */
17345 case OPCODE_MOV: /* MOV <-> MVN */
17346 new_inst = OPCODE_MVN;
17347 value = inverted;
17348 break;
17349
17350 case OPCODE_MVN:
17351 new_inst = OPCODE_MOV;
17352 value = inverted;
17353 break;
17354
17355 case OPCODE_AND: /* AND <-> BIC */
17356 new_inst = OPCODE_BIC;
17357 value = inverted;
17358 break;
17359
17360 case OPCODE_BIC:
17361 new_inst = OPCODE_AND;
17362 value = inverted;
17363 break;
17364
17365 case OPCODE_ADC: /* ADC <-> SBC */
17366 new_inst = OPCODE_SBC;
17367 value = inverted;
17368 break;
17369
17370 case OPCODE_SBC:
17371 new_inst = OPCODE_ADC;
17372 value = inverted;
17373 break;
17374
17375 /* We cannot do anything. */
17376 default:
17377 return FAIL;
17378 }
17379
17380 if (value == (unsigned) FAIL)
17381 return FAIL;
17382
17383 *instruction &= OPCODE_MASK;
17384 *instruction |= new_inst << DATA_OP_SHIFT;
17385 return value;
17386 }
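
/* A quick example (added): a fixup value of -1 against an ADD cannot be
   encoded directly, since 0xffffffff is not a rotated 8-bit constant;
   this routine flips the opcode to SUB and returns
   encode_arm_immediate (1), so the instruction ends up assembled as
   "sub rd, rn, #1" with the same effect.  */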
17387
17388 /* Like negate_data_op, but for Thumb-2. */
17389
17390 static unsigned int
17391 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
17392 {
17393 int op, new_inst;
17394 int rd;
17395 unsigned int negated, inverted;
17396
17397 negated = encode_thumb32_immediate (-value);
17398 inverted = encode_thumb32_immediate (~value);
17399
17400 rd = (*instruction >> 8) & 0xf;
17401 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
17402 switch (op)
17403 {
17404 /* ADD <-> SUB. Includes CMP <-> CMN. */
17405 case T2_OPCODE_SUB:
17406 new_inst = T2_OPCODE_ADD;
17407 value = negated;
17408 break;
17409
17410 case T2_OPCODE_ADD:
17411 new_inst = T2_OPCODE_SUB;
17412 value = negated;
17413 break;
17414
17415 /* ORR <-> ORN. Includes MOV <-> MVN. */
17416 case T2_OPCODE_ORR:
17417 new_inst = T2_OPCODE_ORN;
17418 value = inverted;
17419 break;
17420
17421 case T2_OPCODE_ORN:
17422 new_inst = T2_OPCODE_ORR;
17423 value = inverted;
17424 break;
17425
17426 /* AND <-> BIC. TST has no inverted equivalent. */
17427 case T2_OPCODE_AND:
17428 new_inst = T2_OPCODE_BIC;
17429 if (rd == 15)
17430 value = FAIL;
17431 else
17432 value = inverted;
17433 break;
17434
17435 case T2_OPCODE_BIC:
17436 new_inst = T2_OPCODE_AND;
17437 value = inverted;
17438 break;
17439
17440 /* ADC <-> SBC */
17441 case T2_OPCODE_ADC:
17442 new_inst = T2_OPCODE_SBC;
17443 value = inverted;
17444 break;
17445
17446 case T2_OPCODE_SBC:
17447 new_inst = T2_OPCODE_ADC;
17448 value = inverted;
17449 break;
17450
17451 /* We cannot do anything. */
17452 default:
17453 return FAIL;
17454 }
17455
17456 if (value == (unsigned int)FAIL)
17457 return FAIL;
17458
17459 *instruction &= T2_OPCODE_MASK;
17460 *instruction |= new_inst << T2_DATA_OP_SHIFT;
17461 return value;
17462 }
17463
17464 /* Read a 32-bit thumb instruction from buf. */
17465 static unsigned long
17466 get_thumb32_insn (char * buf)
17467 {
17468 unsigned long insn;
17469 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
17470 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
17471
17472 return insn;
17473 }
17474
17475
17476 /* We usually want to set the low bit on the address of thumb function
17477 symbols. In particular .word foo - . should have the low bit set.
17478 Generic code tries to fold the difference of two symbols to
17479 a constant. Prevent this and force a relocation when the first symbols
17480 is a thumb function. */
17481 int
17482 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
17483 {
17484 if (op == O_subtract
17485 && l->X_op == O_symbol
17486 && r->X_op == O_symbol
17487 && THUMB_IS_FUNC (l->X_add_symbol))
17488 {
17489 l->X_op = O_subtract;
17490 l->X_op_symbol = r->X_add_symbol;
17491 l->X_add_number -= r->X_add_number;
17492 return 1;
17493 }
17494 /* Process as normal. */
17495 return 0;
17496 }
17497
17498 void
17499 md_apply_fix (fixS * fixP,
17500 valueT * valP,
17501 segT seg)
17502 {
17503 offsetT value = * valP;
17504 offsetT newval;
17505 unsigned int newimm;
17506 unsigned long temp;
17507 int sign;
17508 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
17509
17510 assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
17511
17512 /* Note whether this will delete the relocation. */
17513
17514 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
17515 fixP->fx_done = 1;
17516
17517 /* On a 64-bit host, silently truncate 'value' to 32 bits for
17518 consistency with the behavior on 32-bit hosts. Remember value
17519 for emit_reloc. */
17520 value &= 0xffffffff;
17521 value ^= 0x80000000;
17522 value -= 0x80000000;
17523
17524 *valP = value;
17525 fixP->fx_addnumber = value;
17526
17527 /* Same treatment for fixP->fx_offset. */
17528 fixP->fx_offset &= 0xffffffff;
17529 fixP->fx_offset ^= 0x80000000;
17530 fixP->fx_offset -= 0x80000000;
17531
17532 switch (fixP->fx_r_type)
17533 {
17534 case BFD_RELOC_NONE:
17535 /* This will need to go in the object file. */
17536 fixP->fx_done = 0;
17537 break;
17538
17539 case BFD_RELOC_ARM_IMMEDIATE:
17540 /* We claim that this fixup has been processed here,
17541 even if in fact we generate an error because we do
17542 not have a reloc for it, so tc_gen_reloc will reject it. */
17543 fixP->fx_done = 1;
17544
17545 if (fixP->fx_addsy
17546 && ! S_IS_DEFINED (fixP->fx_addsy))
17547 {
17548 as_bad_where (fixP->fx_file, fixP->fx_line,
17549 _("undefined symbol %s used as an immediate value"),
17550 S_GET_NAME (fixP->fx_addsy));
17551 break;
17552 }
17553
17554 newimm = encode_arm_immediate (value);
17555 temp = md_chars_to_number (buf, INSN_SIZE);
17556
17557 /* If the instruction will fail, see if we can fix things up by
17558 changing the opcode. */
17559 if (newimm == (unsigned int) FAIL
17560 && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL)
17561 {
17562 as_bad_where (fixP->fx_file, fixP->fx_line,
17563 _("invalid constant (%lx) after fixup"),
17564 (unsigned long) value);
17565 break;
17566 }
17567
17568 newimm |= (temp & 0xfffff000);
17569 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
17570 break;
17571
17572 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
17573 {
17574 unsigned int highpart = 0;
17575 unsigned int newinsn = 0xe1a00000; /* nop. */
17576
17577 newimm = encode_arm_immediate (value);
17578 temp = md_chars_to_number (buf, INSN_SIZE);
17579
17580 /* If the instruction will fail, see if we can fix things up by
17581 changing the opcode. */
17582 if (newimm == (unsigned int) FAIL
17583 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
17584 {
17585 /* No ? OK - try using two ADD instructions to generate
17586 the value. */
17587 newimm = validate_immediate_twopart (value, & highpart);
17588
17589 /* Yes - then make sure that the second instruction is
17590 also an add. */
17591 if (newimm != (unsigned int) FAIL)
17592 newinsn = temp;
17593 /* Still No ? Try using a negated value. */
17594 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
17595 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
17596 /* Otherwise - give up. */
17597 else
17598 {
17599 as_bad_where (fixP->fx_file, fixP->fx_line,
17600 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
17601 (long) value);
17602 break;
17603 }
17604
17605 /* Replace the first operand in the 2nd instruction (which
17606 is the PC) with the destination register. We have
17607 already added in the PC in the first instruction and we
17608 do not want to do it again. */
17609 newinsn &= ~ 0xf0000;
17610 newinsn |= ((newinsn & 0x0f000) << 4);
17611 }
17612
17613 newimm |= (temp & 0xfffff000);
17614 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
17615
17616 highpart |= (newinsn & 0xfffff000);
17617 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
17618 }
17619 break;
17620
17621 case BFD_RELOC_ARM_OFFSET_IMM:
17622 if (!fixP->fx_done && seg->use_rela_p)
17623 value = 0;
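/* Fall through.  */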
17624
17625 case BFD_RELOC_ARM_LITERAL:
17626 sign = value >= 0;
17627
17628 if (value < 0)
17629 value = - value;
17630
17631 if (validate_offset_imm (value, 0) == FAIL)
17632 {
17633 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
17634 as_bad_where (fixP->fx_file, fixP->fx_line,
17635 _("invalid literal constant: pool needs to be closer"));
17636 else
17637 as_bad_where (fixP->fx_file, fixP->fx_line,
17638 _("bad immediate value for offset (%ld)"),
17639 (long) value);
17640 break;
17641 }
17642
17643 newval = md_chars_to_number (buf, INSN_SIZE);
17644 newval &= 0xff7ff000;
17645 newval |= value | (sign ? INDEX_UP : 0);
17646 md_number_to_chars (buf, newval, INSN_SIZE);
17647 break;
17648
17649 case BFD_RELOC_ARM_OFFSET_IMM8:
17650 case BFD_RELOC_ARM_HWLITERAL:
17651 sign = value >= 0;
17652
17653 if (value < 0)
17654 value = - value;
17655
17656 if (validate_offset_imm (value, 1) == FAIL)
17657 {
17658 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
17659 as_bad_where (fixP->fx_file, fixP->fx_line,
17660 _("invalid literal constant: pool needs to be closer"));
17661 else
17662 as_bad (_("bad immediate value for half-word offset (%ld)"),
17663 (long) value);
17664 break;
17665 }
17666
17667 newval = md_chars_to_number (buf, INSN_SIZE);
17668 newval &= 0xff7ff0f0;
17669 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
17670 md_number_to_chars (buf, newval, INSN_SIZE);
17671 break;
17672
17673 case BFD_RELOC_ARM_T32_OFFSET_U8:
17674 if (value < 0 || value > 1020 || value % 4 != 0)
17675 as_bad_where (fixP->fx_file, fixP->fx_line,
17676 _("bad immediate value for offset (%ld)"), (long) value);
17677 value /= 4;
17678
17679 newval = md_chars_to_number (buf+2, THUMB_SIZE);
17680 newval |= value;
17681 md_number_to_chars (buf+2, newval, THUMB_SIZE);
17682 break;
17683
17684 case BFD_RELOC_ARM_T32_OFFSET_IMM:
17685 /* This is a complicated relocation used for all varieties of Thumb32
17686 load/store instruction with immediate offset:
17687
17688 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
17689 *4, optional writeback(W)
17690 (doubleword load/store)
17691
17692 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
17693 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
17694 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
17695 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
17696 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
17697
17698 Uppercase letters indicate bits that are already encoded at
17699 this point. Lowercase letters are our problem. For the
17700 second block of instructions, the secondary opcode nybble
17701 (bits 8..11) is present, and bit 23 is zero, even if this is
17702 a PC-relative operation. */
17703 newval = md_chars_to_number (buf, THUMB_SIZE);
17704 newval <<= 16;
17705 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
17706
17707 if ((newval & 0xf0000000) == 0xe0000000)
17708 {
17709 /* Doubleword load/store: 8-bit offset, scaled by 4. */
17710 if (value >= 0)
17711 newval |= (1 << 23);
17712 else
17713 value = -value;
17714 if (value % 4 != 0)
17715 {
17716 as_bad_where (fixP->fx_file, fixP->fx_line,
17717 _("offset not a multiple of 4"));
17718 break;
17719 }
17720 value /= 4;
17721 if (value > 0xff)
17722 {
17723 as_bad_where (fixP->fx_file, fixP->fx_line,
17724 _("offset out of range"));
17725 break;
17726 }
17727 newval &= ~0xff;
17728 }
17729 else if ((newval & 0x000f0000) == 0x000f0000)
17730 {
17731 /* PC-relative, 12-bit offset. */
17732 if (value >= 0)
17733 newval |= (1 << 23);
17734 else
17735 value = -value;
17736 if (value > 0xfff)
17737 {
17738 as_bad_where (fixP->fx_file, fixP->fx_line,
17739 _("offset out of range"));
17740 break;
17741 }
17742 newval &= ~0xfff;
17743 }
17744 else if ((newval & 0x00000100) == 0x00000100)
17745 {
17746 /* Writeback: 8-bit, +/- offset. */
17747 if (value >= 0)
17748 newval |= (1 << 9);
17749 else
17750 value = -value;
17751 if (value > 0xff)
17752 {
17753 as_bad_where (fixP->fx_file, fixP->fx_line,
17754 _("offset out of range"));
17755 break;
17756 }
17757 newval &= ~0xff;
17758 }
17759 else if ((newval & 0x00000f00) == 0x00000e00)
17760 {
17761 /* T-instruction: positive 8-bit offset. */
17762 if (value < 0 || value > 0xff)
17763 {
17764 as_bad_where (fixP->fx_file, fixP->fx_line,
17765 _("offset out of range"));
17766 break;
17767 }
17768 newval &= ~0xff;
17769 newval |= value;
17770 }
17771 else
17772 {
17773 /* Positive 12-bit or negative 8-bit offset. */
17774 int limit;
17775 if (value >= 0)
17776 {
17777 newval |= (1 << 23);
17778 limit = 0xfff;
17779 }
17780 else
17781 {
17782 value = -value;
17783 limit = 0xff;
17784 }
17785 if (value > limit)
17786 {
17787 as_bad_where (fixP->fx_file, fixP->fx_line,
17788 _("offset out of range"));
17789 break;
17790 }
17791 newval &= ~limit;
17792 }
17793
17794 newval |= value;
17795 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
17796 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
17797 break;
17798
17799 case BFD_RELOC_ARM_SHIFT_IMM:
17800 newval = md_chars_to_number (buf, INSN_SIZE);
17801 if (((unsigned long) value) > 32
17802 || (value == 32
17803 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
17804 {
17805 as_bad_where (fixP->fx_file, fixP->fx_line,
17806 _("shift expression is too large"));
17807 break;
17808 }
17809
17810 if (value == 0)
17811 /* Shifts of zero must be done as lsl. */
17812 newval &= ~0x60;
17813 else if (value == 32)
17814 value = 0;
17815 newval &= 0xfffff07f;
17816 newval |= (value & 0x1f) << 7;
17817 md_number_to_chars (buf, newval, INSN_SIZE);
17818 break;
17819
17820 case BFD_RELOC_ARM_T32_IMMEDIATE:
17821 case BFD_RELOC_ARM_T32_ADD_IMM:
17822 case BFD_RELOC_ARM_T32_IMM12:
17823 case BFD_RELOC_ARM_T32_ADD_PC12:
17824 /* We claim that this fixup has been processed here,
17825 even if in fact we generate an error because we do
17826 not have a reloc for it, so tc_gen_reloc will reject it. */
17827 fixP->fx_done = 1;
17828
17829 if (fixP->fx_addsy
17830 && ! S_IS_DEFINED (fixP->fx_addsy))
17831 {
17832 as_bad_where (fixP->fx_file, fixP->fx_line,
17833 _("undefined symbol %s used as an immediate value"),
17834 S_GET_NAME (fixP->fx_addsy));
17835 break;
17836 }
17837
17838 newval = md_chars_to_number (buf, THUMB_SIZE);
17839 newval <<= 16;
17840 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
17841
17842 newimm = FAIL;
17843 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
17844 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
17845 {
17846 newimm = encode_thumb32_immediate (value);
17847 if (newimm == (unsigned int) FAIL)
17848 newimm = thumb32_negate_data_op (&newval, value);
17849 }
17850 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
17851 && newimm == (unsigned int) FAIL)
17852 {
17853 /* Turn add/sub into addw/subw. */
17854 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
17855 newval = (newval & 0xfeffffff) | 0x02000000;
17856
17857 /* 12 bit immediate for addw/subw. */
17858 if (value < 0)
17859 {
17860 value = -value;
17861 newval ^= 0x00a00000;
17862 }
17863 if (value > 0xfff)
17864 newimm = (unsigned int) FAIL;
17865 else
17866 newimm = value;
17867 }
17868
17869 if (newimm == (unsigned int)FAIL)
17870 {
17871 as_bad_where (fixP->fx_file, fixP->fx_line,
17872 _("invalid constant (%lx) after fixup"),
17873 (unsigned long) value);
17874 break;
17875 }
17876
17877 newval |= (newimm & 0x800) << 15;
17878 newval |= (newimm & 0x700) << 4;
17879 newval |= (newimm & 0x0ff);
17880
17881 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
17882 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
17883 break;
17884
17885 case BFD_RELOC_ARM_SMC:
17886 if (((unsigned long) value) > 0xffff)
17887 as_bad_where (fixP->fx_file, fixP->fx_line,
17888 _("invalid smc expression"));
17889 newval = md_chars_to_number (buf, INSN_SIZE);
17890 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
17891 md_number_to_chars (buf, newval, INSN_SIZE);
17892 break;
17893
17894 case BFD_RELOC_ARM_SWI:
17895 if (fixP->tc_fix_data != 0)
17896 {
17897 if (((unsigned long) value) > 0xff)
17898 as_bad_where (fixP->fx_file, fixP->fx_line,
17899 _("invalid swi expression"));
17900 newval = md_chars_to_number (buf, THUMB_SIZE);
17901 newval |= value;
17902 md_number_to_chars (buf, newval, THUMB_SIZE);
17903 }
17904 else
17905 {
17906 if (((unsigned long) value) > 0x00ffffff)
17907 as_bad_where (fixP->fx_file, fixP->fx_line,
17908 _("invalid swi expression"));
17909 newval = md_chars_to_number (buf, INSN_SIZE);
17910 newval |= value;
17911 md_number_to_chars (buf, newval, INSN_SIZE);
17912 }
17913 break;
17914
17915 case BFD_RELOC_ARM_MULTI:
17916 if (((unsigned long) value) > 0xffff)
17917 as_bad_where (fixP->fx_file, fixP->fx_line,
17918 _("invalid expression in load/store multiple"));
17919 newval = value | md_chars_to_number (buf, INSN_SIZE);
17920 md_number_to_chars (buf, newval, INSN_SIZE);
17921 break;
17922
17923 #ifdef OBJ_ELF
17924 case BFD_RELOC_ARM_PCREL_CALL:
17925 newval = md_chars_to_number (buf, INSN_SIZE);
17926 if ((newval & 0xf0000000) == 0xf0000000)
17927 temp = 1;
17928 else
17929 temp = 3;
17930 goto arm_branch_common;
17931
17932 case BFD_RELOC_ARM_PCREL_JUMP:
17933 case BFD_RELOC_ARM_PLT32:
17934 #endif
17935 case BFD_RELOC_ARM_PCREL_BRANCH:
17936 temp = 3;
17937 goto arm_branch_common;
17938
17939 case BFD_RELOC_ARM_PCREL_BLX:
17940 temp = 1;
17941 arm_branch_common:
17942 /* We are going to store value (shifted right by two) in the
17943 instruction, in a 24 bit, signed field. Bits 26 through 32 must be
17944 either all clear or all set, and bit 0 must be clear. For B/BL bit 1
17945 must also be clear. */
17946 if (value & temp)
17947 as_bad_where (fixP->fx_file, fixP->fx_line,
17948 _("misaligned branch destination"));
17949 if ((value & (offsetT)0xfe000000) != (offsetT)0
17950 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
17951 as_bad_where (fixP->fx_file, fixP->fx_line,
17952 _("branch out of range"));
17953
17954 if (fixP->fx_done || !seg->use_rela_p)
17955 {
17956 newval = md_chars_to_number (buf, INSN_SIZE);
17957 newval |= (value >> 2) & 0x00ffffff;
17958 /* Set the H bit on BLX instructions. */
17959 if (temp == 1)
17960 {
17961 if (value & 2)
17962 newval |= 0x01000000;
17963 else
17964 newval &= ~0x01000000;
17965 }
17966 md_number_to_chars (buf, newval, INSN_SIZE);
17967 }
17968 break;
17969
17970 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CZB */
17971 /* CZB can only branch forward. */
17972 if (value & ~0x7e)
17973 as_bad_where (fixP->fx_file, fixP->fx_line,
17974 _("branch out of range"));
17975
17976 if (fixP->fx_done || !seg->use_rela_p)
17977 {
17978 newval = md_chars_to_number (buf, THUMB_SIZE);
17979 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
17980 md_number_to_chars (buf, newval, THUMB_SIZE);
17981 }
17982 break;
17983
17984 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
17985 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
17986 as_bad_where (fixP->fx_file, fixP->fx_line,
17987 _("branch out of range"));
17988
17989 if (fixP->fx_done || !seg->use_rela_p)
17990 {
17991 newval = md_chars_to_number (buf, THUMB_SIZE);
17992 newval |= (value & 0x1ff) >> 1;
17993 md_number_to_chars (buf, newval, THUMB_SIZE);
17994 }
17995 break;
17996
17997 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
17998 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
17999 as_bad_where (fixP->fx_file, fixP->fx_line,
18000 _("branch out of range"));
18001
18002 if (fixP->fx_done || !seg->use_rela_p)
18003 {
18004 newval = md_chars_to_number (buf, THUMB_SIZE);
18005 newval |= (value & 0xfff) >> 1;
18006 md_number_to_chars (buf, newval, THUMB_SIZE);
18007 }
18008 break;
18009
18010 case BFD_RELOC_THUMB_PCREL_BRANCH20:
18011 if ((value & ~0x1fffff) && ((value & ~0x1fffff) != ~0x1fffff))
18012 as_bad_where (fixP->fx_file, fixP->fx_line,
18013 _("conditional branch out of range"));
18014
18015 if (fixP->fx_done || !seg->use_rela_p)
18016 {
18017 offsetT newval2;
18018 addressT S, J1, J2, lo, hi;
18019
18020 S = (value & 0x00100000) >> 20;
18021 J2 = (value & 0x00080000) >> 19;
18022 J1 = (value & 0x00040000) >> 18;
18023 hi = (value & 0x0003f000) >> 12;
18024 lo = (value & 0x00000ffe) >> 1;
18025
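/* Reassemble the pieces: S and bits 12-17 of the offset go into the
   first halfword; J1, J2 and bits 1-11 go into the second.  */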
18026 newval = md_chars_to_number (buf, THUMB_SIZE);
18027 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
18028 newval |= (S << 10) | hi;
18029 newval2 |= (J1 << 13) | (J2 << 11) | lo;
18030 md_number_to_chars (buf, newval, THUMB_SIZE);
18031 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
18032 }
18033 break;
18034
18035 case BFD_RELOC_THUMB_PCREL_BLX:
18036 case BFD_RELOC_THUMB_PCREL_BRANCH23:
18037 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
18038 as_bad_where (fixP->fx_file, fixP->fx_line,
18039 _("branch out of range"));
18040
18041 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
18042 /* For a BLX instruction, make sure that the relocation is rounded up
18043 to a word boundary. This follows the semantics of the instruction
18044 which specifies that bit 1 of the target address will come from bit
18045 1 of the base address. */
18046 value = (value + 1) & ~ 1;
18047
18048 if (fixP->fx_done || !seg->use_rela_p)
18049 {
18050 offsetT newval2;
18051
18052 newval = md_chars_to_number (buf, THUMB_SIZE);
18053 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
18054 newval |= (value & 0x7fffff) >> 12;
18055 newval2 |= (value & 0xfff) >> 1;
18056 md_number_to_chars (buf, newval, THUMB_SIZE);
18057 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
18058 }
18059 break;
18060
18061 case BFD_RELOC_THUMB_PCREL_BRANCH25:
18062 if ((value & ~0x1ffffff) && ((value & ~0x1ffffff) != ~0x1ffffff))
18063 as_bad_where (fixP->fx_file, fixP->fx_line,
18064 _("branch out of range"));
18065
18066 if (fixP->fx_done || !seg->use_rela_p)
18067 {
18068 offsetT newval2;
18069 addressT S, I1, I2, lo, hi;
18070
18071 S = (value & 0x01000000) >> 24;
18072 I1 = (value & 0x00800000) >> 23;
18073 I2 = (value & 0x00400000) >> 22;
18074 hi = (value & 0x003ff000) >> 12;
18075 lo = (value & 0x00000ffe) >> 1;
18076
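/* The instruction stores J1 and J2 rather than I1 and I2; they are
   related by Jn = NOT (In EOR S).  */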
18077 I1 = !(I1 ^ S);
18078 I2 = !(I2 ^ S);
18079
18080 newval = md_chars_to_number (buf, THUMB_SIZE);
18081 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
18082 newval |= (S << 10) | hi;
18083 newval2 |= (I1 << 13) | (I2 << 11) | lo;
18084 md_number_to_chars (buf, newval, THUMB_SIZE);
18085 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
18086 }
18087 break;
18088
18089 case BFD_RELOC_8:
18090 if (fixP->fx_done || !seg->use_rela_p)
18091 md_number_to_chars (buf, value, 1);
18092 break;
18093
18094 case BFD_RELOC_16:
18095 if (fixP->fx_done || !seg->use_rela_p)
18096 md_number_to_chars (buf, value, 2);
18097 break;
18098
18099 #ifdef OBJ_ELF
18100 case BFD_RELOC_ARM_TLS_GD32:
18101 case BFD_RELOC_ARM_TLS_LE32:
18102 case BFD_RELOC_ARM_TLS_IE32:
18103 case BFD_RELOC_ARM_TLS_LDM32:
18104 case BFD_RELOC_ARM_TLS_LDO32:
18105 S_SET_THREAD_LOCAL (fixP->fx_addsy);
18106 /* fall through */
18107
18108 case BFD_RELOC_ARM_GOT32:
18109 case BFD_RELOC_ARM_GOTOFF:
18110 case BFD_RELOC_ARM_TARGET2:
18111 if (fixP->fx_done || !seg->use_rela_p)
18112 md_number_to_chars (buf, 0, 4);
18113 break;
18114 #endif
18115
18116 case BFD_RELOC_RVA:
18117 case BFD_RELOC_32:
18118 case BFD_RELOC_ARM_TARGET1:
18119 case BFD_RELOC_ARM_ROSEGREL32:
18120 case BFD_RELOC_ARM_SBREL32:
18121 case BFD_RELOC_32_PCREL:
18122 #ifdef TE_PE
18123 case BFD_RELOC_32_SECREL:
18124 #endif
18125 if (fixP->fx_done || !seg->use_rela_p)
18126 #ifdef TE_WINCE
18127 /* For WinCE we only do this for pcrel fixups. */
18128 if (fixP->fx_done || fixP->fx_pcrel)
18129 #endif
18130 md_number_to_chars (buf, value, 4);
18131 break;
18132
18133 #ifdef OBJ_ELF
18134 case BFD_RELOC_ARM_PREL31:
18135 if (fixP->fx_done || !seg->use_rela_p)
18136 {
18137 newval = md_chars_to_number (buf, 4) & 0x80000000;
18138 if ((value ^ (value >> 1)) & 0x40000000)
18139 {
18140 as_bad_where (fixP->fx_file, fixP->fx_line,
18141 _("rel31 relocation overflow"));
18142 }
18143 newval |= value & 0x7fffffff;
18144 md_number_to_chars (buf, newval, 4);
18145 }
18146 break;
18147 #endif
18148
18149 case BFD_RELOC_ARM_CP_OFF_IMM:
18150 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
18151 if (value < -1023 || value > 1023 || (value & 3))
18152 as_bad_where (fixP->fx_file, fixP->fx_line,
18153 _("co-processor offset out of range"));
18154 cp_off_common:
18155 sign = value >= 0;
18156 if (value < 0)
18157 value = -value;
18158 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
18159 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
18160 newval = md_chars_to_number (buf, INSN_SIZE);
18161 else
18162 newval = get_thumb32_insn (buf);
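/* The offset is stored as a word count in the low eight bits; the
   add/subtract (U) bit carries the sign.  */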
18163 newval &= 0xff7fff00;
18164 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
18165 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
18166 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
18167 md_number_to_chars (buf, newval, INSN_SIZE);
18168 else
18169 put_thumb32_insn (buf, newval);
18170 break;
18171
18172 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
18173 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
18174 if (value < -255 || value > 255)
18175 as_bad_where (fixP->fx_file, fixP->fx_line,
18176 _("co-processor offset out of range"));
18177 value *= 4;
18178 goto cp_off_common;
18179
18180 case BFD_RELOC_ARM_THUMB_OFFSET:
18181 newval = md_chars_to_number (buf, THUMB_SIZE);
18182 /* Exactly what ranges apply, and where the offset is inserted,
18183 depends on the type of instruction; we can establish this from
18184 the top 4 bits. */
18185 switch (newval >> 12)
18186 {
18187 case 4: /* PC load. */
18188 /* Thumb PC loads are somewhat odd: bit 1 of the PC is
18189 forced to zero for these loads; md_pcrel_from has already
18190 compensated for this. */
18191 if (value & 3)
18192 as_bad_where (fixP->fx_file, fixP->fx_line,
18193 _("invalid offset, target not word aligned (0x%08lX)"),
18194 (((unsigned long) fixP->fx_frag->fr_address
18195 + (unsigned long) fixP->fx_where) & ~3)
18196 + (unsigned long) value);
18197
18198 if (value & ~0x3fc)
18199 as_bad_where (fixP->fx_file, fixP->fx_line,
18200 _("invalid offset, value too big (0x%08lX)"),
18201 (long) value);
18202
18203 newval |= value >> 2;
18204 break;
18205
18206 case 9: /* SP load/store. */
18207 if (value & ~0x3fc)
18208 as_bad_where (fixP->fx_file, fixP->fx_line,
18209 _("invalid offset, value too big (0x%08lX)"),
18210 (long) value);
18211 newval |= value >> 2;
18212 break;
18213
18214 case 6: /* Word load/store. */
18215 if (value & ~0x7c)
18216 as_bad_where (fixP->fx_file, fixP->fx_line,
18217 _("invalid offset, value too big (0x%08lX)"),
18218 (long) value);
18219 newval |= value << 4; /* 6 - 2. */
18220 break;
18221
18222 case 7: /* Byte load/store. */
18223 if (value & ~0x1f)
18224 as_bad_where (fixP->fx_file, fixP->fx_line,
18225 _("invalid offset, value too big (0x%08lX)"),
18226 (long) value);
18227 newval |= value << 6;
18228 break;
18229
18230 case 8: /* Halfword load/store. */
18231 if (value & ~0x3e)
18232 as_bad_where (fixP->fx_file, fixP->fx_line,
18233 _("invalid offset, value too big (0x%08lX)"),
18234 (long) value);
18235 newval |= value << 5; /* 6 - 1. */
18236 break;
18237
18238 default:
18239 as_bad_where (fixP->fx_file, fixP->fx_line,
18240 "Unable to process relocation for thumb opcode: %lx",
18241 (unsigned long) newval);
18242 break;
18243 }
18244 md_number_to_chars (buf, newval, THUMB_SIZE);
18245 break;
18246
18247 case BFD_RELOC_ARM_THUMB_ADD:
18248 /* This is a complicated relocation, since we use it for all of
18249 the following immediate relocations:
18250
18251 3bit ADD/SUB
18252 8bit ADD/SUB
18253 9bit ADD/SUB SP word-aligned
18254 10bit ADD PC/SP word-aligned
18255
18256 The type of instruction being processed is encoded in the
18257 instruction field:
18258
18259 0x8000 SUB
18260 0x00F0 Rd
18261 0x000F Rs
18262 */
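/* For example, an 8-bit immediate ADD with Rd == Rs and a value of
   200 becomes T_OPCODE_ADD_I8 | (Rd << 8) | 200.  */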
18263 newval = md_chars_to_number (buf, THUMB_SIZE);
18264 {
18265 int rd = (newval >> 4) & 0xf;
18266 int rs = newval & 0xf;
18267 int subtract = !!(newval & 0x8000);
18268
18269 /* Check for HI regs, only very restricted cases allowed:
18270 Adjusting SP, and using PC or SP to get an address. */
18271 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
18272 || (rs > 7 && rs != REG_SP && rs != REG_PC))
18273 as_bad_where (fixP->fx_file, fixP->fx_line,
18274 _("invalid Hi register with immediate"));
18275
18276 /* If value is negative, choose the opposite instruction. */
18277 if (value < 0)
18278 {
18279 value = -value;
18280 subtract = !subtract;
18281 if (value < 0)
18282 as_bad_where (fixP->fx_file, fixP->fx_line,
18283 _("immediate value out of range"));
18284 }
18285
18286 if (rd == REG_SP)
18287 {
18288 if (value & ~0x1fc)
18289 as_bad_where (fixP->fx_file, fixP->fx_line,
18290 _("invalid immediate for stack address calculation"));
18291 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
18292 newval |= value >> 2;
18293 }
18294 else if (rs == REG_PC || rs == REG_SP)
18295 {
18296 if (subtract || value & ~0x3fc)
18297 as_bad_where (fixP->fx_file, fixP->fx_line,
18298 _("invalid immediate for address calculation (value = 0x%08lX)"),
18299 (unsigned long) value);
18300 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
18301 newval |= rd << 8;
18302 newval |= value >> 2;
18303 }
18304 else if (rs == rd)
18305 {
18306 if (value & ~0xff)
18307 as_bad_where (fixP->fx_file, fixP->fx_line,
18308 _("immediate value out of range"));
18309 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
18310 newval |= (rd << 8) | value;
18311 }
18312 else
18313 {
18314 if (value & ~0x7)
18315 as_bad_where (fixP->fx_file, fixP->fx_line,
18316 _("immediate value out of range"));
18317 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
18318 newval |= rd | (rs << 3) | (value << 6);
18319 }
18320 }
18321 md_number_to_chars (buf, newval, THUMB_SIZE);
18322 break;
18323
18324 case BFD_RELOC_ARM_THUMB_IMM:
18325 newval = md_chars_to_number (buf, THUMB_SIZE);
18326 if (value < 0 || value > 255)
18327 as_bad_where (fixP->fx_file, fixP->fx_line,
18328 _("invalid immediate: %ld is too large"),
18329 (long) value);
18330 newval |= value;
18331 md_number_to_chars (buf, newval, THUMB_SIZE);
18332 break;
18333
18334 case BFD_RELOC_ARM_THUMB_SHIFT:
18335 /* 5bit shift value (0..32). LSL cannot take 32. */
18336 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
18337 temp = newval & 0xf800;
18338 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
18339 as_bad_where (fixP->fx_file, fixP->fx_line,
18340 _("invalid shift value: %ld"), (long) value);
18341 /* Shifts of zero must be encoded as LSL. */
18342 if (value == 0)
18343 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
18344 /* Shifts of 32 are encoded as zero. */
18345 else if (value == 32)
18346 value = 0;
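/* For example, a logical-shift-right by 32 stores zero in the
   5-bit immediate field.  */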
18347 newval |= value << 6;
18348 md_number_to_chars (buf, newval, THUMB_SIZE);
18349 break;
18350
18351 case BFD_RELOC_VTABLE_INHERIT:
18352 case BFD_RELOC_VTABLE_ENTRY:
18353 fixP->fx_done = 0;
18354 return;
18355
18356 case BFD_RELOC_ARM_MOVW:
18357 case BFD_RELOC_ARM_MOVT:
18358 case BFD_RELOC_ARM_THUMB_MOVW:
18359 case BFD_RELOC_ARM_THUMB_MOVT:
18360 if (fixP->fx_done || !seg->use_rela_p)
18361 {
18362 /* REL format relocations are limited to a 16-bit addend. */
18363 if (!fixP->fx_done)
18364 {
18365 if (value < -0x1000 || value > 0xffff)
18366 as_bad_where (fixP->fx_file, fixP->fx_line,
18367 _("offset too big"));
18368 }
18369 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
18370 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
18371 {
18372 value >>= 16;
18373 }
18374
18375 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
18376 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
18377 {
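/* The 16-bit immediate is split into imm4:i:imm3:imm8 and scattered
   across the two halfwords; e.g. 0x1234 gives imm4 = 1, i = 0,
   imm3 = 2, imm8 = 0x34.  */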
18378 newval = get_thumb32_insn (buf);
18379 newval &= 0xfbf08f00;
18380 newval |= (value & 0xf000) << 4;
18381 newval |= (value & 0x0800) << 15;
18382 newval |= (value & 0x0700) << 4;
18383 newval |= (value & 0x00ff);
18384 put_thumb32_insn (buf, newval);
18385 }
18386 else
18387 {
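/* For the ARM encoding the top four bits of the immediate go into
   bits 16-19 and the low twelve bits into bits 0-11.  */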
18388 newval = md_chars_to_number (buf, 4);
18389 newval &= 0xfff0f000;
18390 newval |= value & 0x0fff;
18391 newval |= (value & 0xf000) << 4;
18392 md_number_to_chars (buf, newval, 4);
18393 }
18394 }
18395 return;
18396
18397 case BFD_RELOC_ARM_ALU_PC_G0_NC:
18398 case BFD_RELOC_ARM_ALU_PC_G0:
18399 case BFD_RELOC_ARM_ALU_PC_G1_NC:
18400 case BFD_RELOC_ARM_ALU_PC_G1:
18401 case BFD_RELOC_ARM_ALU_PC_G2:
18402 case BFD_RELOC_ARM_ALU_SB_G0_NC:
18403 case BFD_RELOC_ARM_ALU_SB_G0:
18404 case BFD_RELOC_ARM_ALU_SB_G1_NC:
18405 case BFD_RELOC_ARM_ALU_SB_G1:
18406 case BFD_RELOC_ARM_ALU_SB_G2:
18407 assert (!fixP->fx_done);
18408 if (!seg->use_rela_p)
18409 {
18410 bfd_vma insn;
18411 bfd_vma encoded_addend;
18412 bfd_vma addend_abs = abs (value);
18413
18414 /* Check that the absolute value of the addend can be
18415 expressed as an 8-bit constant plus a rotation. */
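/* That is, an 8-bit value rotated right by an even amount;
   e.g. 0x3f0 is representable but 0x101 is not.  */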
18416 encoded_addend = encode_arm_immediate (addend_abs);
18417 if (encoded_addend == (unsigned int) FAIL)
18418 as_bad_where (fixP->fx_file, fixP->fx_line,
18419 _("the offset 0x%08lX is not representable"),
18420 addend_abs);
18421
18422 /* Extract the instruction. */
18423 insn = md_chars_to_number (buf, INSN_SIZE);
18424
18425 /* If the addend is positive, use an ADD instruction.
18426 Otherwise use a SUB. Take care not to destroy the S bit. */
18427 insn &= 0xff1fffff;
18428 if (value < 0)
18429 insn |= 1 << 22;
18430 else
18431 insn |= 1 << 23;
18432
18433 /* Place the encoded addend into the first 12 bits of the
18434 instruction. */
18435 insn &= 0xfffff000;
18436 insn |= encoded_addend;
18437
18438 /* Update the instruction. */
18439 md_number_to_chars (buf, insn, INSN_SIZE);
18440 }
18441 break;
18442
18443 case BFD_RELOC_ARM_LDR_PC_G0:
18444 case BFD_RELOC_ARM_LDR_PC_G1:
18445 case BFD_RELOC_ARM_LDR_PC_G2:
18446 case BFD_RELOC_ARM_LDR_SB_G0:
18447 case BFD_RELOC_ARM_LDR_SB_G1:
18448 case BFD_RELOC_ARM_LDR_SB_G2:
18449 assert (!fixP->fx_done);
18450 if (!seg->use_rela_p)
18451 {
18452 bfd_vma insn;
18453 bfd_vma addend_abs = abs (value);
18454
18455 /* Check that the absolute value of the addend can be
18456 encoded in 12 bits. */
18457 if (addend_abs >= 0x1000)
18458 as_bad_where (fixP->fx_file, fixP->fx_line,
18459 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
18460 addend_abs);
18461
18462 /* Extract the instruction. */
18463 insn = md_chars_to_number (buf, INSN_SIZE);
18464
18465 /* If the addend is negative, clear bit 23 of the instruction.
18466 Otherwise set it. */
18467 if (value < 0)
18468 insn &= ~(1 << 23);
18469 else
18470 insn |= 1 << 23;
18471
18472 /* Place the absolute value of the addend into the first 12 bits
18473 of the instruction. */
18474 insn &= 0xfffff000;
18475 insn |= addend_abs;
18476
18477 /* Update the instruction. */
18478 md_number_to_chars (buf, insn, INSN_SIZE);
18479 }
18480 break;
18481
18482 case BFD_RELOC_ARM_LDRS_PC_G0:
18483 case BFD_RELOC_ARM_LDRS_PC_G1:
18484 case BFD_RELOC_ARM_LDRS_PC_G2:
18485 case BFD_RELOC_ARM_LDRS_SB_G0:
18486 case BFD_RELOC_ARM_LDRS_SB_G1:
18487 case BFD_RELOC_ARM_LDRS_SB_G2:
18488 assert (!fixP->fx_done);
18489 if (!seg->use_rela_p)
18490 {
18491 bfd_vma insn;
18492 bfd_vma addend_abs = abs (value);
18493
18494 /* Check that the absolute value of the addend can be
18495 encoded in 8 bits. */
18496 if (addend_abs >= 0x100)
18497 as_bad_where (fixP->fx_file, fixP->fx_line,
18498 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
18499 addend_abs);
18500
18501 /* Extract the instruction. */
18502 insn = md_chars_to_number (buf, INSN_SIZE);
18503
18504 /* If the addend is negative, clear bit 23 of the instruction.
18505 Otherwise set it. */
18506 if (value < 0)
18507 insn &= ~(1 << 23);
18508 else
18509 insn |= 1 << 23;
18510
18511 /* Place the low four bits of the absolute value of the addend
18512 into bits 0 .. 3 of the instruction, and the high four bits
18513 into bits 8 .. 11. */
18514 insn &= 0xfffff0f0;
18515 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
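/* For example, an addend of 0xab places 0xb in bits 0-3 and 0xa in
   bits 8-11.  */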
18516
18517 /* Update the instruction. */
18518 md_number_to_chars (buf, insn, INSN_SIZE);
18519 }
18520 break;
18521
18522 case BFD_RELOC_ARM_LDC_PC_G0:
18523 case BFD_RELOC_ARM_LDC_PC_G1:
18524 case BFD_RELOC_ARM_LDC_PC_G2:
18525 case BFD_RELOC_ARM_LDC_SB_G0:
18526 case BFD_RELOC_ARM_LDC_SB_G1:
18527 case BFD_RELOC_ARM_LDC_SB_G2:
18528 assert (!fixP->fx_done);
18529 if (!seg->use_rela_p)
18530 {
18531 bfd_vma insn;
18532 bfd_vma addend_abs = abs (value);
18533
18534 /* Check that the absolute value of the addend is a multiple of
18535 four and, when divided by four, fits in 8 bits. */
18536 if (addend_abs & 0x3)
18537 as_bad_where (fixP->fx_file, fixP->fx_line,
18538 _("bad offset 0x%08lX (must be word-aligned)"),
18539 addend_abs);
18540
18541 if ((addend_abs >> 2) > 0xff)
18542 as_bad_where (fixP->fx_file, fixP->fx_line,
18543 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
18544 addend_abs);
18545
18546 /* Extract the instruction. */
18547 insn = md_chars_to_number (buf, INSN_SIZE);
18548
18549 /* If the addend is negative, clear bit 23 of the instruction.
18550 Otherwise set it. */
18551 if (value < 0)
18552 insn &= ~(1 << 23);
18553 else
18554 insn |= 1 << 23;
18555
18556 /* Place the addend (divided by four) into the first eight
18557 bits of the instruction. */
18558 insn &= 0xfffffff0;
18559 insn |= addend_abs >> 2;
18560
18561 /* Update the instruction. */
18562 md_number_to_chars (buf, insn, INSN_SIZE);
18563 }
18564 break;
18565
18566 case BFD_RELOC_UNUSED:
18567 default:
18568 as_bad_where (fixP->fx_file, fixP->fx_line,
18569 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
18570 }
18571 }
18572
18573 /* Translate internal representation of relocation info to BFD target
18574 format. */
18575
18576 arelent *
18577 tc_gen_reloc (asection *section, fixS *fixp)
18578 {
18579 arelent * reloc;
18580 bfd_reloc_code_real_type code;
18581
18582 reloc = xmalloc (sizeof (arelent));
18583
18584 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
18585 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
18586 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
18587
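/* For pc-relative fixups, RELA targets carry an explicit addend, so
   the PC bias is removed from it here; REL targets keep the addend in
   the section contents, and the field below is simply set to the
   address of the fixup.  */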
18588 if (fixp->fx_pcrel)
18589 {
18590 if (section->use_rela_p)
18591 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
18592 else
18593 fixp->fx_offset = reloc->address;
18594 }
18595 reloc->addend = fixp->fx_offset;
18596
18597 switch (fixp->fx_r_type)
18598 {
18599 case BFD_RELOC_8:
18600 if (fixp->fx_pcrel)
18601 {
18602 code = BFD_RELOC_8_PCREL;
18603 break;
18604 }
18605
18606 case BFD_RELOC_16:
18607 if (fixp->fx_pcrel)
18608 {
18609 code = BFD_RELOC_16_PCREL;
18610 break;
18611 }
18612
18613 case BFD_RELOC_32:
18614 if (fixp->fx_pcrel)
18615 {
18616 code = BFD_RELOC_32_PCREL;
18617 break;
18618 }
18619
18620 case BFD_RELOC_ARM_MOVW:
18621 if (fixp->fx_pcrel)
18622 {
18623 code = BFD_RELOC_ARM_MOVW_PCREL;
18624 break;
18625 }
18626
18627 case BFD_RELOC_ARM_MOVT:
18628 if (fixp->fx_pcrel)
18629 {
18630 code = BFD_RELOC_ARM_MOVT_PCREL;
18631 break;
18632 }
18633
18634 case BFD_RELOC_ARM_THUMB_MOVW:
18635 if (fixp->fx_pcrel)
18636 {
18637 code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
18638 break;
18639 }
18640
18641 case BFD_RELOC_ARM_THUMB_MOVT:
18642 if (fixp->fx_pcrel)
18643 {
18644 code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
18645 break;
18646 }
18647
18648 case BFD_RELOC_NONE:
18649 case BFD_RELOC_ARM_PCREL_BRANCH:
18650 case BFD_RELOC_ARM_PCREL_BLX:
18651 case BFD_RELOC_RVA:
18652 case BFD_RELOC_THUMB_PCREL_BRANCH7:
18653 case BFD_RELOC_THUMB_PCREL_BRANCH9:
18654 case BFD_RELOC_THUMB_PCREL_BRANCH12:
18655 case BFD_RELOC_THUMB_PCREL_BRANCH20:
18656 case BFD_RELOC_THUMB_PCREL_BRANCH23:
18657 case BFD_RELOC_THUMB_PCREL_BRANCH25:
18658 case BFD_RELOC_THUMB_PCREL_BLX:
18659 case BFD_RELOC_VTABLE_ENTRY:
18660 case BFD_RELOC_VTABLE_INHERIT:
18661 #ifdef TE_PE
18662 case BFD_RELOC_32_SECREL:
18663 #endif
18664 code = fixp->fx_r_type;
18665 break;
18666
18667 case BFD_RELOC_ARM_LITERAL:
18668 case BFD_RELOC_ARM_HWLITERAL:
18669 /* If this is called then a literal has
18670 been referenced across a section boundary. */
18671 as_bad_where (fixp->fx_file, fixp->fx_line,
18672 _("literal referenced across section boundary"));
18673 return NULL;
18674
18675 #ifdef OBJ_ELF
18676 case BFD_RELOC_ARM_GOT32:
18677 case BFD_RELOC_ARM_GOTOFF:
18678 case BFD_RELOC_ARM_PLT32:
18679 case BFD_RELOC_ARM_TARGET1:
18680 case BFD_RELOC_ARM_ROSEGREL32:
18681 case BFD_RELOC_ARM_SBREL32:
18682 case BFD_RELOC_ARM_PREL31:
18683 case BFD_RELOC_ARM_TARGET2:
18684 case BFD_RELOC_ARM_TLS_LE32:
18685 case BFD_RELOC_ARM_TLS_LDO32:
18686 case BFD_RELOC_ARM_PCREL_CALL:
18687 case BFD_RELOC_ARM_PCREL_JUMP:
18688 case BFD_RELOC_ARM_ALU_PC_G0_NC:
18689 case BFD_RELOC_ARM_ALU_PC_G0:
18690 case BFD_RELOC_ARM_ALU_PC_G1_NC:
18691 case BFD_RELOC_ARM_ALU_PC_G1:
18692 case BFD_RELOC_ARM_ALU_PC_G2:
18693 case BFD_RELOC_ARM_LDR_PC_G0:
18694 case BFD_RELOC_ARM_LDR_PC_G1:
18695 case BFD_RELOC_ARM_LDR_PC_G2:
18696 case BFD_RELOC_ARM_LDRS_PC_G0:
18697 case BFD_RELOC_ARM_LDRS_PC_G1:
18698 case BFD_RELOC_ARM_LDRS_PC_G2:
18699 case BFD_RELOC_ARM_LDC_PC_G0:
18700 case BFD_RELOC_ARM_LDC_PC_G1:
18701 case BFD_RELOC_ARM_LDC_PC_G2:
18702 case BFD_RELOC_ARM_ALU_SB_G0_NC:
18703 case BFD_RELOC_ARM_ALU_SB_G0:
18704 case BFD_RELOC_ARM_ALU_SB_G1_NC:
18705 case BFD_RELOC_ARM_ALU_SB_G1:
18706 case BFD_RELOC_ARM_ALU_SB_G2:
18707 case BFD_RELOC_ARM_LDR_SB_G0:
18708 case BFD_RELOC_ARM_LDR_SB_G1:
18709 case BFD_RELOC_ARM_LDR_SB_G2:
18710 case BFD_RELOC_ARM_LDRS_SB_G0:
18711 case BFD_RELOC_ARM_LDRS_SB_G1:
18712 case BFD_RELOC_ARM_LDRS_SB_G2:
18713 case BFD_RELOC_ARM_LDC_SB_G0:
18714 case BFD_RELOC_ARM_LDC_SB_G1:
18715 case BFD_RELOC_ARM_LDC_SB_G2:
18716 code = fixp->fx_r_type;
18717 break;
18718
18719 case BFD_RELOC_ARM_TLS_GD32:
18720 case BFD_RELOC_ARM_TLS_IE32:
18721 case BFD_RELOC_ARM_TLS_LDM32:
18722 /* BFD will include the symbol's address in the addend.
18723 But we don't want that, so subtract it out again here. */
18724 if (!S_IS_COMMON (fixp->fx_addsy))
18725 reloc->addend -= (*reloc->sym_ptr_ptr)->value;
18726 code = fixp->fx_r_type;
18727 break;
18728 #endif
18729
18730 case BFD_RELOC_ARM_IMMEDIATE:
18731 as_bad_where (fixp->fx_file, fixp->fx_line,
18732 _("internal relocation (type: IMMEDIATE) not fixed up"));
18733 return NULL;
18734
18735 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
18736 as_bad_where (fixp->fx_file, fixp->fx_line,
18737 _("ADRL used for a symbol not defined in the same file"));
18738 return NULL;
18739
18740 case BFD_RELOC_ARM_OFFSET_IMM:
18741 if (section->use_rela_p)
18742 {
18743 code = fixp->fx_r_type;
18744 break;
18745 }
18746
18747 if (fixp->fx_addsy != NULL
18748 && !S_IS_DEFINED (fixp->fx_addsy)
18749 && S_IS_LOCAL (fixp->fx_addsy))
18750 {
18751 as_bad_where (fixp->fx_file, fixp->fx_line,
18752 _("undefined local label `%s'"),
18753 S_GET_NAME (fixp->fx_addsy));
18754 return NULL;
18755 }
18756
18757 as_bad_where (fixp->fx_file, fixp->fx_line,
18758 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
18759 return NULL;
18760
18761 default:
18762 {
18763 char * type;
18764
18765 switch (fixp->fx_r_type)
18766 {
18767 case BFD_RELOC_NONE: type = "NONE"; break;
18768 case BFD_RELOC_ARM_OFFSET_IMM8: type = "OFFSET_IMM8"; break;
18769 case BFD_RELOC_ARM_SHIFT_IMM: type = "SHIFT_IMM"; break;
18770 case BFD_RELOC_ARM_SMC: type = "SMC"; break;
18771 case BFD_RELOC_ARM_SWI: type = "SWI"; break;
18772 case BFD_RELOC_ARM_MULTI: type = "MULTI"; break;
18773 case BFD_RELOC_ARM_CP_OFF_IMM: type = "CP_OFF_IMM"; break;
18774 case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
18775 case BFD_RELOC_ARM_THUMB_ADD: type = "THUMB_ADD"; break;
18776 case BFD_RELOC_ARM_THUMB_SHIFT: type = "THUMB_SHIFT"; break;
18777 case BFD_RELOC_ARM_THUMB_IMM: type = "THUMB_IMM"; break;
18778 case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
18779 default: type = _("<unknown>"); break;
18780 }
18781 as_bad_where (fixp->fx_file, fixp->fx_line,
18782 _("cannot represent %s relocation in this object file format"),
18783 type);
18784 return NULL;
18785 }
18786 }
18787
18788 #ifdef OBJ_ELF
18789 if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
18790 && GOT_symbol
18791 && fixp->fx_addsy == GOT_symbol)
18792 {
18793 code = BFD_RELOC_ARM_GOTPC;
18794 reloc->addend = fixp->fx_offset = reloc->address;
18795 }
18796 #endif
18797
18798 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
18799
18800 if (reloc->howto == NULL)
18801 {
18802 as_bad_where (fixp->fx_file, fixp->fx_line,
18803 _("cannot represent %s relocation in this object file format"),
18804 bfd_get_reloc_code_name (code));
18805 return NULL;
18806 }
18807
18808 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
18809 vtable entry to be used in the relocation's section offset. */
18810 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
18811 reloc->address = fixp->fx_offset;
18812
18813 return reloc;
18814 }
18815
18816 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
18817
18818 void
18819 cons_fix_new_arm (fragS * frag,
18820 int where,
18821 int size,
18822 expressionS * exp)
18823 {
18824 bfd_reloc_code_real_type type;
18825 int pcrel = 0;
18826
18827 /* Pick a reloc.
18828 FIXME: @@ Should look at CPU word size. */
18829 switch (size)
18830 {
18831 case 1:
18832 type = BFD_RELOC_8;
18833 break;
18834 case 2:
18835 type = BFD_RELOC_16;
18836 break;
18837 case 4:
18838 default:
18839 type = BFD_RELOC_32;
18840 break;
18841 case 8:
18842 type = BFD_RELOC_64;
18843 break;
18844 }
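/* Thus ".byte", ".short", ".word" and ".quad" data directives pick up
   8-, 16-, 32- and 64-bit relocations respectively.  */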
18845
18846 #ifdef TE_PE
18847 if (exp->X_op == O_secrel)
18848 {
18849 exp->X_op = O_symbol;
18850 type = BFD_RELOC_32_SECREL;
18851 }
18852 #endif
18853
18854 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
18855 }
18856
18857 #if defined OBJ_COFF || defined OBJ_ELF
18858 void
18859 arm_validate_fix (fixS * fixP)
18860 {
18861 /* If the destination of the branch is a defined symbol which does not have
18862 the THUMB_FUNC attribute, then we must be calling a function which has
18863 the (interfacearm) attribute. We look for the Thumb entry point to that
18864 function and change the branch to refer to that function instead. */
18865 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
18866 && fixP->fx_addsy != NULL
18867 && S_IS_DEFINED (fixP->fx_addsy)
18868 && ! THUMB_IS_FUNC (fixP->fx_addsy))
18869 {
18870 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
18871 }
18872 }
18873 #endif
18874
18875 int
18876 arm_force_relocation (struct fix * fixp)
18877 {
18878 #if defined (OBJ_COFF) && defined (TE_PE)
18879 if (fixp->fx_r_type == BFD_RELOC_RVA)
18880 return 1;
18881 #endif
18882
18883 /* Resolve these relocations even if the symbol is extern or weak. */
18884 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
18885 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
18886 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
18887 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
18888 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
18889 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
18890 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12)
18891 return 0;
18892
18893 /* Always leave these relocations for the linker. */
18894 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
18895 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
18896 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
18897 return 1;
18898
18899 return generic_force_reloc (fixp);
18900 }
18901
18902 #ifdef OBJ_COFF
18903 bfd_boolean
18904 arm_fix_adjustable (fixS * fixP)
18905 {
18906 /* This is a little hack to help the gas/arm/adrl.s test. It prevents
18907 local labels from being added to the output symbol table when they
18908 are used with the ADRL pseudo op. The ADRL relocation should always
18909 be resolved before the binary is emitted, so it is safe to say that
18910 it is adjustable. */
18911 if (fixP->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE)
18912 return 1;
18913
18914 /* This is a hack for the gas/all/redef2.s test. This test causes symbols
18915 to be cloned, and without this check relocs would still be generated
18916 against the original, pre-cloned symbol. Such symbols would not appear
18917 in the symbol table however, and so a valid reloc could not be
18918 generated. So check to see if the fixup is against a symbol which has
18919 been removed from the symbol chain, and if it is, then allow it to be
18920 adjusted into a reloc against a section symbol. */
18921 if (fixP->fx_addsy != NULL
18922 && ! S_IS_LOCAL (fixP->fx_addsy)
18923 && symbol_next (fixP->fx_addsy) == NULL
18924 && symbol_next (fixP->fx_addsy) == symbol_previous (fixP->fx_addsy))
18925 return 1;
18926
18927 return 0;
18928 }
18929 #endif
18930
18931 #ifdef OBJ_ELF
18932 /* Relocations against function names must be left unadjusted,
18933 so that the linker can use this information to generate interworking
18934 stubs. The MIPS version of this function
18935 also prevents relocations that are mips-16 specific, but I do not
18936 know why it does this.
18937
18938 FIXME:
18939 There is one other problem that ought to be addressed here, but
18940 which currently is not: Taking the address of a label (rather
18941 than a function) and then later jumping to that address. Such
18942 addresses also ought to have their bottom bit set (assuming that
18943 they reside in Thumb code), but at the moment they will not. */
18944
18945 bfd_boolean
18946 arm_fix_adjustable (fixS * fixP)
18947 {
18948 if (fixP->fx_addsy == NULL)
18949 return 1;
18950
18951 /* Preserve relocations against symbols with function type. */
18952 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
18953 return 0;
18954
18955 if (THUMB_IS_FUNC (fixP->fx_addsy)
18956 && fixP->fx_subsy == NULL)
18957 return 0;
18958
18959 /* We need the symbol name for the VTABLE entries. */
18960 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
18961 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
18962 return 0;
18963
18964 /* Don't allow symbols to be discarded on GOT related relocs. */
18965 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
18966 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
18967 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
18968 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
18969 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
18970 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
18971 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
18972 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
18973 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
18974 return 0;
18975
18976 /* Similarly for group relocations. */
18977 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
18978 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
18979 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
18980 return 0;
18981
18982 return 1;
18983 }
18984
18985 const char *
18986 elf32_arm_target_format (void)
18987 {
18988 #ifdef TE_SYMBIAN
18989 return (target_big_endian
18990 ? "elf32-bigarm-symbian"
18991 : "elf32-littlearm-symbian");
18992 #elif defined (TE_VXWORKS)
18993 return (target_big_endian
18994 ? "elf32-bigarm-vxworks"
18995 : "elf32-littlearm-vxworks");
18996 #else
18997 if (target_big_endian)
18998 return "elf32-bigarm";
18999 else
19000 return "elf32-littlearm";
19001 #endif
19002 }
19003
19004 void
19005 armelf_frob_symbol (symbolS * symp,
19006 int * puntp)
19007 {
19008 elf_frob_symbol (symp, puntp);
19009 }
19010 #endif
19011
19012 /* MD interface: Finalization. */
19013
19014 /* A good place to do this, although this was probably not intended
19015 for this kind of use. We need to dump the literal pool before
19016 references are made to a null symbol pointer. */
19017
19018 void
19019 arm_cleanup (void)
19020 {
19021 literal_pool * pool;
19022
19023 for (pool = list_of_pools; pool; pool = pool->next)
19024 {
19025 /* Put it at the end of the relevant section. */
19026 subseg_set (pool->section, pool->sub_section);
19027 #ifdef OBJ_ELF
19028 arm_elf_change_section ();
19029 #endif
19030 s_ltorg (0);
19031 }
19032 }
19033
19034 /* Adjust the symbol table. This marks Thumb symbols as distinct from
19035 ARM ones. */
19036
19037 void
19038 arm_adjust_symtab (void)
19039 {
19040 #ifdef OBJ_COFF
19041 symbolS * sym;
19042
19043 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
19044 {
19045 if (ARM_IS_THUMB (sym))
19046 {
19047 if (THUMB_IS_FUNC (sym))
19048 {
19049 /* Mark the symbol as a Thumb function. */
19050 if ( S_GET_STORAGE_CLASS (sym) == C_STAT
19051 || S_GET_STORAGE_CLASS (sym) == C_LABEL) /* This can happen! */
19052 S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
19053
19054 else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
19055 S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
19056 else
19057 as_bad (_("%s: unexpected function type: %d"),
19058 S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
19059 }
19060 else switch (S_GET_STORAGE_CLASS (sym))
19061 {
19062 case C_EXT:
19063 S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
19064 break;
19065 case C_STAT:
19066 S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
19067 break;
19068 case C_LABEL:
19069 S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
19070 break;
19071 default:
19072 /* Do nothing. */
19073 break;
19074 }
19075 }
19076
19077 if (ARM_IS_INTERWORK (sym))
19078 coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
19079 }
19080 #endif
19081 #ifdef OBJ_ELF
19082 symbolS * sym;
19083 char bind;
19084
19085 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
19086 {
19087 if (ARM_IS_THUMB (sym))
19088 {
19089 elf_symbol_type * elf_sym;
19090
19091 elf_sym = elf_symbol (symbol_get_bfdsym (sym));
19092 bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);
19093
19094 if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
19095 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
19096 {
19097 /* If it's a .thumb_func, declare it as such;
19098 otherwise tag the label as .code 16. */
19099 if (THUMB_IS_FUNC (sym))
19100 elf_sym->internal_elf_sym.st_info =
19101 ELF_ST_INFO (bind, STT_ARM_TFUNC);
19102 else
19103 elf_sym->internal_elf_sym.st_info =
19104 ELF_ST_INFO (bind, STT_ARM_16BIT);
19105 }
19106 }
19107 }
19108 #endif
19109 }
19110
19111 /* MD interface: Initialization. */
19112
19113 static void
19114 set_constant_flonums (void)
19115 {
19116 int i;
19117
19118 for (i = 0; i < NUM_FLOAT_VALS; i++)
19119 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
19120 abort ();
19121 }
19122
19123 /* Auto-select Thumb mode if it's the only available instruction set for the
19124 given architecture. */
19125
19126 static void
19127 autoselect_thumb_from_cpu_variant (void)
19128 {
19129 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
19130 opcode_select (16);
19131 }
19132
19133 void
19134 md_begin (void)
19135 {
19136 unsigned mach;
19137 unsigned int i;
19138
19139 if ( (arm_ops_hsh = hash_new ()) == NULL
19140 || (arm_cond_hsh = hash_new ()) == NULL
19141 || (arm_shift_hsh = hash_new ()) == NULL
19142 || (arm_psr_hsh = hash_new ()) == NULL
19143 || (arm_v7m_psr_hsh = hash_new ()) == NULL
19144 || (arm_reg_hsh = hash_new ()) == NULL
19145 || (arm_reloc_hsh = hash_new ()) == NULL
19146 || (arm_barrier_opt_hsh = hash_new ()) == NULL)
19147 as_fatal (_("virtual memory exhausted"));
19148
19149 for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
19150 hash_insert (arm_ops_hsh, insns[i].template, (PTR) (insns + i));
19151 for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
19152 hash_insert (arm_cond_hsh, conds[i].template, (PTR) (conds + i));
19153 for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
19154 hash_insert (arm_shift_hsh, shift_names[i].name, (PTR) (shift_names + i));
19155 for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
19156 hash_insert (arm_psr_hsh, psrs[i].template, (PTR) (psrs + i));
19157 for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
19158 hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template, (PTR) (v7m_psrs + i));
19159 for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
19160 hash_insert (arm_reg_hsh, reg_names[i].name, (PTR) (reg_names + i));
19161 for (i = 0;
19162 i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
19163 i++)
19164 hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template,
19165 (PTR) (barrier_opt_names + i));
19166 #ifdef OBJ_ELF
19167 for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++)
19168 hash_insert (arm_reloc_hsh, reloc_names[i].name, (PTR) (reloc_names + i));
19169 #endif
19170
19171 set_constant_flonums ();
19172
19173 /* Set the cpu variant based on the command-line options. We prefer
19174 -mcpu= over -march= if both are set (as for GCC); and we prefer
19175 -mfpu= over any other way of setting the floating point unit.
19176 Mixing legacy options with new-style options is an error. */
19177 if (legacy_cpu)
19178 {
19179 if (mcpu_cpu_opt || march_cpu_opt)
19180 as_bad (_("use of old and new-style options to set CPU type"));
19181
19182 mcpu_cpu_opt = legacy_cpu;
19183 }
19184 else if (!mcpu_cpu_opt)
19185 mcpu_cpu_opt = march_cpu_opt;
19186
19187 if (legacy_fpu)
19188 {
19189 if (mfpu_opt)
19190 as_bad (_("use of old and new-style options to set FPU type"));
19191
19192 mfpu_opt = legacy_fpu;
19193 }
19194 else if (!mfpu_opt)
19195 {
19196 #if !(defined (TE_LINUX) || defined (TE_NetBSD) || defined (TE_VXWORKS))
19197 /* Some environments specify a default FPU. If they don't, infer it
19198 from the processor. */
19199 if (mcpu_fpu_opt)
19200 mfpu_opt = mcpu_fpu_opt;
19201 else
19202 mfpu_opt = march_fpu_opt;
19203 #else
19204 mfpu_opt = &fpu_default;
19205 #endif
19206 }
19207
19208 if (!mfpu_opt)
19209 {
19210 if (!mcpu_cpu_opt)
19211 mfpu_opt = &fpu_default;
19212 else if (ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
19213 mfpu_opt = &fpu_arch_vfp_v2;
19214 else
19215 mfpu_opt = &fpu_arch_fpa;
19216 }
19217
19218 #ifdef CPU_DEFAULT
19219 if (!mcpu_cpu_opt)
19220 {
19221 mcpu_cpu_opt = &cpu_default;
19222 selected_cpu = cpu_default;
19223 }
19224 #else
19225 if (mcpu_cpu_opt)
19226 selected_cpu = *mcpu_cpu_opt;
19227 else
19228 mcpu_cpu_opt = &arm_arch_any;
19229 #endif
19230
19231 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
19232
19233 autoselect_thumb_from_cpu_variant ();
19234
19235 arm_arch_used = thumb_arch_used = arm_arch_none;
19236
19237 #if defined OBJ_COFF || defined OBJ_ELF
19238 {
19239 unsigned int flags = 0;
19240
19241 #if defined OBJ_ELF
19242 flags = meabi_flags;
19243
19244 switch (meabi_flags)
19245 {
19246 case EF_ARM_EABI_UNKNOWN:
19247 #endif
19248 /* Set the flags in the private structure. */
19249 if (uses_apcs_26) flags |= F_APCS26;
19250 if (support_interwork) flags |= F_INTERWORK;
19251 if (uses_apcs_float) flags |= F_APCS_FLOAT;
19252 if (pic_code) flags |= F_PIC;
19253 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
19254 flags |= F_SOFT_FLOAT;
19255
19256 switch (mfloat_abi_opt)
19257 {
19258 case ARM_FLOAT_ABI_SOFT:
19259 case ARM_FLOAT_ABI_SOFTFP:
19260 flags |= F_SOFT_FLOAT;
19261 break;
19262
19263 case ARM_FLOAT_ABI_HARD:
19264 if (flags & F_SOFT_FLOAT)
19265 as_bad (_("hard-float conflicts with specified fpu"));
19266 break;
19267 }
19268
19269 /* Using pure-endian doubles (even if soft-float). */
19270 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
19271 flags |= F_VFP_FLOAT;
19272
19273 #if defined OBJ_ELF
19274 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
19275 flags |= EF_ARM_MAVERICK_FLOAT;
19276 break;
19277
19278 case EF_ARM_EABI_VER4:
19279 case EF_ARM_EABI_VER5:
19280 /* No additional flags to set. */
19281 break;
19282
19283 default:
19284 abort ();
19285 }
19286 #endif
19287 bfd_set_private_flags (stdoutput, flags);
19288
19289 /* We have run out of flags in the COFF header to encode the
19290 status of ATPCS support, so instead we create a dummy,
19291 empty debug section called .arm.atpcs. */
19292 if (atpcs)
19293 {
19294 asection * sec;
19295
19296 sec = bfd_make_section (stdoutput, ".arm.atpcs");
19297
19298 if (sec != NULL)
19299 {
19300 bfd_set_section_flags
19301 (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
19302 bfd_set_section_size (stdoutput, sec, 0);
19303 bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
19304 }
19305 }
19306 }
19307 #endif
19308
19309 /* Record the CPU type as well. */
19310 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
19311 mach = bfd_mach_arm_iWMMXt2;
19312 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
19313 mach = bfd_mach_arm_iWMMXt;
19314 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
19315 mach = bfd_mach_arm_XScale;
19316 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
19317 mach = bfd_mach_arm_ep9312;
19318 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
19319 mach = bfd_mach_arm_5TE;
19320 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
19321 {
19322 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
19323 mach = bfd_mach_arm_5T;
19324 else
19325 mach = bfd_mach_arm_5;
19326 }
19327 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
19328 {
19329 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
19330 mach = bfd_mach_arm_4T;
19331 else
19332 mach = bfd_mach_arm_4;
19333 }
19334 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
19335 mach = bfd_mach_arm_3M;
19336 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
19337 mach = bfd_mach_arm_3;
19338 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
19339 mach = bfd_mach_arm_2a;
19340 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
19341 mach = bfd_mach_arm_2;
19342 else
19343 mach = bfd_mach_arm_unknown;
19344
19345 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
19346 }
19347
19348 /* Command line processing. */
19349
19350 /* md_parse_option
19351 Invocation line includes a switch not recognized by the base assembler.
19352 See if it's a processor-specific option.
19353
19354 This routine is somewhat complicated by the need for backwards
19355 compatibility (since older releases of gcc can't be changed).
19356 The new options try to make the interface as compatible as
19357 possible with GCC.
19358
19359 New options (supported) are:
19360
19361 -mcpu=<cpu name> Assemble for selected processor
19362 -march=<architecture name> Assemble for selected architecture
19363 -mfpu=<fpu architecture> Assemble for selected FPU.
19364 -EB/-mbig-endian Big-endian
19365 -EL/-mlittle-endian Little-endian
19366 -k Generate PIC code
19367 -mthumb Start in Thumb mode
19368 -mthumb-interwork Code supports ARM/Thumb interworking
19369
19370 For now we will also provide support for:
19371
19372 -mapcs-32 32-bit Program counter
19373 -mapcs-26 26-bit Program counter
19374 -mapcs-float Floats passed in FP registers
19375 -mapcs-reentrant Reentrant code
19376 -matpcs
19377 (sometime these will probably be replaced with -mapcs=<list of options>
19378 and -matpcs=<list of options>)
19379
19380 The remaining options are only supported for backwards compatibility.
19381 Cpu variants, the arm part is optional:
19382 -m[arm]1 Currently not supported.
19383 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
19384 -m[arm]3 Arm 3 processor
19385 -m[arm]6[xx], Arm 6 processors
19386 -m[arm]7[xx][t][[d]m] Arm 7 processors
19387 -m[arm]8[10] Arm 8 processors
19388 -m[arm]9[20][tdmi] Arm 9 processors
19389 -mstrongarm[110[0]] StrongARM processors
19390 -mxscale XScale processors
19391 -m[arm]v[2345[t[e]]] Arm architectures
19392 -mall All (except the ARM1)
19393 FP variants:
19394 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
19395 -mfpe-old (No float load/store multiples)
19396 -mvfpxd VFP Single precision
19397 -mvfp All VFP
19398 -mno-fpu Disable all floating point instructions
19399
19400 The following CPU names are recognized:
19401 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
19402 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
19403 arm700i, arm710, arm710t, arm720, arm720t, arm740t, arm710c,
19404 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
19405 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
19406 arm10t, arm10e, arm1020t, arm1020e, arm10200e,
19407 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
19408
19409 */
19410
19411 const char * md_shortopts = "m:k";
19412
19413 #ifdef ARM_BI_ENDIAN
19414 #define OPTION_EB (OPTION_MD_BASE + 0)
19415 #define OPTION_EL (OPTION_MD_BASE + 1)
19416 #else
19417 #if TARGET_BYTES_BIG_ENDIAN
19418 #define OPTION_EB (OPTION_MD_BASE + 0)
19419 #else
19420 #define OPTION_EL (OPTION_MD_BASE + 1)
19421 #endif
19422 #endif
19423
19424 struct option md_longopts[] =
19425 {
19426 #ifdef OPTION_EB
19427 {"EB", no_argument, NULL, OPTION_EB},
19428 #endif
19429 #ifdef OPTION_EL
19430 {"EL", no_argument, NULL, OPTION_EL},
19431 #endif
19432 {NULL, no_argument, NULL, 0}
19433 };
19434
19435 size_t md_longopts_size = sizeof (md_longopts);
19436
19437 struct arm_option_table
19438 {
19439 char *option; /* Option name to match. */
19440 char *help; /* Help information. */
19441 int *var; /* Variable to change. */
19442 int value; /* What to change it to. */
19443 char *deprecated; /* If non-null, print this message. */
19444 };
19445
19446 struct arm_option_table arm_opts[] =
19447 {
19448 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
19449 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
19450 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
19451 &support_interwork, 1, NULL},
19452 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
19453 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
19454 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
19455 1, NULL},
19456 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
19457 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
19458 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
19459 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
19460 NULL},
19461
19462 /* These are recognized by the assembler, but have no effect on code. */
19463 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
19464 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
19465 {NULL, NULL, NULL, 0, NULL}
19466 };
19467
19468 struct arm_legacy_option_table
19469 {
19470 char *option; /* Option name to match. */
19471 const arm_feature_set **var; /* Variable to change. */
19472 const arm_feature_set value; /* What to change it to. */
19473 char *deprecated; /* If non-null, print this message. */
19474 };
19475
19476 const struct arm_legacy_option_table arm_legacy_opts[] =
19477 {
19478 /* DON'T add any new processors to this list -- we want the whole list
19479 to go away... Add them to the processors table instead. */
19480 {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
19481 {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
19482 {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
19483 {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
19484 {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
19485 {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
19486 {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
19487 {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
19488 {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
19489 {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
19490 {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
19491 {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
19492 {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
19493 {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
19494 {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
19495 {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
19496 {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
19497 {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
19498 {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
19499 {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
19500 {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
19501 {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
19502 {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
19503 {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
19504 {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
19505 {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
19506 {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
19507 {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
19508 {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
19509 {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
19510 {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
19511 {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
19512 {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
19513 {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
19514 {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
19515 {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
19516 {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
19517 {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
19518 {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
19519 {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
19520 {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
19521 {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
19522 {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
19523 {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
19524 {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
19525 {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
19526 {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
19527 {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
19528 {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
19529 {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
19530 {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
19531 {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
19532 {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
19533 {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
19534 {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
19535 {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
19536 {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
19537 {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
19538 {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
19539 {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
19540 {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
19541 {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
19542 {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
19543 {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
19544 {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
19545 {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
19546 {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
19547 {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
19548 {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
19549 {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
19550 N_("use -mcpu=strongarm110")},
19551 {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
19552 N_("use -mcpu=strongarm1100")},
19553 {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
19554 N_("use -mcpu=strongarm1110")},
19555 {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
19556 {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
19557 {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},
19558
19559 /* Architecture variants -- don't add any more to this list either. */
19560 {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
19561 {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
19562 {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
19563 {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
19564 {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
19565 {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
19566 {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
19567 {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
19568 {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
19569 {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
19570 {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
19571 {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
19572 {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
19573 {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
19574 {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
19575 {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
19576 {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
19577 {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
19578
19579 /* Floating point variants -- don't add any more to this list either. */
19580 {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
19581 {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
19582 {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
19583 {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
19584 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
19585
19586 {NULL, NULL, ARM_ARCH_NONE, NULL}
19587 };
19588
19589 struct arm_cpu_option_table
19590 {
19591 char *name;
19592 const arm_feature_set value;
19593 /* For some CPUs we assume an FPU unless the user explicitly sets
19594 -mfpu=... */
19595 const arm_feature_set default_fpu;
19596 /* The canonical name of the CPU, or NULL to use NAME converted to upper
19597 case. */
19598 const char *canonical_name;
19599 };
19600
19601 /* This list should, at a minimum, contain all the cpu names
19602 recognized by GCC. */
19603 static const struct arm_cpu_option_table arm_cpus[] =
19604 {
19605 {"all", ARM_ANY, FPU_ARCH_FPA, NULL},
19606 {"arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL},
19607 {"arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL},
19608 {"arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
19609 {"arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
19610 {"arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19611 {"arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19612 {"arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19613 {"arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19614 {"arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19615 {"arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19616 {"arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
19617 {"arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19618 {"arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
19619 {"arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19620 {"arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
19621 {"arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19622 {"arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19623 {"arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19624 {"arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19625 {"arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19626 {"arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19627 {"arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19628 {"arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19629 {"arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19630 {"arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19631 {"arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19632 {"arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19633 {"arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19634 {"arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19635 {"arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19636 {"arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19637 {"arm810", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19638 {"strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19639 {"strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19640 {"strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19641 {"strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19642 {"strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19643 {"arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19644 {"arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"},
19645 {"arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19646 {"arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19647 {"arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19648 {"arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19649 /* For V5 or later processors we default to using VFP; but the user
19650 should really set the FPU type explicitly. */
19651 {"arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
19652 {"arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19653 {"arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
19654 {"arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
19655 {"arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
19656 {"arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
19657 {"arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"},
19658 {"arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19659 {"arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
19660 {"arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"},
19661 {"arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19662 {"arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19663 {"arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
19664 {"arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
19665 {"arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19666 {"arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"},
19667 {"arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
19668 {"arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19669 {"arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19670 {"arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM1026EJ-S"},
19671 {"arm1026ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
19672 {"arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"},
19673 {"arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL},
19674 {"arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2, "ARM1136JF-S"},
19675 {"arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL},
19676 {"mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, NULL},
19677 {"mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, NULL},
19678 {"arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL},
19679 {"arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL},
19680 {"arm1176jz-s", ARM_ARCH_V6ZK, FPU_NONE, NULL},
19681 {"arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL},
19682 {"cortex-a8", ARM_ARCH_V7A, ARM_FEATURE(0, FPU_VFP_V3
19683 | FPU_NEON_EXT_V1),
19684 NULL},
19685 {"cortex-r4", ARM_ARCH_V7R, FPU_NONE, NULL},
19686 {"cortex-m3", ARM_ARCH_V7M, FPU_NONE, NULL},
19687 /* ??? XSCALE is really an architecture. */
19688 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
19689 /* ??? iwmmxt is not a processor. */
19690 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL},
19691 {"iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL},
19692 {"i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
19693 /* Maverick */
19694 {"ep9312", ARM_FEATURE(ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"},
19695 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL}
19696 };
19697
19698 struct arm_arch_option_table
19699 {
19700 char *name;
19701 const arm_feature_set value;
19702 const arm_feature_set default_fpu;
19703 };
19704
19705 /* This list should, at a minimum, contain all the architecture names
19706 recognized by GCC. */
19707 static const struct arm_arch_option_table arm_archs[] =
19708 {
19709 {"all", ARM_ANY, FPU_ARCH_FPA},
19710 {"armv1", ARM_ARCH_V1, FPU_ARCH_FPA},
19711 {"armv2", ARM_ARCH_V2, FPU_ARCH_FPA},
19712 {"armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA},
19713 {"armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA},
19714 {"armv3", ARM_ARCH_V3, FPU_ARCH_FPA},
19715 {"armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA},
19716 {"armv4", ARM_ARCH_V4, FPU_ARCH_FPA},
19717 {"armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA},
19718 {"armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA},
19719 {"armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA},
19720 {"armv5", ARM_ARCH_V5, FPU_ARCH_VFP},
19721 {"armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP},
19722 {"armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP},
19723 {"armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP},
19724 {"armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP},
19725 {"armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP},
19726 {"armv6", ARM_ARCH_V6, FPU_ARCH_VFP},
19727 {"armv6j", ARM_ARCH_V6, FPU_ARCH_VFP},
19728 {"armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP},
19729 {"armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP},
19730 {"armv6zk", ARM_ARCH_V6ZK, FPU_ARCH_VFP},
19731 {"armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP},
19732 {"armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP},
19733 {"armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP},
19734 {"armv6zkt2", ARM_ARCH_V6ZKT2, FPU_ARCH_VFP},
19735 {"armv7", ARM_ARCH_V7, FPU_ARCH_VFP},
19736 {"armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP},
19737 {"armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP},
19738 {"armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP},
19739 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP},
19740 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP},
19741 {"iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP},
19742 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE}
19743 };
19744
19745 /* ISA extensions in the co-processor space. */
19746 struct arm_option_cpu_value_table
19747 {
19748 char *name;
19749 const arm_feature_set value;
19750 };
19751
19752 static const struct arm_option_cpu_value_table arm_extensions[] =
19753 {
19754 {"maverick", ARM_FEATURE (0, ARM_CEXT_MAVERICK)},
19755 {"xscale", ARM_FEATURE (0, ARM_CEXT_XSCALE)},
19756 {"iwmmxt", ARM_FEATURE (0, ARM_CEXT_IWMMXT)},
19757 {"iwmmxt2", ARM_FEATURE (0, ARM_CEXT_IWMMXT2)},
19758 {NULL, ARM_ARCH_NONE}
19759 };
19760
19761 /* This list should, at a minimum, contain all the fpu names
19762 recognized by GCC. */
19763 static const struct arm_option_cpu_value_table arm_fpus[] =
19764 {
19765 {"softfpa", FPU_NONE},
19766 {"fpe", FPU_ARCH_FPE},
19767 {"fpe2", FPU_ARCH_FPE},
19768 {"fpe3", FPU_ARCH_FPA}, /* Third release supports LFM/SFM. */
19769 {"fpa", FPU_ARCH_FPA},
19770 {"fpa10", FPU_ARCH_FPA},
19771 {"fpa11", FPU_ARCH_FPA},
19772 {"arm7500fe", FPU_ARCH_FPA},
19773 {"softvfp", FPU_ARCH_VFP},
19774 {"softvfp+vfp", FPU_ARCH_VFP_V2},
19775 {"vfp", FPU_ARCH_VFP_V2},
19776 {"vfp9", FPU_ARCH_VFP_V2},
19777 {"vfp3", FPU_ARCH_VFP_V3},
19778 {"vfp10", FPU_ARCH_VFP_V2},
19779 {"vfp10-r0", FPU_ARCH_VFP_V1},
19780 {"vfpxd", FPU_ARCH_VFP_V1xD},
19781 {"arm1020t", FPU_ARCH_VFP_V1},
19782 {"arm1020e", FPU_ARCH_VFP_V2},
19783 {"arm1136jfs", FPU_ARCH_VFP_V2},
19784 {"arm1136jf-s", FPU_ARCH_VFP_V2},
19785 {"maverick", FPU_ARCH_MAVERICK},
19786 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
19787 {NULL, ARM_ARCH_NONE}
19788 };
19789
19790 struct arm_option_value_table
19791 {
19792 char *name;
19793 long value;
19794 };
19795
19796 static const struct arm_option_value_table arm_float_abis[] =
19797 {
19798 {"hard", ARM_FLOAT_ABI_HARD},
19799 {"softfp", ARM_FLOAT_ABI_SOFTFP},
19800 {"soft", ARM_FLOAT_ABI_SOFT},
19801 {NULL, 0}
19802 };
19803
19804 #ifdef OBJ_ELF
19805 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
19806 static const struct arm_option_value_table arm_eabis[] =
19807 {
19808 {"gnu", EF_ARM_EABI_UNKNOWN},
19809 {"4", EF_ARM_EABI_VER4},
19810 {"5", EF_ARM_EABI_VER5},
19811 {NULL, 0}
19812 };
19813 #endif
19814
19815 struct arm_long_option_table
19816 {
19817 char * option; /* Substring to match. */
19818 char * help; /* Help information. */
19819 int (* func) (char * subopt); /* Function to decode sub-option. */
19820 char * deprecated; /* If non-null, print this message. */
19821 };
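/* Worked example (illustrative only): for the command line option
   "-mcpu=arm926ej-s", md_parse_option below sees c == 'm' and
   arg == "cpu=arm926ej-s".  The entry whose OPTION is "mcpu=" matches (its
   first character against C, the rest as a prefix of ARG), and its FUNC is
   handed the text after the '=':

       lopt->func (arg + strlen ("mcpu=") - 1);   i.e. arm_parse_cpu ("arm926ej-s")

   A deprecated option would additionally print its DEPRECATED message.  */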
19822
19823 static int
19824 arm_parse_extension (char * str, const arm_feature_set **opt_p)
19825 {
19826 arm_feature_set *ext_set = xmalloc (sizeof (arm_feature_set));
19827
19828 /* Copy the feature set, so that we can modify it. */
19829 *ext_set = **opt_p;
19830 *opt_p = ext_set;
19831
19832 while (str != NULL && *str != 0)
19833 {
19834 const struct arm_option_cpu_value_table * opt;
19835 char * ext;
19836 int optlen;
19837
19838 if (*str != '+')
19839 {
19840 as_bad (_("invalid architectural extension"));
19841 return 0;
19842 }
19843
19844 str++;
19845 ext = strchr (str, '+');
19846
19847 if (ext != NULL)
19848 optlen = ext - str;
19849 else
19850 optlen = strlen (str);
19851
19852 if (optlen == 0)
19853 {
19854 as_bad (_("missing architectural extension"));
19855 return 0;
19856 }
19857
19858 for (opt = arm_extensions; opt->name != NULL; opt++)
19859 if (strncmp (opt->name, str, optlen) == 0)
19860 {
19861 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
19862 break;
19863 }
19864
19865 if (opt->name == NULL)
19866 {
19867 as_bad (_("unknown architectural extension `%s'"), str);
19868 return 0;
19869 }
19870
19871 str = ext;
19872 }
19873
19874 return 1;
19875 }
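/* Worked example: given the suffix "+xscale+iwmmxt" (say from
   "-mcpu=xscale+iwmmxt"), the loop above peels off "xscale" and then
   "iwmmxt", looks each name up in arm_extensions, and merges the matching
   feature bits into a freshly allocated copy of the caller's feature set,
   finally pointing *OPT_P at that copy.  An unknown name such as a
   hypothetical "+foo" is rejected with "unknown architectural extension".  */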
19876
19877 static int
19878 arm_parse_cpu (char * str)
19879 {
19880 const struct arm_cpu_option_table * opt;
19881 char * ext = strchr (str, '+');
19882 int optlen;
19883
19884 if (ext != NULL)
19885 optlen = ext - str;
19886 else
19887 optlen = strlen (str);
19888
19889 if (optlen == 0)
19890 {
19891 as_bad (_("missing cpu name `%s'"), str);
19892 return 0;
19893 }
19894
19895 for (opt = arm_cpus; opt->name != NULL; opt++)
19896 if (strncmp (opt->name, str, optlen) == 0)
19897 {
19898 mcpu_cpu_opt = &opt->value;
19899 mcpu_fpu_opt = &opt->default_fpu;
19900 if (opt->canonical_name)
19901 strcpy (selected_cpu_name, opt->canonical_name);
19902 else
19903 {
19904 int i;
19905 for (i = 0; i < optlen; i++)
19906 selected_cpu_name[i] = TOUPPER (opt->name[i]);
19907 selected_cpu_name[i] = 0;
19908 }
19909
19910 if (ext != NULL)
19911 return arm_parse_extension (ext, &mcpu_cpu_opt);
19912
19913 return 1;
19914 }
19915
19916 as_bad (_("unknown cpu `%s'"), str);
19917 return 0;
19918 }
19919
19920 static int
19921 arm_parse_arch (char * str)
19922 {
19923 const struct arm_arch_option_table *opt;
19924 char *ext = strchr (str, '+');
19925 int optlen;
19926
19927 if (ext != NULL)
19928 optlen = ext - str;
19929 else
19930 optlen = strlen (str);
19931
19932 if (optlen == 0)
19933 {
19934 as_bad (_("missing architecture name `%s'"), str);
19935 return 0;
19936 }
19937
19938 for (opt = arm_archs; opt->name != NULL; opt++)
19939 if (streq (opt->name, str))
19940 {
19941 march_cpu_opt = &opt->value;
19942 march_fpu_opt = &opt->default_fpu;
19943 strcpy (selected_cpu_name, opt->name);
19944
19945 if (ext != NULL)
19946 return arm_parse_extension (ext, &march_cpu_opt);
19947
19948 return 1;
19949 }
19950
19951 as_bad (_("unknown architecture `%s'\n"), str);
19952 return 0;
19953 }
19954
19955 static int
19956 arm_parse_fpu (char * str)
19957 {
19958 const struct arm_option_cpu_value_table * opt;
19959
19960 for (opt = arm_fpus; opt->name != NULL; opt++)
19961 if (streq (opt->name, str))
19962 {
19963 mfpu_opt = &opt->value;
19964 return 1;
19965 }
19966
19967 as_bad (_("unknown floating point format `%s'\n"), str);
19968 return 0;
19969 }
19970
19971 static int
19972 arm_parse_float_abi (char * str)
19973 {
19974 const struct arm_option_value_table * opt;
19975
19976 for (opt = arm_float_abis; opt->name != NULL; opt++)
19977 if (streq (opt->name, str))
19978 {
19979 mfloat_abi_opt = opt->value;
19980 return 1;
19981 }
19982
19983 as_bad (_("unknown floating point abi `%s'\n"), str);
19984 return 0;
19985 }
19986
19987 #ifdef OBJ_ELF
19988 static int
19989 arm_parse_eabi (char * str)
19990 {
19991 const struct arm_option_value_table *opt;
19992
19993 for (opt = arm_eabis; opt->name != NULL; opt++)
19994 if (streq (opt->name, str))
19995 {
19996 meabi_flags = opt->value;
19997 return 1;
19998 }
19999 as_bad (_("unknown EABI `%s'\n"), str);
20000 return 0;
20001 }
20002 #endif
20003
20004 struct arm_long_option_table arm_long_opts[] =
20005 {
20006 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
20007 arm_parse_cpu, NULL},
20008 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
20009 arm_parse_arch, NULL},
20010 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
20011 arm_parse_fpu, NULL},
20012 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
20013 arm_parse_float_abi, NULL},
20014 #ifdef OBJ_ELF
20015 {"meabi=", N_("<ver>\t assemble for eabi version <ver>"),
20016 arm_parse_eabi, NULL},
20017 #endif
20018 {NULL, NULL, 0, NULL}
20019 };
20020
20021 int
20022 md_parse_option (int c, char * arg)
20023 {
20024 struct arm_option_table *opt;
20025 const struct arm_legacy_option_table *fopt;
20026 struct arm_long_option_table *lopt;
20027
20028 switch (c)
20029 {
20030 #ifdef OPTION_EB
20031 case OPTION_EB:
20032 target_big_endian = 1;
20033 break;
20034 #endif
20035
20036 #ifdef OPTION_EL
20037 case OPTION_EL:
20038 target_big_endian = 0;
20039 break;
20040 #endif
20041
20042 case 'a':
20043 /* Listing option.  Just ignore these; we don't support additional
20044 ones. */
20045 return 0;
20046
20047 default:
20048 for (opt = arm_opts; opt->option != NULL; opt++)
20049 {
20050 if (c == opt->option[0]
20051 && ((arg == NULL && opt->option[1] == 0)
20052 || streq (arg, opt->option + 1)))
20053 {
20054 #if WARN_DEPRECATED
20055 /* If the option is deprecated, tell the user. */
20056 if (opt->deprecated != NULL)
20057 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
20058 arg ? arg : "", _(opt->deprecated));
20059 #endif
20060
20061 if (opt->var != NULL)
20062 *opt->var = opt->value;
20063
20064 return 1;
20065 }
20066 }
20067
20068 for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
20069 {
20070 if (c == fopt->option[0]
20071 && ((arg == NULL && fopt->option[1] == 0)
20072 || streq (arg, fopt->option + 1)))
20073 {
20074 #if WARN_DEPRECATED
20075 /* If the option is deprecated, tell the user. */
20076 if (fopt->deprecated != NULL)
20077 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
20078 arg ? arg : "", _(fopt->deprecated));
20079 #endif
20080
20081 if (fopt->var != NULL)
20082 *fopt->var = &fopt->value;
20083
20084 return 1;
20085 }
20086 }
20087
20088 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
20089 {
20090 /* These options are expected to have an argument. */
20091 if (c == lopt->option[0]
20092 && arg != NULL
20093 && strncmp (arg, lopt->option + 1,
20094 strlen (lopt->option + 1)) == 0)
20095 {
20096 #if WARN_DEPRECATED
20097 /* If the option is deprecated, tell the user. */
20098 if (lopt->deprecated != NULL)
20099 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
20100 _(lopt->deprecated));
20101 #endif
20102
20103 /* Call the sub-option parser. */
20104 return lopt->func (arg + strlen (lopt->option) - 1);
20105 }
20106 }
20107
20108 return 0;
20109 }
20110
20111 return 1;
20112 }
20113
20114 void
20115 md_show_usage (FILE * fp)
20116 {
20117 struct arm_option_table *opt;
20118 struct arm_long_option_table *lopt;
20119
20120 fprintf (fp, _(" ARM-specific assembler options:\n"));
20121
20122 for (opt = arm_opts; opt->option != NULL; opt++)
20123 if (opt->help != NULL)
20124 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
20125
20126 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
20127 if (lopt->help != NULL)
20128 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
20129
20130 #ifdef OPTION_EB
20131 fprintf (fp, _("\
20132 -EB assemble code for a big-endian cpu\n"));
20133 #endif
20134
20135 #ifdef OPTION_EL
20136 fprintf (fp, _("\
20137 -EL assemble code for a little-endian cpu\n"));
20138 #endif
20139 }
20140
20141
20142 #ifdef OBJ_ELF
20143 typedef struct
20144 {
20145 int val;
20146 arm_feature_set flags;
20147 } cpu_arch_ver_table;
20148
20149 /* Mapping from CPU features to EABI CPU arch values.  The table must be
20150 sorted with the least capable architectures first. */
20151 static const cpu_arch_ver_table cpu_arch_ver[] =
20152 {
20153 {1, ARM_ARCH_V4},
20154 {2, ARM_ARCH_V4T},
20155 {3, ARM_ARCH_V5},
20156 {4, ARM_ARCH_V5TE},
20157 {5, ARM_ARCH_V5TEJ},
20158 {6, ARM_ARCH_V6},
20159 {7, ARM_ARCH_V6Z},
20160 {8, ARM_ARCH_V6K},
20161 {9, ARM_ARCH_V6T2},
20162 {10, ARM_ARCH_V7A},
20163 {10, ARM_ARCH_V7R},
20164 {10, ARM_ARCH_V7M},
20165 {0, ARM_ARCH_NONE}
20166 };
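/* Worked example of the "least capable first" ordering: in
   aeabi_set_public_attributes below, the scan keeps the VAL of the last row
   whose feature bits are still present in the accumulated feature set.  A
   module built for ARM_ARCH_V5TE matches rows 1 through 4 in turn and none
   after that, so Tag_CPU_arch is recorded as 4.  */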
20167
20168 /* Set the public EABI object attributes. */
20169 static void
20170 aeabi_set_public_attributes (void)
20171 {
20172 int arch;
20173 arm_feature_set flags;
20174 arm_feature_set tmp;
20175 const cpu_arch_ver_table *p;
20176
20177 /* Choose the architecture based on the capabilities of the requested cpu
20178 (if any) and/or the instructions actually used. */
20179 ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
20180 ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
20181 ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
20182
20183 tmp = flags;
20184 arch = 0;
20185 for (p = cpu_arch_ver; p->val; p++)
20186 {
20187 if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
20188 {
20189 arch = p->val;
20190 ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
20191 }
20192 }
20193
20194 /* Tag_CPU_name. */
20195 if (selected_cpu_name[0])
20196 {
20197 char *p;
20198
20199 p = selected_cpu_name;
20200 if (strncmp (p, "armv", 4) == 0)
20201 {
20202 int i;
20203
20204 p += 4;
20205 for (i = 0; p[i]; i++)
20206 p[i] = TOUPPER (p[i]);
20207 }
20208 elf32_arm_add_eabi_attr_string (stdoutput, 5, p);
20209 }
20210 /* Tag_CPU_arch. */
20211 elf32_arm_add_eabi_attr_int (stdoutput, 6, arch);
20212 /* Tag_CPU_arch_profile. */
20213 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
20214 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'A');
20215 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
20216 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'R');
20217 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m))
20218 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'M');
20219 /* Tag_ARM_ISA_use. */
20220 if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_full))
20221 elf32_arm_add_eabi_attr_int (stdoutput, 8, 1);
20222 /* Tag_THUMB_ISA_use. */
20223 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_full))
20224 elf32_arm_add_eabi_attr_int (stdoutput, 9,
20225 ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2) ? 2 : 1);
20226 /* Tag_VFP_arch. */
20227 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v3)
20228 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v3))
20229 elf32_arm_add_eabi_attr_int (stdoutput, 10, 3);
20230 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v2)
20231 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v2))
20232 elf32_arm_add_eabi_attr_int (stdoutput, 10, 2);
20233 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1)
20234 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1)
20235 || ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1xd)
20236 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1xd))
20237 elf32_arm_add_eabi_attr_int (stdoutput, 10, 1);
20238 /* Tag_WMMX_arch. */
20239 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_cext_iwmmxt)
20240 || ARM_CPU_HAS_FEATURE (arm_arch_used, arm_cext_iwmmxt))
20241 elf32_arm_add_eabi_attr_int (stdoutput, 11, 1);
20242 /* Tag_NEON_arch. */
20243 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_neon_ext_v1)
20244 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_neon_ext_v1))
20245 elf32_arm_add_eabi_attr_int (stdoutput, 12, 1);
20246 }
20247
20248 /* Add the .ARM.attributes section. */
20249 void
20250 arm_md_end (void)
20251 {
20252 segT s;
20253 char *p;
20254 addressT addr;
20255 offsetT size;
20256
20257 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
20258 return;
20259
20260 aeabi_set_public_attributes ();
20261 size = elf32_arm_eabi_attr_size (stdoutput);
20262 s = subseg_new (".ARM.attributes", 0);
20263 bfd_set_section_flags (stdoutput, s, SEC_READONLY | SEC_DATA);
20264 addr = frag_now_fix ();
20265 p = frag_more (size);
20266 elf32_arm_set_eabi_attr_contents (stdoutput, (bfd_byte *)p, size);
20267 }
20268 #endif /* OBJ_ELF */
20269
20270
20271 /* Parse a .cpu directive. */
20272
20273 static void
20274 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
20275 {
20276 const struct arm_cpu_option_table *opt;
20277 char *name;
20278 char saved_char;
20279
20280 name = input_line_pointer;
20281 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
20282 input_line_pointer++;
20283 saved_char = *input_line_pointer;
20284 *input_line_pointer = 0;
20285
20286 /* Skip the first "all" entry. */
20287 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
20288 if (streq (opt->name, name))
20289 {
20290 mcpu_cpu_opt = &opt->value;
20291 selected_cpu = opt->value;
20292 if (opt->canonical_name)
20293 strcpy (selected_cpu_name, opt->canonical_name);
20294 else
20295 {
20296 int i;
20297 for (i = 0; opt->name[i]; i++)
20298 selected_cpu_name[i] = TOUPPER (opt->name[i]);
20299 selected_cpu_name[i] = 0;
20300 }
20301 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
20302 *input_line_pointer = saved_char;
20303 demand_empty_rest_of_line ();
20304 return;
20305 }
20306 as_bad (_("unknown cpu `%s'"), name);
20307 *input_line_pointer = saved_char;
20308 ignore_rest_of_line ();
20309 }
20310
20311
20312 /* Parse a .arch directive. */
20313
20314 static void
20315 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
20316 {
20317 const struct arm_arch_option_table *opt;
20318 char saved_char;
20319 char *name;
20320
20321 name = input_line_pointer;
20322 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
20323 input_line_pointer++;
20324 saved_char = *input_line_pointer;
20325 *input_line_pointer = 0;
20326
20327 /* Skip the first "all" entry. */
20328 for (opt = arm_archs + 1; opt->name != NULL; opt++)
20329 if (streq (opt->name, name))
20330 {
20331 mcpu_cpu_opt = &opt->value;
20332 selected_cpu = opt->value;
20333 strcpy (selected_cpu_name, opt->name);
20334 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
20335 *input_line_pointer = saved_char;
20336 demand_empty_rest_of_line ();
20337 return;
20338 }
20339
20340 as_bad (_("unknown architecture `%s'\n"), name);
20341 *input_line_pointer = saved_char;
20342 ignore_rest_of_line ();
20343 }
20344
20345
20346 /* Parse a .fpu directive. */
20347
20348 static void
20349 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
20350 {
20351 const struct arm_option_cpu_value_table *opt;
20352 char saved_char;
20353 char *name;
20354
20355 name = input_line_pointer;
20356 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
20357 input_line_pointer++;
20358 saved_char = *input_line_pointer;
20359 *input_line_pointer = 0;
20360
20361 for (opt = arm_fpus; opt->name != NULL; opt++)
20362 if (streq (opt->name, name))
20363 {
20364 mfpu_opt = &opt->value;
20365 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
20366 *input_line_pointer = saved_char;
20367 demand_empty_rest_of_line ();
20368 return;
20369 }
20370
20371 as_bad (_("unknown floating point format `%s'\n"), name);
20372 *input_line_pointer = saved_char;
20373 ignore_rest_of_line ();
20374 }
20375
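/* Illustrative usage of the three directive handlers above; the names are
   taken from the arm_cpus, arm_archs and arm_fpus tables earlier in this
   file:

       .cpu  arm926ej-s
       .arch armv5tej
       .fpu  vfp

   Each handler terminates the operand at the first whitespace character,
   looks it up in its table, and re-merges cpu_variant from the selected
   CPU/architecture and FPU feature sets.  */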