* config/tc-arm.c (arm_it): Add immisfloat field.
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
3 2004, 2005, 2006
4 Free Software Foundation, Inc.
5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 Modified by David Taylor (dtaylor@armltd.co.uk)
7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
10
11 This file is part of GAS, the GNU Assembler.
12
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
16 any later version.
17
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
26 02110-1301, USA. */
27
28 #include <limits.h>
29 #include <stdarg.h>
30 #define NO_RELOC 0
31 #include "as.h"
32 #include "safe-ctype.h"
33 #include "subsegs.h"
34 #include "obstack.h"
35
36 #include "opcode/arm.h"
37
38 #ifdef OBJ_ELF
39 #include "elf/arm.h"
40 #include "dw2gencfi.h"
41 #endif
42
43 #include "dwarf2dbg.h"
44
45 #define WARN_DEPRECATED 1
46
47 #ifdef OBJ_ELF
48 /* Must be at least the size of the largest unwind opcode (currently two). */
49 #define ARM_OPCODE_CHUNK_SIZE 8
50
51 /* This structure holds the unwinding state. */
52
53 static struct
54 {
55 symbolS * proc_start;
56 symbolS * table_entry;
57 symbolS * personality_routine;
58 int personality_index;
59 /* The segment containing the function. */
60 segT saved_seg;
61 subsegT saved_subseg;
62 /* Opcodes generated from this function. */
63 unsigned char * opcodes;
64 int opcode_count;
65 int opcode_alloc;
66 /* The number of bytes pushed to the stack. */
67 offsetT frame_size;
68 /* We don't add stack adjustment opcodes immediately so that we can merge
69 multiple adjustments. We can also omit the final adjustment
70 when using a frame pointer. */
71 offsetT pending_offset;
72 /* These two fields are set by both unwind_movsp and unwind_setfp. They
73 hold the reg+offset to use when restoring sp from a frame pointer. */
74 offsetT fp_offset;
75 int fp_reg;
76 /* Nonzero if an unwind_setfp directive has been seen. */
77 unsigned fp_used:1;
78 /* Nonzero if the last opcode restores sp from fp_reg. */
79 unsigned sp_restored:1;
80 } unwind;
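/* Illustrative example (not from the original source): a typical EH-annotated
   function drives this state with directives such as
       .fnstart
       .save   {r4, lr}
       .setfp  fp, sp, #8
       ...
       .fnend
   whose handlers (elsewhere in this file) record their effects in the
   structure above.  */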
81
 82 /* If bit N is set, an R_ARM_NONE relocation has already been output for
 83 __aeabi_unwind_cpp_prN. This lets the dependency be emitted only once
 84 per section, avoiding unnecessary bloat. */
85 static unsigned int marked_pr_dependency = 0;
86
87 #endif /* OBJ_ELF */
88
89 /* Results from operand parsing worker functions. */
90
91 typedef enum
92 {
93 PARSE_OPERAND_SUCCESS,
94 PARSE_OPERAND_FAIL,
95 PARSE_OPERAND_FAIL_NO_BACKTRACK
96 } parse_operand_result;
97
98 enum arm_float_abi
99 {
100 ARM_FLOAT_ABI_HARD,
101 ARM_FLOAT_ABI_SOFTFP,
102 ARM_FLOAT_ABI_SOFT
103 };
104
105 /* Types of processor to assemble for. */
106 #ifndef CPU_DEFAULT
107 #if defined __XSCALE__
108 #define CPU_DEFAULT ARM_ARCH_XSCALE
109 #else
110 #if defined __thumb__
111 #define CPU_DEFAULT ARM_ARCH_V5T
112 #endif
113 #endif
114 #endif
115
116 #ifndef FPU_DEFAULT
117 # ifdef TE_LINUX
118 # define FPU_DEFAULT FPU_ARCH_FPA
119 # elif defined (TE_NetBSD)
120 # ifdef OBJ_ELF
121 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
122 # else
123 /* Legacy a.out format. */
124 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
125 # endif
126 # elif defined (TE_VXWORKS)
127 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
128 # else
129 /* For backwards compatibility, default to FPA. */
130 # define FPU_DEFAULT FPU_ARCH_FPA
131 # endif
132 #endif /* ifndef FPU_DEFAULT */
133
134 #define streq(a, b) (strcmp (a, b) == 0)
135
136 static arm_feature_set cpu_variant;
137 static arm_feature_set arm_arch_used;
138 static arm_feature_set thumb_arch_used;
139
140 /* Flags stored in private area of BFD structure. */
141 static int uses_apcs_26 = FALSE;
142 static int atpcs = FALSE;
143 static int support_interwork = FALSE;
144 static int uses_apcs_float = FALSE;
145 static int pic_code = FALSE;
146
147 /* Variables that we set while parsing command-line options. Once all
148 options have been read we re-process these values to set the real
149 assembly flags. */
150 static const arm_feature_set *legacy_cpu = NULL;
151 static const arm_feature_set *legacy_fpu = NULL;
152
153 static const arm_feature_set *mcpu_cpu_opt = NULL;
154 static const arm_feature_set *mcpu_fpu_opt = NULL;
155 static const arm_feature_set *march_cpu_opt = NULL;
156 static const arm_feature_set *march_fpu_opt = NULL;
157 static const arm_feature_set *mfpu_opt = NULL;
158 static const arm_feature_set *object_arch = NULL;
159
160 /* Constants for known architecture features. */
161 static const arm_feature_set fpu_default = FPU_DEFAULT;
162 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
163 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
164 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
165 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
166 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
167 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
168 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
169 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
170
171 #ifdef CPU_DEFAULT
172 static const arm_feature_set cpu_default = CPU_DEFAULT;
173 #endif
174
175 static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
176 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V1, 0);
177 static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
178 static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
179 static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
180 static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
181 static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
182 static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
183 static const arm_feature_set arm_ext_v4t_5 =
184 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
185 static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
186 static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
187 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
188 static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
189 static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
190 static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
191 static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0);
192 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
193 static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
194 static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
195 static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
196 static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
197 static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
198 static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);
199
200 static const arm_feature_set arm_arch_any = ARM_ANY;
201 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
202 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
203 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
204
205 static const arm_feature_set arm_cext_iwmmxt2 =
206 ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
207 static const arm_feature_set arm_cext_iwmmxt =
208 ARM_FEATURE (0, ARM_CEXT_IWMMXT);
209 static const arm_feature_set arm_cext_xscale =
210 ARM_FEATURE (0, ARM_CEXT_XSCALE);
211 static const arm_feature_set arm_cext_maverick =
212 ARM_FEATURE (0, ARM_CEXT_MAVERICK);
213 static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
214 static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
215 static const arm_feature_set fpu_vfp_ext_v1xd =
216 ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
217 static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
218 static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
219 static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
220 static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
221 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
222 ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
223
224 static int mfloat_abi_opt = -1;
225 /* Record user cpu selection for object attributes. */
226 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
227 /* Must be long enough to hold any of the names in arm_cpus. */
228 static char selected_cpu_name[16];
229 #ifdef OBJ_ELF
230 # ifdef EABI_DEFAULT
231 static int meabi_flags = EABI_DEFAULT;
232 # else
233 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
234 # endif
235
236 bfd_boolean
237 arm_is_eabi(void)
238 {
239 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
240 }
241 #endif
242
243 #ifdef OBJ_ELF
244 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
245 symbolS * GOT_symbol;
246 #endif
247
248 /* 0: assemble for ARM,
249 1: assemble for Thumb,
250 2: assemble for Thumb even though target CPU does not support thumb
251 instructions. */
252 static int thumb_mode = 0;
253
254 /* If unified_syntax is true, we are processing the new unified
255 ARM/Thumb syntax. Important differences from the old ARM mode:
256
257 - Immediate operands do not require a # prefix.
258 - Conditional affixes always appear at the end of the
259 instruction. (For backward compatibility, those instructions
260 that formerly had them in the middle, continue to accept them
261 there.)
262 - The IT instruction may appear, and if it does is validated
263 against subsequent conditional affixes. It does not generate
264 machine code.
265
266 Important differences from the old Thumb mode:
267
268 - Immediate operands do not require a # prefix.
269 - Most of the V6T2 instructions are only available in unified mode.
270 - The .N and .W suffixes are recognized and honored (it is an error
271 if they cannot be honored).
272 - All instructions set the flags if and only if they have an 's' affix.
273 - Conditional affixes may be used. They are validated against
274 preceding IT instructions. Unlike ARM mode, you cannot use a
275 conditional affix except in the scope of an IT instruction. */
276
277 static bfd_boolean unified_syntax = FALSE;
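/* Illustrative examples of the difference (not from the original source):
   in unified syntax the assembler accepts

       addseq r0, r1, 1        @ 's' then condition at the end, '#' optional

   while old-style ARM code would write "addeqs r0, r1, #1" (still accepted
   for backward compatibility).  In unified Thumb, a conditional instruction
   must be covered by an IT instruction:

       it    eq
       addeq r0, r0, r1  */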
278
279 enum neon_el_type
280 {
281 NT_invtype,
282 NT_untyped,
283 NT_integer,
284 NT_float,
285 NT_poly,
286 NT_signed,
287 NT_unsigned
288 };
289
290 struct neon_type_el
291 {
292 enum neon_el_type type;
293 unsigned size;
294 };
295
296 #define NEON_MAX_TYPE_ELS 4
297
298 struct neon_type
299 {
300 struct neon_type_el el[NEON_MAX_TYPE_ELS];
301 unsigned elems;
302 };
303
304 struct arm_it
305 {
306 const char * error;
307 unsigned long instruction;
308 int size;
309 int size_req;
310 int cond;
311 /* "uncond_value" is set to the value in place of the conditional field in
312 unconditional versions of the instruction, or -1 if nothing is
313 appropriate. */
314 int uncond_value;
315 struct neon_type vectype;
316 /* Set to the opcode if the instruction needs relaxation.
317 Zero if the instruction is not relaxed. */
318 unsigned long relax;
319 struct
320 {
321 bfd_reloc_code_real_type type;
322 expressionS exp;
323 int pc_rel;
324 } reloc;
325
326 struct
327 {
328 unsigned reg;
329 signed int imm;
330 struct neon_type_el vectype;
331 unsigned present : 1; /* Operand present. */
332 unsigned isreg : 1; /* Operand was a register. */
333 unsigned immisreg : 1; /* .imm field is a second register. */
334 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
335 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
336 unsigned immisfloat : 1; /* Immediate was parsed as a float. */
337 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
338 instructions. This allows us to disambiguate ARM <-> vector insns. */
339 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
340 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
341 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
342 unsigned issingle : 1; /* Operand is VFP single-precision register. */
343 unsigned hasreloc : 1; /* Operand has relocation suffix. */
344 unsigned writeback : 1; /* Operand has trailing ! */
345 unsigned preind : 1; /* Preindexed address. */
346 unsigned postind : 1; /* Postindexed address. */
347 unsigned negative : 1; /* Index register was negated. */
348 unsigned shifted : 1; /* Shift applied to operation. */
349 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
350 } operands[6];
351 };
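/* Illustrative example (an assumption about typical use, not from the original
   source): for an operand written "[r1, r2]!" the address parser would be
   expected to record r1 via .reg with isreg set, r2 via .imm with immisreg
   set, and to set preind and writeback.  */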
352
353 static struct arm_it inst;
354
355 #define NUM_FLOAT_VALS 8
356
357 const char * fp_const[] =
358 {
359 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
360 };
361
362 /* Number of littlenums required to hold an extended precision number. */
363 #define MAX_LITTLENUMS 6
364
365 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
366
367 #define FAIL (-1)
368 #define SUCCESS (0)
369
370 #define SUFF_S 1
371 #define SUFF_D 2
372 #define SUFF_E 3
373 #define SUFF_P 4
374
375 #define CP_T_X 0x00008000
376 #define CP_T_Y 0x00400000
377
378 #define CONDS_BIT 0x00100000
379 #define LOAD_BIT 0x00100000
380
381 #define DOUBLE_LOAD_FLAG 0x00000001
382
383 struct asm_cond
384 {
385 const char * template;
386 unsigned long value;
387 };
388
389 #define COND_ALWAYS 0xE
390
391 struct asm_psr
392 {
393 const char *template;
394 unsigned long field;
395 };
396
397 struct asm_barrier_opt
398 {
399 const char *template;
400 unsigned long value;
401 };
402
403 /* The bit that distinguishes CPSR and SPSR. */
404 #define SPSR_BIT (1 << 22)
405
406 /* The individual PSR flag bits. */
407 #define PSR_c (1 << 16)
408 #define PSR_x (1 << 17)
409 #define PSR_s (1 << 18)
410 #define PSR_f (1 << 19)
411
412 struct reloc_entry
413 {
414 char *name;
415 bfd_reloc_code_real_type reloc;
416 };
417
418 enum vfp_reg_pos
419 {
420 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
421 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
422 };
423
424 enum vfp_ldstm_type
425 {
426 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
427 };
428
429 /* Bits for DEFINED field in neon_typed_alias. */
430 #define NTA_HASTYPE 1
431 #define NTA_HASINDEX 2
432
433 struct neon_typed_alias
434 {
435 unsigned char defined;
436 unsigned char index;
437 struct neon_type_el eltype;
438 };
439
440 /* ARM register categories. This includes coprocessor numbers and various
441 architecture extensions' registers. */
442 enum arm_reg_type
443 {
444 REG_TYPE_RN,
445 REG_TYPE_CP,
446 REG_TYPE_CN,
447 REG_TYPE_FN,
448 REG_TYPE_VFS,
449 REG_TYPE_VFD,
450 REG_TYPE_NQ,
451 REG_TYPE_VFSD,
452 REG_TYPE_NDQ,
453 REG_TYPE_NSDQ,
454 REG_TYPE_VFC,
455 REG_TYPE_MVF,
456 REG_TYPE_MVD,
457 REG_TYPE_MVFX,
458 REG_TYPE_MVDX,
459 REG_TYPE_MVAX,
460 REG_TYPE_DSPSC,
461 REG_TYPE_MMXWR,
462 REG_TYPE_MMXWC,
463 REG_TYPE_MMXWCG,
464 REG_TYPE_XSCALE,
465 };
466
467 /* Structure for a hash table entry for a register.
468 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
469 information which states whether a vector type or index is specified (for a
470 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
471 struct reg_entry
472 {
473 const char *name;
474 unsigned char number;
475 unsigned char type;
476 unsigned char builtin;
477 struct neon_typed_alias *neon;
478 };
479
480 /* Diagnostics used when we don't get a register of the expected type. */
481 const char *const reg_expected_msgs[] =
482 {
483 N_("ARM register expected"),
484 N_("bad or missing co-processor number"),
485 N_("co-processor register expected"),
486 N_("FPA register expected"),
487 N_("VFP single precision register expected"),
488 N_("VFP/Neon double precision register expected"),
489 N_("Neon quad precision register expected"),
490 N_("VFP single or double precision register expected"),
491 N_("Neon double or quad precision register expected"),
492 N_("VFP single, double or Neon quad precision register expected"),
493 N_("VFP system register expected"),
494 N_("Maverick MVF register expected"),
495 N_("Maverick MVD register expected"),
496 N_("Maverick MVFX register expected"),
497 N_("Maverick MVDX register expected"),
498 N_("Maverick MVAX register expected"),
499 N_("Maverick DSPSC register expected"),
500 N_("iWMMXt data register expected"),
501 N_("iWMMXt control register expected"),
502 N_("iWMMXt scalar register expected"),
503 N_("XScale accumulator register expected"),
504 };
505
506 /* Some well known registers that we refer to directly elsewhere. */
507 #define REG_SP 13
508 #define REG_LR 14
509 #define REG_PC 15
510
 511 /* ARM instructions take 4 bytes in the object file, Thumb instructions
 512 take 2. */
513 #define INSN_SIZE 4
514
515 struct asm_opcode
516 {
517 /* Basic string to match. */
518 const char *template;
519
520 /* Parameters to instruction. */
521 unsigned char operands[8];
522
523 /* Conditional tag - see opcode_lookup. */
524 unsigned int tag : 4;
525
526 /* Basic instruction code. */
527 unsigned int avalue : 28;
528
529 /* Thumb-format instruction code. */
530 unsigned int tvalue;
531
532 /* Which architecture variant provides this instruction. */
533 const arm_feature_set *avariant;
534 const arm_feature_set *tvariant;
535
536 /* Function to call to encode instruction in ARM format. */
537 void (* aencode) (void);
538
539 /* Function to call to encode instruction in Thumb format. */
540 void (* tencode) (void);
541 };
542
543 /* Defines for various bits that we will want to toggle. */
544 #define INST_IMMEDIATE 0x02000000
545 #define OFFSET_REG 0x02000000
546 #define HWOFFSET_IMM 0x00400000
547 #define SHIFT_BY_REG 0x00000010
548 #define PRE_INDEX 0x01000000
549 #define INDEX_UP 0x00800000
550 #define WRITE_BACK 0x00200000
551 #define LDM_TYPE_2_OR_3 0x00400000
552 #define CPSI_MMOD 0x00020000
553
554 #define LITERAL_MASK 0xf000f000
555 #define OPCODE_MASK 0xfe1fffff
556 #define V4_STR_BIT 0x00000020
557
558 #define DATA_OP_SHIFT 21
559
560 #define T2_OPCODE_MASK 0xfe1fffff
561 #define T2_DATA_OP_SHIFT 21
562
563 /* Codes to distinguish the arithmetic instructions. */
564 #define OPCODE_AND 0
565 #define OPCODE_EOR 1
566 #define OPCODE_SUB 2
567 #define OPCODE_RSB 3
568 #define OPCODE_ADD 4
569 #define OPCODE_ADC 5
570 #define OPCODE_SBC 6
571 #define OPCODE_RSC 7
572 #define OPCODE_TST 8
573 #define OPCODE_TEQ 9
574 #define OPCODE_CMP 10
575 #define OPCODE_CMN 11
576 #define OPCODE_ORR 12
577 #define OPCODE_MOV 13
578 #define OPCODE_BIC 14
579 #define OPCODE_MVN 15
580
581 #define T2_OPCODE_AND 0
582 #define T2_OPCODE_BIC 1
583 #define T2_OPCODE_ORR 2
584 #define T2_OPCODE_ORN 3
585 #define T2_OPCODE_EOR 4
586 #define T2_OPCODE_ADD 8
587 #define T2_OPCODE_ADC 10
588 #define T2_OPCODE_SBC 11
589 #define T2_OPCODE_SUB 13
590 #define T2_OPCODE_RSB 14
591
592 #define T_OPCODE_MUL 0x4340
593 #define T_OPCODE_TST 0x4200
594 #define T_OPCODE_CMN 0x42c0
595 #define T_OPCODE_NEG 0x4240
596 #define T_OPCODE_MVN 0x43c0
597
598 #define T_OPCODE_ADD_R3 0x1800
599 #define T_OPCODE_SUB_R3 0x1a00
600 #define T_OPCODE_ADD_HI 0x4400
601 #define T_OPCODE_ADD_ST 0xb000
602 #define T_OPCODE_SUB_ST 0xb080
603 #define T_OPCODE_ADD_SP 0xa800
604 #define T_OPCODE_ADD_PC 0xa000
605 #define T_OPCODE_ADD_I8 0x3000
606 #define T_OPCODE_SUB_I8 0x3800
607 #define T_OPCODE_ADD_I3 0x1c00
608 #define T_OPCODE_SUB_I3 0x1e00
609
610 #define T_OPCODE_ASR_R 0x4100
611 #define T_OPCODE_LSL_R 0x4080
612 #define T_OPCODE_LSR_R 0x40c0
613 #define T_OPCODE_ROR_R 0x41c0
614 #define T_OPCODE_ASR_I 0x1000
615 #define T_OPCODE_LSL_I 0x0000
616 #define T_OPCODE_LSR_I 0x0800
617
618 #define T_OPCODE_MOV_I8 0x2000
619 #define T_OPCODE_CMP_I8 0x2800
620 #define T_OPCODE_CMP_LR 0x4280
621 #define T_OPCODE_MOV_HR 0x4600
622 #define T_OPCODE_CMP_HR 0x4500
623
624 #define T_OPCODE_LDR_PC 0x4800
625 #define T_OPCODE_LDR_SP 0x9800
626 #define T_OPCODE_STR_SP 0x9000
627 #define T_OPCODE_LDR_IW 0x6800
628 #define T_OPCODE_STR_IW 0x6000
629 #define T_OPCODE_LDR_IH 0x8800
630 #define T_OPCODE_STR_IH 0x8000
631 #define T_OPCODE_LDR_IB 0x7800
632 #define T_OPCODE_STR_IB 0x7000
633 #define T_OPCODE_LDR_RW 0x5800
634 #define T_OPCODE_STR_RW 0x5000
635 #define T_OPCODE_LDR_RH 0x5a00
636 #define T_OPCODE_STR_RH 0x5200
637 #define T_OPCODE_LDR_RB 0x5c00
638 #define T_OPCODE_STR_RB 0x5400
639
640 #define T_OPCODE_PUSH 0xb400
641 #define T_OPCODE_POP 0xbc00
642
643 #define T_OPCODE_BRANCH 0xe000
644
645 #define THUMB_SIZE 2 /* Size of thumb instruction. */
646 #define THUMB_PP_PC_LR 0x0100
647 #define THUMB_LOAD_BIT 0x0800
648 #define THUMB2_LOAD_BIT 0x00100000
649
650 #define BAD_ARGS _("bad arguments to instruction")
651 #define BAD_PC _("r15 not allowed here")
652 #define BAD_COND _("instruction cannot be conditional")
653 #define BAD_OVERLAP _("registers may not be the same")
654 #define BAD_HIREG _("lo register required")
655 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
 656 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
657 #define BAD_BRANCH _("branch must be last instruction in IT block")
658 #define BAD_NOT_IT _("instruction not allowed in IT block")
659 #define BAD_FPU _("selected FPU does not support instruction")
660
661 static struct hash_control *arm_ops_hsh;
662 static struct hash_control *arm_cond_hsh;
663 static struct hash_control *arm_shift_hsh;
664 static struct hash_control *arm_psr_hsh;
665 static struct hash_control *arm_v7m_psr_hsh;
666 static struct hash_control *arm_reg_hsh;
667 static struct hash_control *arm_reloc_hsh;
668 static struct hash_control *arm_barrier_opt_hsh;
669
670 /* Stuff needed to resolve the label ambiguity
671 As:
672 ...
673 label: <insn>
674 may differ from:
675 ...
676 label:
677 <insn>
678 */
679
680 symbolS * last_label_seen;
681 static int label_is_thumb_function_name = FALSE;
682 \f
683 /* Literal pool structure. Held on a per-section
684 and per-sub-section basis. */
685
686 #define MAX_LITERAL_POOL_SIZE 1024
687 typedef struct literal_pool
688 {
689 expressionS literals [MAX_LITERAL_POOL_SIZE];
690 unsigned int next_free_entry;
691 unsigned int id;
692 symbolS * symbol;
693 segT section;
694 subsegT sub_section;
695 struct literal_pool * next;
696 } literal_pool;
697
698 /* Pointer to a linked list of literal pools. */
699 literal_pool * list_of_pools = NULL;
700
701 /* State variables for IT block handling. */
702 static bfd_boolean current_it_mask = 0;
703 static int current_cc;
704
705 \f
706 /* Pure syntax. */
707
708 /* This array holds the chars that always start a comment. If the
709 pre-processor is disabled, these aren't very useful. */
710 const char comment_chars[] = "@";
711
712 /* This array holds the chars that only start a comment at the beginning of
713 a line. If the line seems to have the form '# 123 filename'
714 .line and .file directives will appear in the pre-processed output. */
715 /* Note that input_file.c hand checks for '#' at the beginning of the
716 first line of the input file. This is because the compiler outputs
717 #NO_APP at the beginning of its output. */
718 /* Also note that comments like this one will always work. */
719 const char line_comment_chars[] = "#";
720
721 const char line_separator_chars[] = ";";
722
723 /* Chars that can be used to separate mant
724 from exp in floating point numbers. */
725 const char EXP_CHARS[] = "eE";
726
727 /* Chars that mean this number is a floating point constant. */
728 /* As in 0f12.456 */
729 /* or 0d1.2345e12 */
730
731 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
732
733 /* Prefix characters that indicate the start of an immediate
734 value. */
735 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
736
737 /* Separator character handling. */
738
739 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
740
741 static inline int
742 skip_past_char (char ** str, char c)
743 {
744 if (**str == c)
745 {
746 (*str)++;
747 return SUCCESS;
748 }
749 else
750 return FAIL;
751 }
752 #define skip_past_comma(str) skip_past_char (str, ',')
753
754 /* Arithmetic expressions (possibly involving symbols). */
755
756 /* Return TRUE if anything in the expression is a bignum. */
757
758 static int
759 walk_no_bignums (symbolS * sp)
760 {
761 if (symbol_get_value_expression (sp)->X_op == O_big)
762 return 1;
763
764 if (symbol_get_value_expression (sp)->X_add_symbol)
765 {
766 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
767 || (symbol_get_value_expression (sp)->X_op_symbol
768 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
769 }
770
771 return 0;
772 }
773
774 static int in_my_get_expression = 0;
775
776 /* Third argument to my_get_expression. */
777 #define GE_NO_PREFIX 0
778 #define GE_IMM_PREFIX 1
779 #define GE_OPT_PREFIX 2
780 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
781 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
782 #define GE_OPT_PREFIX_BIG 3
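/* For example (an illustration based on is_immediate_prefix above and the
   handling in my_get_expression below): GE_IMM_PREFIX insists on a leading
   '#' or '$', GE_NO_PREFIX leaves the text untouched, and the two
   GE_OPT_PREFIX modes skip a prefix if one is present.  In unified syntax
   everything except GE_OPT_PREFIX_BIG is forced to GE_OPT_PREFIX.  */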
783
784 static int
785 my_get_expression (expressionS * ep, char ** str, int prefix_mode)
786 {
787 char * save_in;
788 segT seg;
789
790 /* In unified syntax, all prefixes are optional. */
791 if (unified_syntax)
792 prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
793 : GE_OPT_PREFIX;
794
795 switch (prefix_mode)
796 {
797 case GE_NO_PREFIX: break;
798 case GE_IMM_PREFIX:
799 if (!is_immediate_prefix (**str))
800 {
801 inst.error = _("immediate expression requires a # prefix");
802 return FAIL;
803 }
804 (*str)++;
805 break;
806 case GE_OPT_PREFIX:
807 case GE_OPT_PREFIX_BIG:
808 if (is_immediate_prefix (**str))
809 (*str)++;
810 break;
811 default: abort ();
812 }
813
814 memset (ep, 0, sizeof (expressionS));
815
816 save_in = input_line_pointer;
817 input_line_pointer = *str;
818 in_my_get_expression = 1;
819 seg = expression (ep);
820 in_my_get_expression = 0;
821
822 if (ep->X_op == O_illegal)
823 {
824 /* We found a bad expression in md_operand(). */
825 *str = input_line_pointer;
826 input_line_pointer = save_in;
827 if (inst.error == NULL)
828 inst.error = _("bad expression");
829 return 1;
830 }
831
832 #ifdef OBJ_AOUT
833 if (seg != absolute_section
834 && seg != text_section
835 && seg != data_section
836 && seg != bss_section
837 && seg != undefined_section)
838 {
839 inst.error = _("bad segment");
840 *str = input_line_pointer;
841 input_line_pointer = save_in;
842 return 1;
843 }
844 #endif
845
846 /* Get rid of any bignums now, so that we don't generate an error for which
847 we can't establish a line number later on. Big numbers are never valid
848 in instructions, which is where this routine is always called. */
849 if (prefix_mode != GE_OPT_PREFIX_BIG
850 && (ep->X_op == O_big
851 || (ep->X_add_symbol
852 && (walk_no_bignums (ep->X_add_symbol)
853 || (ep->X_op_symbol
854 && walk_no_bignums (ep->X_op_symbol))))))
855 {
856 inst.error = _("invalid constant");
857 *str = input_line_pointer;
858 input_line_pointer = save_in;
859 return 1;
860 }
861
862 *str = input_line_pointer;
863 input_line_pointer = save_in;
864 return 0;
865 }
866
867 /* Turn a string in input_line_pointer into a floating point constant
868 of type TYPE, and store the appropriate bytes in *LITP. The number
869 of LITTLENUMS emitted is stored in *SIZEP. An error message is
870 returned, or NULL on OK.
871
 872 Note that fp constants aren't represented in the normal way on the ARM.
873 In big endian mode, things are as expected. However, in little endian
874 mode fp constants are big-endian word-wise, and little-endian byte-wise
875 within the words. For example, (double) 1.1 in big endian mode is
876 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
877 the byte sequence 99 99 f1 3f 9a 99 99 99.
878
879 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
880
881 char *
882 md_atof (int type, char * litP, int * sizeP)
883 {
884 int prec;
885 LITTLENUM_TYPE words[MAX_LITTLENUMS];
886 char *t;
887 int i;
888
889 switch (type)
890 {
891 case 'f':
892 case 'F':
893 case 's':
894 case 'S':
895 prec = 2;
896 break;
897
898 case 'd':
899 case 'D':
900 case 'r':
901 case 'R':
902 prec = 4;
903 break;
904
905 case 'x':
906 case 'X':
907 prec = 6;
908 break;
909
910 case 'p':
911 case 'P':
912 prec = 6;
913 break;
914
915 default:
916 *sizeP = 0;
917 return _("bad call to MD_ATOF()");
918 }
919
920 t = atof_ieee (input_line_pointer, type, words);
921 if (t)
922 input_line_pointer = t;
923 *sizeP = prec * 2;
924
925 if (target_big_endian)
926 {
927 for (i = 0; i < prec; i++)
928 {
929 md_number_to_chars (litP, (valueT) words[i], 2);
930 litP += 2;
931 }
932 }
933 else
934 {
935 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
936 for (i = prec - 1; i >= 0; i--)
937 {
938 md_number_to_chars (litP, (valueT) words[i], 2);
939 litP += 2;
940 }
941 else
942 /* For a 4 byte float the order of elements in `words' is 1 0.
943 For an 8 byte float the order is 1 0 3 2. */
944 for (i = 0; i < prec; i += 2)
945 {
946 md_number_to_chars (litP, (valueT) words[i + 1], 2);
947 md_number_to_chars (litP + 2, (valueT) words[i], 2);
948 litP += 4;
949 }
950 }
951
952 return 0;
953 }
954
955 /* We handle all bad expressions here, so that we can report the faulty
956 instruction in the error message. */
957 void
958 md_operand (expressionS * expr)
959 {
960 if (in_my_get_expression)
961 expr->X_op = O_illegal;
962 }
963
964 /* Immediate values. */
965
966 /* Generic immediate-value read function for use in directives.
967 Accepts anything that 'expression' can fold to a constant.
968 *val receives the number. */
969 #ifdef OBJ_ELF
970 static int
971 immediate_for_directive (int *val)
972 {
973 expressionS exp;
974 exp.X_op = O_illegal;
975
976 if (is_immediate_prefix (*input_line_pointer))
977 {
978 input_line_pointer++;
979 expression (&exp);
980 }
981
982 if (exp.X_op != O_constant)
983 {
984 as_bad (_("expected #constant"));
985 ignore_rest_of_line ();
986 return FAIL;
987 }
988 *val = exp.X_add_number;
989 return SUCCESS;
990 }
991 #endif
992
993 /* Register parsing. */
994
995 /* Generic register parser. CCP points to what should be the
996 beginning of a register name. If it is indeed a valid register
997 name, advance CCP over it and return the reg_entry structure;
998 otherwise return NULL. Does not issue diagnostics. */
999
1000 static struct reg_entry *
1001 arm_reg_parse_multi (char **ccp)
1002 {
1003 char *start = *ccp;
1004 char *p;
1005 struct reg_entry *reg;
1006
1007 #ifdef REGISTER_PREFIX
1008 if (*start != REGISTER_PREFIX)
1009 return NULL;
1010 start++;
1011 #endif
1012 #ifdef OPTIONAL_REGISTER_PREFIX
1013 if (*start == OPTIONAL_REGISTER_PREFIX)
1014 start++;
1015 #endif
1016
1017 p = start;
1018 if (!ISALPHA (*p) || !is_name_beginner (*p))
1019 return NULL;
1020
1021 do
1022 p++;
1023 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1024
1025 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1026
1027 if (!reg)
1028 return NULL;
1029
1030 *ccp = p;
1031 return reg;
1032 }
1033
1034 static int
1035 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1036 enum arm_reg_type type)
1037 {
1038 /* Alternative syntaxes are accepted for a few register classes. */
1039 switch (type)
1040 {
1041 case REG_TYPE_MVF:
1042 case REG_TYPE_MVD:
1043 case REG_TYPE_MVFX:
1044 case REG_TYPE_MVDX:
1045 /* Generic coprocessor register names are allowed for these. */
1046 if (reg && reg->type == REG_TYPE_CN)
1047 return reg->number;
1048 break;
1049
1050 case REG_TYPE_CP:
1051 /* For backward compatibility, a bare number is valid here. */
1052 {
1053 unsigned long processor = strtoul (start, ccp, 10);
1054 if (*ccp != start && processor <= 15)
1055 return processor;
1056 }
1057
1058 case REG_TYPE_MMXWC:
1059 /* WC includes WCG. ??? I'm not sure this is true for all
1060 instructions that take WC registers. */
1061 if (reg && reg->type == REG_TYPE_MMXWCG)
1062 return reg->number;
1063 break;
1064
1065 default:
1066 break;
1067 }
1068
1069 return FAIL;
1070 }
1071
1072 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1073 return value is the register number or FAIL. */
1074
1075 static int
1076 arm_reg_parse (char **ccp, enum arm_reg_type type)
1077 {
1078 char *start = *ccp;
1079 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1080 int ret;
1081
1082 /* Do not allow a scalar (reg+index) to parse as a register. */
1083 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1084 return FAIL;
1085
1086 if (reg && reg->type == type)
1087 return reg->number;
1088
1089 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1090 return ret;
1091
1092 *ccp = start;
1093 return FAIL;
1094 }
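/* Example (illustrative): arm_reg_parse (&p, REG_TYPE_RN) on "r3" returns 3
   and advances p past the name; with REG_TYPE_CP, a bare number such as "7"
   is also accepted via the alternative-syntax path above.  */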
1095
1096 /* Parse a Neon type specifier. *STR should point at the leading '.'
1097 character. Does no verification at this stage that the type fits the opcode
1098 properly. E.g.,
1099
1100 .i32.i32.s16
1101 .s32.f32
1102 .u16
1103
1104 Can all be legally parsed by this function.
1105
1106 Fills in neon_type struct pointer with parsed information, and updates STR
1107 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1108 type, FAIL if not. */
1109
1110 static int
1111 parse_neon_type (struct neon_type *type, char **str)
1112 {
1113 char *ptr = *str;
1114
1115 if (type)
1116 type->elems = 0;
1117
1118 while (type->elems < NEON_MAX_TYPE_ELS)
1119 {
1120 enum neon_el_type thistype = NT_untyped;
1121 unsigned thissize = -1u;
1122
1123 if (*ptr != '.')
1124 break;
1125
1126 ptr++;
1127
1128 /* Just a size without an explicit type. */
1129 if (ISDIGIT (*ptr))
1130 goto parsesize;
1131
1132 switch (TOLOWER (*ptr))
1133 {
1134 case 'i': thistype = NT_integer; break;
1135 case 'f': thistype = NT_float; break;
1136 case 'p': thistype = NT_poly; break;
1137 case 's': thistype = NT_signed; break;
1138 case 'u': thistype = NT_unsigned; break;
1139 case 'd':
1140 thistype = NT_float;
1141 thissize = 64;
1142 ptr++;
1143 goto done;
1144 default:
1145 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1146 return FAIL;
1147 }
1148
1149 ptr++;
1150
1151 /* .f is an abbreviation for .f32. */
1152 if (thistype == NT_float && !ISDIGIT (*ptr))
1153 thissize = 32;
1154 else
1155 {
1156 parsesize:
1157 thissize = strtoul (ptr, &ptr, 10);
1158
1159 if (thissize != 8 && thissize != 16 && thissize != 32
1160 && thissize != 64)
1161 {
1162 as_bad (_("bad size %d in type specifier"), thissize);
1163 return FAIL;
1164 }
1165 }
1166
1167 done:
1168 if (type)
1169 {
1170 type->el[type->elems].type = thistype;
1171 type->el[type->elems].size = thissize;
1172 type->elems++;
1173 }
1174 }
1175
1176 /* Empty/missing type is not a successful parse. */
1177 if (type->elems == 0)
1178 return FAIL;
1179
1180 *str = ptr;
1181
1182 return SUCCESS;
1183 }
1184
1185 /* Errors may be set multiple times during parsing or bit encoding
1186 (particularly in the Neon bits), but usually the earliest error which is set
1187 will be the most meaningful. Avoid overwriting it with later (cascading)
1188 errors by calling this function. */
1189
1190 static void
1191 first_error (const char *err)
1192 {
1193 if (!inst.error)
1194 inst.error = err;
1195 }
1196
1197 /* Parse a single type, e.g. ".s32", leading period included. */
1198 static int
1199 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1200 {
1201 char *str = *ccp;
1202 struct neon_type optype;
1203
1204 if (*str == '.')
1205 {
1206 if (parse_neon_type (&optype, &str) == SUCCESS)
1207 {
1208 if (optype.elems == 1)
1209 *vectype = optype.el[0];
1210 else
1211 {
1212 first_error (_("only one type should be specified for operand"));
1213 return FAIL;
1214 }
1215 }
1216 else
1217 {
1218 first_error (_("vector type expected"));
1219 return FAIL;
1220 }
1221 }
1222 else
1223 return FAIL;
1224
1225 *ccp = str;
1226
1227 return SUCCESS;
1228 }
1229
 1230 /* Special meanings for indices (which otherwise have a range of 0-7); these
 1231 values still fit in the 4-bit index field. */
1232
1233 #define NEON_ALL_LANES 15
1234 #define NEON_INTERLEAVE_LANES 14
1235
1236 /* Parse either a register or a scalar, with an optional type. Return the
1237 register number, and optionally fill in the actual type of the register
1238 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1239 type/index information in *TYPEINFO. */
1240
1241 static int
1242 parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
1243 enum arm_reg_type *rtype,
1244 struct neon_typed_alias *typeinfo)
1245 {
1246 char *str = *ccp;
1247 struct reg_entry *reg = arm_reg_parse_multi (&str);
1248 struct neon_typed_alias atype;
1249 struct neon_type_el parsetype;
1250
1251 atype.defined = 0;
1252 atype.index = -1;
1253 atype.eltype.type = NT_invtype;
1254 atype.eltype.size = -1;
1255
1256 /* Try alternate syntax for some types of register. Note these are mutually
1257 exclusive with the Neon syntax extensions. */
1258 if (reg == NULL)
1259 {
1260 int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
1261 if (altreg != FAIL)
1262 *ccp = str;
1263 if (typeinfo)
1264 *typeinfo = atype;
1265 return altreg;
1266 }
1267
1268 /* Undo polymorphism when a set of register types may be accepted. */
1269 if ((type == REG_TYPE_NDQ
1270 && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
1271 || (type == REG_TYPE_VFSD
1272 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
1273 || (type == REG_TYPE_NSDQ
1274 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
1275 || reg->type == REG_TYPE_NQ))
1276 || (type == REG_TYPE_MMXWC
1277 && (reg->type == REG_TYPE_MMXWCG)))
1278 type = reg->type;
1279
1280 if (type != reg->type)
1281 return FAIL;
1282
1283 if (reg->neon)
1284 atype = *reg->neon;
1285
1286 if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
1287 {
1288 if ((atype.defined & NTA_HASTYPE) != 0)
1289 {
1290 first_error (_("can't redefine type for operand"));
1291 return FAIL;
1292 }
1293 atype.defined |= NTA_HASTYPE;
1294 atype.eltype = parsetype;
1295 }
1296
1297 if (skip_past_char (&str, '[') == SUCCESS)
1298 {
1299 if (type != REG_TYPE_VFD)
1300 {
1301 first_error (_("only D registers may be indexed"));
1302 return FAIL;
1303 }
1304
1305 if ((atype.defined & NTA_HASINDEX) != 0)
1306 {
1307 first_error (_("can't change index for operand"));
1308 return FAIL;
1309 }
1310
1311 atype.defined |= NTA_HASINDEX;
1312
1313 if (skip_past_char (&str, ']') == SUCCESS)
1314 atype.index = NEON_ALL_LANES;
1315 else
1316 {
1317 expressionS exp;
1318
1319 my_get_expression (&exp, &str, GE_NO_PREFIX);
1320
1321 if (exp.X_op != O_constant)
1322 {
1323 first_error (_("constant expression required"));
1324 return FAIL;
1325 }
1326
1327 if (skip_past_char (&str, ']') == FAIL)
1328 return FAIL;
1329
1330 atype.index = exp.X_add_number;
1331 }
1332 }
1333
1334 if (typeinfo)
1335 *typeinfo = atype;
1336
1337 if (rtype)
1338 *rtype = type;
1339
1340 *ccp = str;
1341
1342 return reg->number;
1343 }
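/* Example (illustrative): given "d5.s32[1]" and TYPE == REG_TYPE_VFD, the
   function above returns 5 and fills *TYPEINFO with NTA_HASTYPE and
   NTA_HASINDEX set, element type .s32 and index 1.  */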
1344
 1345 /* Like arm_reg_parse, but allow the following extra features:
1346 - If RTYPE is non-zero, return the (possibly restricted) type of the
1347 register (e.g. Neon double or quad reg when either has been requested).
1348 - If this is a Neon vector type with additional type information, fill
1349 in the struct pointed to by VECTYPE (if non-NULL).
1350 This function will fault on encountering a scalar.
1351 */
1352
1353 static int
1354 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1355 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1356 {
1357 struct neon_typed_alias atype;
1358 char *str = *ccp;
1359 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1360
1361 if (reg == FAIL)
1362 return FAIL;
1363
1364 /* Do not allow a scalar (reg+index) to parse as a register. */
1365 if ((atype.defined & NTA_HASINDEX) != 0)
1366 {
1367 first_error (_("register operand expected, but got scalar"));
1368 return FAIL;
1369 }
1370
1371 if (vectype)
1372 *vectype = atype.eltype;
1373
1374 *ccp = str;
1375
1376 return reg;
1377 }
1378
1379 #define NEON_SCALAR_REG(X) ((X) >> 4)
1380 #define NEON_SCALAR_INDEX(X) ((X) & 15)
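/* Example (illustrative): parse_scalar below returns reg * 16 + index, so the
   scalar "d5[2]" yields 82, from which NEON_SCALAR_REG recovers 5 and
   NEON_SCALAR_INDEX recovers 2.  */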
1381
1382 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1383 have enough information to be able to do a good job bounds-checking. So, we
1384 just do easy checks here, and do further checks later. */
1385
1386 static int
1387 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1388 {
1389 int reg;
1390 char *str = *ccp;
1391 struct neon_typed_alias atype;
1392
1393 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1394
1395 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1396 return FAIL;
1397
1398 if (atype.index == NEON_ALL_LANES)
1399 {
1400 first_error (_("scalar must have an index"));
1401 return FAIL;
1402 }
1403 else if (atype.index >= 64 / elsize)
1404 {
1405 first_error (_("scalar index out of range"));
1406 return FAIL;
1407 }
1408
1409 if (type)
1410 *type = atype.eltype;
1411
1412 *ccp = str;
1413
1414 return reg * 16 + atype.index;
1415 }
1416
1417 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1418 static long
1419 parse_reg_list (char ** strp)
1420 {
1421 char * str = * strp;
1422 long range = 0;
1423 int another_range;
1424
1425 /* We come back here if we get ranges concatenated by '+' or '|'. */
1426 do
1427 {
1428 another_range = 0;
1429
1430 if (*str == '{')
1431 {
1432 int in_range = 0;
1433 int cur_reg = -1;
1434
1435 str++;
1436 do
1437 {
1438 int reg;
1439
1440 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1441 {
1442 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1443 return FAIL;
1444 }
1445
1446 if (in_range)
1447 {
1448 int i;
1449
1450 if (reg <= cur_reg)
1451 {
1452 first_error (_("bad range in register list"));
1453 return FAIL;
1454 }
1455
1456 for (i = cur_reg + 1; i < reg; i++)
1457 {
1458 if (range & (1 << i))
1459 as_tsktsk
1460 (_("Warning: duplicated register (r%d) in register list"),
1461 i);
1462 else
1463 range |= 1 << i;
1464 }
1465 in_range = 0;
1466 }
1467
1468 if (range & (1 << reg))
1469 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1470 reg);
1471 else if (reg <= cur_reg)
1472 as_tsktsk (_("Warning: register range not in ascending order"));
1473
1474 range |= 1 << reg;
1475 cur_reg = reg;
1476 }
1477 while (skip_past_comma (&str) != FAIL
1478 || (in_range = 1, *str++ == '-'));
1479 str--;
1480
1481 if (*str++ != '}')
1482 {
1483 first_error (_("missing `}'"));
1484 return FAIL;
1485 }
1486 }
1487 else
1488 {
1489 expressionS expr;
1490
1491 if (my_get_expression (&expr, &str, GE_NO_PREFIX))
1492 return FAIL;
1493
1494 if (expr.X_op == O_constant)
1495 {
1496 if (expr.X_add_number
1497 != (expr.X_add_number & 0x0000ffff))
1498 {
1499 inst.error = _("invalid register mask");
1500 return FAIL;
1501 }
1502
1503 if ((range & expr.X_add_number) != 0)
1504 {
1505 int regno = range & expr.X_add_number;
1506
1507 regno &= -regno;
1508 regno = (1 << regno) - 1;
1509 as_tsktsk
1510 (_("Warning: duplicated register (r%d) in register list"),
1511 regno);
1512 }
1513
1514 range |= expr.X_add_number;
1515 }
1516 else
1517 {
1518 if (inst.reloc.type != 0)
1519 {
1520 inst.error = _("expression too complex");
1521 return FAIL;
1522 }
1523
1524 memcpy (&inst.reloc.exp, &expr, sizeof (expressionS));
1525 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1526 inst.reloc.pc_rel = 0;
1527 }
1528 }
1529
1530 if (*str == '|' || *str == '+')
1531 {
1532 str++;
1533 another_range = 1;
1534 }
1535 }
1536 while (another_range);
1537
1538 *strp = str;
1539 return range;
1540 }
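/* Example (illustrative): parse_reg_list on "{r0-r3, r5}" returns the bitmask
   0x2f (bits 0-3 and 5); a bare constant mask, or lists joined with '+' or
   '|', are also accepted, as handled above.  */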
1541
1542 /* Types of registers in a list. */
1543
1544 enum reg_list_els
1545 {
1546 REGLIST_VFP_S,
1547 REGLIST_VFP_D,
1548 REGLIST_NEON_D
1549 };
1550
1551 /* Parse a VFP register list. If the string is invalid return FAIL.
1552 Otherwise return the number of registers, and set PBASE to the first
1553 register. Parses registers of type ETYPE.
1554 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1555 - Q registers can be used to specify pairs of D registers
1556 - { } can be omitted from around a singleton register list
1557 FIXME: This is not implemented, as it would require backtracking in
1558 some cases, e.g.:
1559 vtbl.8 d3,d4,d5
1560 This could be done (the meaning isn't really ambiguous), but doesn't
1561 fit in well with the current parsing framework.
1562 - 32 D registers may be used (also true for VFPv3).
1563 FIXME: Types are ignored in these register lists, which is probably a
1564 bug. */
1565
1566 static int
1567 parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
1568 {
1569 char *str = *ccp;
1570 int base_reg;
1571 int new_base;
1572 enum arm_reg_type regtype = 0;
1573 int max_regs = 0;
1574 int count = 0;
1575 int warned = 0;
1576 unsigned long mask = 0;
1577 int i;
1578
1579 if (*str != '{')
1580 {
1581 inst.error = _("expecting {");
1582 return FAIL;
1583 }
1584
1585 str++;
1586
1587 switch (etype)
1588 {
1589 case REGLIST_VFP_S:
1590 regtype = REG_TYPE_VFS;
1591 max_regs = 32;
1592 break;
1593
1594 case REGLIST_VFP_D:
1595 regtype = REG_TYPE_VFD;
1596 break;
1597
1598 case REGLIST_NEON_D:
1599 regtype = REG_TYPE_NDQ;
1600 break;
1601 }
1602
1603 if (etype != REGLIST_VFP_S)
1604 {
1605 /* VFPv3 allows 32 D registers. */
1606 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
1607 {
1608 max_regs = 32;
1609 if (thumb_mode)
1610 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1611 fpu_vfp_ext_v3);
1612 else
1613 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1614 fpu_vfp_ext_v3);
1615 }
1616 else
1617 max_regs = 16;
1618 }
1619
1620 base_reg = max_regs;
1621
1622 do
1623 {
1624 int setmask = 1, addregs = 1;
1625
1626 new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);
1627
1628 if (new_base == FAIL)
1629 {
1630 first_error (_(reg_expected_msgs[regtype]));
1631 return FAIL;
1632 }
1633
1634 if (new_base >= max_regs)
1635 {
1636 first_error (_("register out of range in list"));
1637 return FAIL;
1638 }
1639
1640 /* Note: a value of 2 * n is returned for the register Q<n>. */
1641 if (regtype == REG_TYPE_NQ)
1642 {
1643 setmask = 3;
1644 addregs = 2;
1645 }
1646
1647 if (new_base < base_reg)
1648 base_reg = new_base;
1649
1650 if (mask & (setmask << new_base))
1651 {
1652 first_error (_("invalid register list"));
1653 return FAIL;
1654 }
1655
1656 if ((mask >> new_base) != 0 && ! warned)
1657 {
1658 as_tsktsk (_("register list not in ascending order"));
1659 warned = 1;
1660 }
1661
1662 mask |= setmask << new_base;
1663 count += addregs;
1664
1665 if (*str == '-') /* We have the start of a range expression */
1666 {
1667 int high_range;
1668
1669 str++;
1670
1671 if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
1672 == FAIL)
1673 {
1674 inst.error = gettext (reg_expected_msgs[regtype]);
1675 return FAIL;
1676 }
1677
1678 if (high_range >= max_regs)
1679 {
1680 first_error (_("register out of range in list"));
1681 return FAIL;
1682 }
1683
1684 if (regtype == REG_TYPE_NQ)
1685 high_range = high_range + 1;
1686
1687 if (high_range <= new_base)
1688 {
1689 inst.error = _("register range not in ascending order");
1690 return FAIL;
1691 }
1692
1693 for (new_base += addregs; new_base <= high_range; new_base += addregs)
1694 {
1695 if (mask & (setmask << new_base))
1696 {
1697 inst.error = _("invalid register list");
1698 return FAIL;
1699 }
1700
1701 mask |= setmask << new_base;
1702 count += addregs;
1703 }
1704 }
1705 }
1706 while (skip_past_comma (&str) != FAIL);
1707
1708 str++;
1709
1710 /* Sanity check -- should have raised a parse error above. */
1711 if (count == 0 || count > max_regs)
1712 abort ();
1713
1714 *pbase = base_reg;
1715
1716 /* Final test -- the registers must be consecutive. */
1717 mask >>= base_reg;
1718 for (i = 0; i < count; i++)
1719 {
1720 if ((mask & (1u << i)) == 0)
1721 {
1722 inst.error = _("non-contiguous register range");
1723 return FAIL;
1724 }
1725 }
1726
1727 *ccp = str;
1728
1729 return count;
1730 }
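/* Example (illustrative): for "{s4-s7}" with REGLIST_VFP_S the function above
   returns 4 and sets *PBASE to 4; "{s4, s6}" is rejected because the final
   check above requires the registers to be consecutive.  */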
1731
1732 /* True if two alias types are the same. */
1733
1734 static int
1735 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1736 {
1737 if (!a && !b)
1738 return 1;
1739
1740 if (!a || !b)
1741 return 0;
1742
1743 if (a->defined != b->defined)
1744 return 0;
1745
1746 if ((a->defined & NTA_HASTYPE) != 0
1747 && (a->eltype.type != b->eltype.type
1748 || a->eltype.size != b->eltype.size))
1749 return 0;
1750
1751 if ((a->defined & NTA_HASINDEX) != 0
1752 && (a->index != b->index))
1753 return 0;
1754
1755 return 1;
1756 }
1757
1758 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1759 The base register is put in *PBASE.
1760 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1761 the return value.
1762 The register stride (minus one) is put in bit 4 of the return value.
1763 Bits [6:5] encode the list length (minus one).
1764 The type of the list elements is put in *ELTYPE, if non-NULL. */
1765
1766 #define NEON_LANE(X) ((X) & 0xf)
1767 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1768 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
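/* Example (illustrative): for the list "{d1[2], d3[2]}" the function below
   returns 2 | (1 << 4) | (1 << 5) = 0x32 and sets *PBASE to 1, so NEON_LANE
   gives 2, NEON_REG_STRIDE gives 2 and NEON_REGLIST_LENGTH gives 2.  */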
1769
1770 static int
1771 parse_neon_el_struct_list (char **str, unsigned *pbase,
1772 struct neon_type_el *eltype)
1773 {
1774 char *ptr = *str;
1775 int base_reg = -1;
1776 int reg_incr = -1;
1777 int count = 0;
1778 int lane = -1;
1779 int leading_brace = 0;
1780 enum arm_reg_type rtype = REG_TYPE_NDQ;
1781 int addregs = 1;
1782 const char *const incr_error = "register stride must be 1 or 2";
1783 const char *const type_error = "mismatched element/structure types in list";
1784 struct neon_typed_alias firsttype;
1785
1786 if (skip_past_char (&ptr, '{') == SUCCESS)
1787 leading_brace = 1;
1788
1789 do
1790 {
1791 struct neon_typed_alias atype;
1792 int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);
1793
1794 if (getreg == FAIL)
1795 {
1796 first_error (_(reg_expected_msgs[rtype]));
1797 return FAIL;
1798 }
1799
1800 if (base_reg == -1)
1801 {
1802 base_reg = getreg;
1803 if (rtype == REG_TYPE_NQ)
1804 {
1805 reg_incr = 1;
1806 addregs = 2;
1807 }
1808 firsttype = atype;
1809 }
1810 else if (reg_incr == -1)
1811 {
1812 reg_incr = getreg - base_reg;
1813 if (reg_incr < 1 || reg_incr > 2)
1814 {
1815 first_error (_(incr_error));
1816 return FAIL;
1817 }
1818 }
1819 else if (getreg != base_reg + reg_incr * count)
1820 {
1821 first_error (_(incr_error));
1822 return FAIL;
1823 }
1824
1825 if (!neon_alias_types_same (&atype, &firsttype))
1826 {
1827 first_error (_(type_error));
1828 return FAIL;
1829 }
1830
1831 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
1832 modes. */
1833 if (ptr[0] == '-')
1834 {
1835 struct neon_typed_alias htype;
1836 int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
1837 if (lane == -1)
1838 lane = NEON_INTERLEAVE_LANES;
1839 else if (lane != NEON_INTERLEAVE_LANES)
1840 {
1841 first_error (_(type_error));
1842 return FAIL;
1843 }
1844 if (reg_incr == -1)
1845 reg_incr = 1;
1846 else if (reg_incr != 1)
1847 {
1848 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
1849 return FAIL;
1850 }
1851 ptr++;
1852 hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
1853 if (hireg == FAIL)
1854 {
1855 first_error (_(reg_expected_msgs[rtype]));
1856 return FAIL;
1857 }
1858 if (!neon_alias_types_same (&htype, &firsttype))
1859 {
1860 first_error (_(type_error));
1861 return FAIL;
1862 }
1863 count += hireg + dregs - getreg;
1864 continue;
1865 }
1866
1867 /* If we're using Q registers, we can't use [] or [n] syntax. */
1868 if (rtype == REG_TYPE_NQ)
1869 {
1870 count += 2;
1871 continue;
1872 }
1873
1874 if ((atype.defined & NTA_HASINDEX) != 0)
1875 {
1876 if (lane == -1)
1877 lane = atype.index;
1878 else if (lane != atype.index)
1879 {
1880 first_error (_(type_error));
1881 return FAIL;
1882 }
1883 }
1884 else if (lane == -1)
1885 lane = NEON_INTERLEAVE_LANES;
1886 else if (lane != NEON_INTERLEAVE_LANES)
1887 {
1888 first_error (_(type_error));
1889 return FAIL;
1890 }
1891 count++;
1892 }
1893 while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);
1894
1895 /* No lane set by [x]. We must be interleaving structures. */
1896 if (lane == -1)
1897 lane = NEON_INTERLEAVE_LANES;
1898
1899 /* Sanity check. */
1900 if (lane == -1 || base_reg == -1 || count < 1 || count > 4
1901 || (count > 1 && reg_incr == -1))
1902 {
1903 first_error (_("error parsing element/structure list"));
1904 return FAIL;
1905 }
1906
1907 if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
1908 {
1909 first_error (_("expected }"));
1910 return FAIL;
1911 }
1912
1913 if (reg_incr == -1)
1914 reg_incr = 1;
1915
1916 if (eltype)
1917 *eltype = firsttype.eltype;
1918
1919 *pbase = base_reg;
1920 *str = ptr;
1921
1922 return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
1923 }
1924
1925 /* Parse an explicit relocation suffix on an expression. This is
1926 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
1927 arm_reloc_hsh contains no entries, so this function can only
1928 succeed if there is no () after the word. Returns -1 on error,
1929 BFD_RELOC_UNUSED if there wasn't any suffix. */
1930 static int
1931 parse_reloc (char **str)
1932 {
1933 struct reloc_entry *r;
1934 char *p, *q;
1935
1936 if (**str != '(')
1937 return BFD_RELOC_UNUSED;
1938
1939 p = *str + 1;
1940 q = p;
1941
1942 while (*q && *q != ')' && *q != ',')
1943 q++;
1944 if (*q != ')')
1945 return -1;
1946
1947 if ((r = hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
1948 return -1;
1949
1950 *str = q + 1;
1951 return r->reloc;
1952 }
1953
1954 /* Directives: register aliases. */
1955
1956 static struct reg_entry *
1957 insert_reg_alias (char *str, int number, int type)
1958 {
1959 struct reg_entry *new;
1960 const char *name;
1961
1962 if ((new = hash_find (arm_reg_hsh, str)) != 0)
1963 {
1964 if (new->builtin)
1965 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
1966
1967 /* Only warn about a redefinition if it's not defined as the
1968 same register. */
1969 else if (new->number != number || new->type != type)
1970 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1971
1972 return 0;
1973 }
1974
1975 name = xstrdup (str);
1976 new = xmalloc (sizeof (struct reg_entry));
1977
1978 new->name = name;
1979 new->number = number;
1980 new->type = type;
1981 new->builtin = FALSE;
1982 new->neon = NULL;
1983
1984 if (hash_insert (arm_reg_hsh, name, (PTR) new))
1985 abort ();
1986
1987 return new;
1988 }
1989
1990 static void
1991 insert_neon_reg_alias (char *str, int number, int type,
1992 struct neon_typed_alias *atype)
1993 {
1994 struct reg_entry *reg = insert_reg_alias (str, number, type);
1995
1996 if (!reg)
1997 {
1998 first_error (_("attempt to redefine typed alias"));
1999 return;
2000 }
2001
2002 if (atype)
2003 {
2004 reg->neon = xmalloc (sizeof (struct neon_typed_alias));
2005 *reg->neon = *atype;
2006 }
2007 }
2008
2009 /* Look for the .req directive. This is of the form:
2010
2011 new_register_name .req existing_register_name
2012
2013 If we find one, or if it looks sufficiently like one that we want to
2014 handle any error here, return non-zero. Otherwise return zero. */
2015
2016 static int
2017 create_register_alias (char * newname, char *p)
2018 {
2019 struct reg_entry *old;
2020 char *oldname, *nbuf;
2021 size_t nlen;
2022
2023 /* The input scrubber ensures that whitespace after the mnemonic is
2024 collapsed to single spaces. */
2025 oldname = p;
2026 if (strncmp (oldname, " .req ", 6) != 0)
2027 return 0;
2028
2029 oldname += 6;
2030 if (*oldname == '\0')
2031 return 0;
2032
2033 old = hash_find (arm_reg_hsh, oldname);
2034 if (!old)
2035 {
2036 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2037 return 1;
2038 }
2039
2040 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2041 the desired alias name, and p points to its end. If not, then
2042 the desired alias name is in the global original_case_string. */
2043 #ifdef TC_CASE_SENSITIVE
2044 nlen = p - newname;
2045 #else
2046 newname = original_case_string;
2047 nlen = strlen (newname);
2048 #endif
2049
2050 nbuf = alloca (nlen + 1);
2051 memcpy (nbuf, newname, nlen);
2052 nbuf[nlen] = '\0';
2053
2054 /* Create aliases under the new name as stated; an all-lowercase
2055 version of the new name; and an all-uppercase version of the new
2056 name. */
2057 insert_reg_alias (nbuf, old->number, old->type);
2058
2059 for (p = nbuf; *p; p++)
2060 *p = TOUPPER (*p);
2061
2062 if (strncmp (nbuf, newname, nlen))
2063 insert_reg_alias (nbuf, old->number, old->type);
2064
2065 for (p = nbuf; *p; p++)
2066 *p = TOLOWER (*p);
2067
2068 if (strncmp (nbuf, newname, nlen))
2069 insert_reg_alias (nbuf, old->number, old->type);
2070
2071 return 1;
2072 }
2073
2074 /* Create a Neon typed/indexed register alias using directives, e.g.:
2075 X .dn d5.s32[1]
2076 Y .qn 6.s16
2077 Z .dn d7
2078 T .dn Z[0]
2079 These typed registers can be used instead of the types specified after the
2080 Neon mnemonic, so long as all operands given have types. Types can also be
2081 specified directly, e.g.:
2082 vadd d0.s32, d1.s32, d2.s32
2083 */
2084
2085 static int
2086 create_neon_reg_alias (char *newname, char *p)
2087 {
2088 enum arm_reg_type basetype;
2089 struct reg_entry *basereg;
2090 struct reg_entry mybasereg;
2091 struct neon_type ntype;
2092 struct neon_typed_alias typeinfo;
2093 char *namebuf, *nameend;
2094 int namelen;
2095
2096 typeinfo.defined = 0;
2097 typeinfo.eltype.type = NT_invtype;
2098 typeinfo.eltype.size = -1;
2099 typeinfo.index = -1;
2100
2101 nameend = p;
2102
2103 if (strncmp (p, " .dn ", 5) == 0)
2104 basetype = REG_TYPE_VFD;
2105 else if (strncmp (p, " .qn ", 5) == 0)
2106 basetype = REG_TYPE_NQ;
2107 else
2108 return 0;
2109
2110 p += 5;
2111
2112 if (*p == '\0')
2113 return 0;
2114
2115 basereg = arm_reg_parse_multi (&p);
2116
2117 if (basereg && basereg->type != basetype)
2118 {
2119 as_bad (_("bad type for register"));
2120 return 0;
2121 }
2122
2123 if (basereg == NULL)
2124 {
2125 expressionS exp;
2126 /* Try parsing as an integer. */
2127 my_get_expression (&exp, &p, GE_NO_PREFIX);
2128 if (exp.X_op != O_constant)
2129 {
2130 as_bad (_("expression must be constant"));
2131 return 0;
2132 }
2133 basereg = &mybasereg;
2134 basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2135 : exp.X_add_number;
2136 basereg->neon = 0;
2137 }
2138
2139 if (basereg->neon)
2140 typeinfo = *basereg->neon;
2141
2142 if (parse_neon_type (&ntype, &p) == SUCCESS)
2143 {
2144 /* We got a type. */
2145 if (typeinfo.defined & NTA_HASTYPE)
2146 {
2147 as_bad (_("can't redefine the type of a register alias"));
2148 return 0;
2149 }
2150
2151 typeinfo.defined |= NTA_HASTYPE;
2152 if (ntype.elems != 1)
2153 {
2154 as_bad (_("you must specify a single type only"));
2155 return 0;
2156 }
2157 typeinfo.eltype = ntype.el[0];
2158 }
2159
2160 if (skip_past_char (&p, '[') == SUCCESS)
2161 {
2162 expressionS exp;
2163 /* We got a scalar index. */
2164
2165 if (typeinfo.defined & NTA_HASINDEX)
2166 {
2167 as_bad (_("can't redefine the index of a scalar alias"));
2168 return 0;
2169 }
2170
2171 my_get_expression (&exp, &p, GE_NO_PREFIX);
2172
2173 if (exp.X_op != O_constant)
2174 {
2175 as_bad (_("scalar index must be constant"));
2176 return 0;
2177 }
2178
2179 typeinfo.defined |= NTA_HASINDEX;
2180 typeinfo.index = exp.X_add_number;
2181
2182 if (skip_past_char (&p, ']') == FAIL)
2183 {
2184 as_bad (_("expecting ]"));
2185 return 0;
2186 }
2187 }
2188
2189 namelen = nameend - newname;
2190 namebuf = alloca (namelen + 1);
2191 strncpy (namebuf, newname, namelen);
2192 namebuf[namelen] = '\0';
2193
2194 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2195 typeinfo.defined != 0 ? &typeinfo : NULL);
2196
2197 /* Insert name in all uppercase. */
2198 for (p = namebuf; *p; p++)
2199 *p = TOUPPER (*p);
2200
2201 if (strncmp (namebuf, newname, namelen))
2202 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2203 typeinfo.defined != 0 ? &typeinfo : NULL);
2204
2205 /* Insert name in all lowercase. */
2206 for (p = namebuf; *p; p++)
2207 *p = TOLOWER (*p);
2208
2209 if (strncmp (namebuf, newname, namelen))
2210 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2211 typeinfo.defined != 0 ? &typeinfo : NULL);
2212
2213 return 1;
2214 }
2215
2216 /* Should never be called, as .req goes between the alias and the
2217 register name, not at the beginning of the line. */
2218 static void
2219 s_req (int a ATTRIBUTE_UNUSED)
2220 {
2221 as_bad (_("invalid syntax for .req directive"));
2222 }
2223
2224 static void
2225 s_dn (int a ATTRIBUTE_UNUSED)
2226 {
2227 as_bad (_("invalid syntax for .dn directive"));
2228 }
2229
2230 static void
2231 s_qn (int a ATTRIBUTE_UNUSED)
2232 {
2233 as_bad (_("invalid syntax for .qn directive"));
2234 }
2235
2236 /* The .unreq directive deletes an alias which was previously defined
2237 by .req. For example:
2238
2239 my_alias .req r11
2240 .unreq my_alias */
2241
2242 static void
2243 s_unreq (int a ATTRIBUTE_UNUSED)
2244 {
2245 char * name;
2246 char saved_char;
2247
2248 name = input_line_pointer;
2249
2250 while (*input_line_pointer != 0
2251 && *input_line_pointer != ' '
2252 && *input_line_pointer != '\n')
2253 ++input_line_pointer;
2254
2255 saved_char = *input_line_pointer;
2256 *input_line_pointer = 0;
2257
2258 if (!*name)
2259 as_bad (_("invalid syntax for .unreq directive"));
2260 else
2261 {
2262 struct reg_entry *reg = hash_find (arm_reg_hsh, name);
2263
2264 if (!reg)
2265 as_bad (_("unknown register alias '%s'"), name);
2266 else if (reg->builtin)
2267 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
2268 name);
2269 else
2270 {
2271 hash_delete (arm_reg_hsh, name);
2272 free ((char *) reg->name);
2273 if (reg->neon)
2274 free (reg->neon);
2275 free (reg);
2276 }
2277 }
2278
2279 *input_line_pointer = saved_char;
2280 demand_empty_rest_of_line ();
2281 }
2282
2283 /* Directives: Instruction set selection. */
2284
2285 #ifdef OBJ_ELF
2286 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2287 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2288 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
2289 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
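/* Illustrative note: mapping_state (below) is called whenever the assembler
   switches between emitting ARM code, Thumb code and data (for example from
   .arm, .thumb, .bss or literal pool output), so that $a, $t and $d mark the
   corresponding regions of the section. */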
2290
2291 static enum mstate mapstate = MAP_UNDEFINED;
2292
2293 void
2294 mapping_state (enum mstate state)
2295 {
2296 symbolS * symbolP;
2297 const char * symname;
2298 int type;
2299
2300 if (mapstate == state)
2301 /* The mapping symbol has already been emitted.
2302 There is nothing else to do. */
2303 return;
2304
2305 mapstate = state;
2306
2307 switch (state)
2308 {
2309 case MAP_DATA:
2310 symname = "$d";
2311 type = BSF_NO_FLAGS;
2312 break;
2313 case MAP_ARM:
2314 symname = "$a";
2315 type = BSF_NO_FLAGS;
2316 break;
2317 case MAP_THUMB:
2318 symname = "$t";
2319 type = BSF_NO_FLAGS;
2320 break;
2321 case MAP_UNDEFINED:
2322 return;
2323 default:
2324 abort ();
2325 }
2326
2327 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2328
2329 symbolP = symbol_new (symname, now_seg, (valueT) frag_now_fix (), frag_now);
2330 symbol_table_insert (symbolP);
2331 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
2332
2333 switch (state)
2334 {
2335 case MAP_ARM:
2336 THUMB_SET_FUNC (symbolP, 0);
2337 ARM_SET_THUMB (symbolP, 0);
2338 ARM_SET_INTERWORK (symbolP, support_interwork);
2339 break;
2340
2341 case MAP_THUMB:
2342 THUMB_SET_FUNC (symbolP, 1);
2343 ARM_SET_THUMB (symbolP, 1);
2344 ARM_SET_INTERWORK (symbolP, support_interwork);
2345 break;
2346
2347 case MAP_DATA:
2348 default:
2349 return;
2350 }
2351 }
2352 #else
2353 #define mapping_state(x) /* nothing */
2354 #endif
2355
2356 /* Find the real, Thumb encoded start of a Thumb function. */
2357
2358 static symbolS *
2359 find_real_start (symbolS * symbolP)
2360 {
2361 char * real_start;
2362 const char * name = S_GET_NAME (symbolP);
2363 symbolS * new_target;
2364
2365 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2366 #define STUB_NAME ".real_start_of"
2367
2368 if (name == NULL)
2369 abort ();
2370
2371 /* The compiler may generate BL instructions to local labels because
2372 it needs to perform a branch to a far away location. These labels
2373 do not have a corresponding ".real_start_of" label. We check
2374 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2375 the ".real_start_of" convention for nonlocal branches. */
2376 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2377 return symbolP;
2378
2379 real_start = ACONCAT ((STUB_NAME, name, NULL));
2380 new_target = symbol_find (real_start);
2381
2382 if (new_target == NULL)
2383 {
2384 as_warn (_("Failed to find real start of function: %s\n"), name);
2385 new_target = symbolP;
2386 }
2387
2388 return new_target;
2389 }
2390
2391 static void
2392 opcode_select (int width)
2393 {
2394 switch (width)
2395 {
2396 case 16:
2397 if (! thumb_mode)
2398 {
2399 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2400 as_bad (_("selected processor does not support THUMB opcodes"));
2401
2402 thumb_mode = 1;
2403 /* No need to force the alignment, since we will have been
2404 coming from ARM mode, which is word-aligned. */
2405 record_alignment (now_seg, 1);
2406 }
2407 mapping_state (MAP_THUMB);
2408 break;
2409
2410 case 32:
2411 if (thumb_mode)
2412 {
2413 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2414 as_bad (_("selected processor does not support ARM opcodes"));
2415
2416 thumb_mode = 0;
2417
2418 if (!need_pass_2)
2419 frag_align (2, 0, 0);
2420
2421 record_alignment (now_seg, 1);
2422 }
2423 mapping_state (MAP_ARM);
2424 break;
2425
2426 default:
2427 as_bad (_("invalid instruction size selected (%d)"), width);
2428 }
2429 }
2430
2431 static void
2432 s_arm (int ignore ATTRIBUTE_UNUSED)
2433 {
2434 opcode_select (32);
2435 demand_empty_rest_of_line ();
2436 }
2437
2438 static void
2439 s_thumb (int ignore ATTRIBUTE_UNUSED)
2440 {
2441 opcode_select (16);
2442 demand_empty_rest_of_line ();
2443 }
2444
2445 static void
2446 s_code (int unused ATTRIBUTE_UNUSED)
2447 {
2448 int temp;
2449
2450 temp = get_absolute_expression ();
2451 switch (temp)
2452 {
2453 case 16:
2454 case 32:
2455 opcode_select (temp);
2456 break;
2457
2458 default:
2459 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2460 }
2461 }
2462
2463 static void
2464 s_force_thumb (int ignore ATTRIBUTE_UNUSED)
2465 {
2466 /* If we are not already in thumb mode go into it, EVEN if
2467 the target processor does not support thumb instructions.
2468 This is used by gcc/config/arm/lib1funcs.asm for example
2469 to compile interworking support functions even if the
2470 target processor does not support interworking. */
2471 if (! thumb_mode)
2472 {
2473 thumb_mode = 2;
2474 record_alignment (now_seg, 1);
2475 }
2476
2477 demand_empty_rest_of_line ();
2478 }
2479
2480 static void
2481 s_thumb_func (int ignore ATTRIBUTE_UNUSED)
2482 {
2483 s_thumb (0);
2484
2485 /* The following label is the name/address of the start of a Thumb function.
2486 We need to know this for the interworking support. */
2487 label_is_thumb_function_name = TRUE;
2488 }
2489
2490 /* Perform a .set directive, but also mark the alias as
2491 being a thumb function. */
2492
2493 static void
2494 s_thumb_set (int equiv)
2495 {
2496 /* XXX the following is a duplicate of the code for s_set() in read.c
2497 We cannot just call that code as we need to get at the symbol that
2498 is created. */
2499 char * name;
2500 char delim;
2501 char * end_name;
2502 symbolS * symbolP;
2503
2504 /* Especial apologies for the random logic:
2505 This just grew, and could be parsed much more simply!
2506 Dean - in haste. */
2507 name = input_line_pointer;
2508 delim = get_symbol_end ();
2509 end_name = input_line_pointer;
2510 *end_name = delim;
2511
2512 if (*input_line_pointer != ',')
2513 {
2514 *end_name = 0;
2515 as_bad (_("expected comma after name \"%s\""), name);
2516 *end_name = delim;
2517 ignore_rest_of_line ();
2518 return;
2519 }
2520
2521 input_line_pointer++;
2522 *end_name = 0;
2523
2524 if (name[0] == '.' && name[1] == '\0')
2525 {
2526 /* XXX - this should not happen to .thumb_set. */
2527 abort ();
2528 }
2529
2530 if ((symbolP = symbol_find (name)) == NULL
2531 && (symbolP = md_undefined_symbol (name)) == NULL)
2532 {
2533 #ifndef NO_LISTING
2534 /* When doing symbol listings, play games with dummy fragments living
2535 outside the normal fragment chain to record the file and line info
2536 for this symbol. */
2537 if (listing & LISTING_SYMBOLS)
2538 {
2539 extern struct list_info_struct * listing_tail;
2540 fragS * dummy_frag = xmalloc (sizeof (fragS));
2541
2542 memset (dummy_frag, 0, sizeof (fragS));
2543 dummy_frag->fr_type = rs_fill;
2544 dummy_frag->line = listing_tail;
2545 symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
2546 dummy_frag->fr_symbol = symbolP;
2547 }
2548 else
2549 #endif
2550 symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
2551
2552 #ifdef OBJ_COFF
2553 /* "set" symbols are local unless otherwise specified. */
2554 SF_SET_LOCAL (symbolP);
2555 #endif /* OBJ_COFF */
2556 } /* Make a new symbol. */
2557
2558 symbol_table_insert (symbolP);
2559
2560 * end_name = delim;
2561
2562 if (equiv
2563 && S_IS_DEFINED (symbolP)
2564 && S_GET_SEGMENT (symbolP) != reg_section)
2565 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
2566
2567 pseudo_set (symbolP);
2568
2569 demand_empty_rest_of_line ();
2570
2571 /* XXX Now we come to the Thumb specific bit of code. */
2572
2573 THUMB_SET_FUNC (symbolP, 1);
2574 ARM_SET_THUMB (symbolP, 1);
2575 #if defined OBJ_ELF || defined OBJ_COFF
2576 ARM_SET_INTERWORK (symbolP, support_interwork);
2577 #endif
2578 }
2579
2580 /* Directives: Mode selection. */
2581
2582 /* .syntax [unified|divided] - choose the new unified syntax
2583 (same for Arm and Thumb encoding, modulo slight differences in what
2584 can be represented) or the old divergent syntax for each mode. */
2585 static void
2586 s_syntax (int unused ATTRIBUTE_UNUSED)
2587 {
2588 char *name, delim;
2589
2590 name = input_line_pointer;
2591 delim = get_symbol_end ();
2592
2593 if (!strcasecmp (name, "unified"))
2594 unified_syntax = TRUE;
2595 else if (!strcasecmp (name, "divided"))
2596 unified_syntax = FALSE;
2597 else
2598 {
2599 as_bad (_("unrecognized syntax mode \"%s\""), name);
2600 return;
2601 }
2602 *input_line_pointer = delim;
2603 demand_empty_rest_of_line ();
2604 }
2605
2606 /* Directives: sectioning and alignment. */
2607
2608 /* Same as s_align_ptwo but align 0 => align 2. */
2609
2610 static void
2611 s_align (int unused ATTRIBUTE_UNUSED)
2612 {
2613 int temp;
2614 long temp_fill;
2615 long max_alignment = 15;
2616
2617 temp = get_absolute_expression ();
2618 if (temp > max_alignment)
2619 as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2620 else if (temp < 0)
2621 {
2622 as_bad (_("alignment negative. 0 assumed."));
2623 temp = 0;
2624 }
2625
2626 if (*input_line_pointer == ',')
2627 {
2628 input_line_pointer++;
2629 temp_fill = get_absolute_expression ();
2630 }
2631 else
2632 temp_fill = 0;
2633
2634 if (!temp)
2635 temp = 2;
2636
2637 /* Only make a frag if we HAVE to. */
2638 if (temp && !need_pass_2)
2639 frag_align (temp, (int) temp_fill, 0);
2640 demand_empty_rest_of_line ();
2641
2642 record_alignment (now_seg, temp);
2643 }
2644
2645 static void
2646 s_bss (int ignore ATTRIBUTE_UNUSED)
2647 {
2648 /* We don't support putting frags in the BSS segment, we fake it by
2649 marking in_bss, then looking at s_skip for clues. */
2650 subseg_set (bss_section, 0);
2651 demand_empty_rest_of_line ();
2652 mapping_state (MAP_DATA);
2653 }
2654
2655 static void
2656 s_even (int ignore ATTRIBUTE_UNUSED)
2657 {
2658 /* Never make a frag if we expect an extra pass. */
2659 if (!need_pass_2)
2660 frag_align (1, 0, 0);
2661
2662 record_alignment (now_seg, 1);
2663
2664 demand_empty_rest_of_line ();
2665 }
2666
2667 /* Directives: Literal pools. */
2668
2669 static literal_pool *
2670 find_literal_pool (void)
2671 {
2672 literal_pool * pool;
2673
2674 for (pool = list_of_pools; pool != NULL; pool = pool->next)
2675 {
2676 if (pool->section == now_seg
2677 && pool->sub_section == now_subseg)
2678 break;
2679 }
2680
2681 return pool;
2682 }
2683
2684 static literal_pool *
2685 find_or_make_literal_pool (void)
2686 {
2687 /* Next literal pool ID number. */
2688 static unsigned int latest_pool_num = 1;
2689 literal_pool * pool;
2690
2691 pool = find_literal_pool ();
2692
2693 if (pool == NULL)
2694 {
2695 /* Create a new pool. */
2696 pool = xmalloc (sizeof (* pool));
2697 if (! pool)
2698 return NULL;
2699
2700 pool->next_free_entry = 0;
2701 pool->section = now_seg;
2702 pool->sub_section = now_subseg;
2703 pool->next = list_of_pools;
2704 pool->symbol = NULL;
2705
2706 /* Add it to the list. */
2707 list_of_pools = pool;
2708 }
2709
2710 /* New pools, and emptied pools, will have a NULL symbol. */
2711 if (pool->symbol == NULL)
2712 {
2713 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
2714 (valueT) 0, &zero_address_frag);
2715 pool->id = latest_pool_num ++;
2716 }
2717
2718 /* Done. */
2719 return pool;
2720 }
2721
2722 /* Add the literal in the global 'inst'
2723 structure to the relevant literal pool. */
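/* For example (illustrative only), a pseudo-instruction such as
       ldr r0, =0x12345678
   whose constant cannot be encoded directly ends up adding its value here;
   the word itself is emitted later, when a .ltorg or .pool directive flushes
   the pool via s_ltorg below. */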
2724
2725 static int
2726 add_to_lit_pool (void)
2727 {
2728 literal_pool * pool;
2729 unsigned int entry;
2730
2731 pool = find_or_make_literal_pool ();
2732
2733 /* Check if this literal value is already in the pool. */
2734 for (entry = 0; entry < pool->next_free_entry; entry ++)
2735 {
2736 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2737 && (inst.reloc.exp.X_op == O_constant)
2738 && (pool->literals[entry].X_add_number
2739 == inst.reloc.exp.X_add_number)
2740 && (pool->literals[entry].X_unsigned
2741 == inst.reloc.exp.X_unsigned))
2742 break;
2743
2744 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2745 && (inst.reloc.exp.X_op == O_symbol)
2746 && (pool->literals[entry].X_add_number
2747 == inst.reloc.exp.X_add_number)
2748 && (pool->literals[entry].X_add_symbol
2749 == inst.reloc.exp.X_add_symbol)
2750 && (pool->literals[entry].X_op_symbol
2751 == inst.reloc.exp.X_op_symbol))
2752 break;
2753 }
2754
2755 /* Do we need to create a new entry? */
2756 if (entry == pool->next_free_entry)
2757 {
2758 if (entry >= MAX_LITERAL_POOL_SIZE)
2759 {
2760 inst.error = _("literal pool overflow");
2761 return FAIL;
2762 }
2763
2764 pool->literals[entry] = inst.reloc.exp;
2765 pool->next_free_entry += 1;
2766 }
2767
2768 inst.reloc.exp.X_op = O_symbol;
2769 inst.reloc.exp.X_add_number = ((int) entry) * 4;
2770 inst.reloc.exp.X_add_symbol = pool->symbol;
2771
2772 return SUCCESS;
2773 }
2774
2775 /* Can't use symbol_new here, so we have to create a symbol and then at
2776 a later date assign it a value. That's what these functions do. */
2777
2778 static void
2779 symbol_locate (symbolS * symbolP,
2780 const char * name, /* It is copied, the caller can modify. */
2781 segT segment, /* Segment identifier (SEG_<something>). */
2782 valueT valu, /* Symbol value. */
2783 fragS * frag) /* Associated fragment. */
2784 {
2785 unsigned int name_length;
2786 char * preserved_copy_of_name;
2787
2788 name_length = strlen (name) + 1; /* +1 for \0. */
2789 obstack_grow (&notes, name, name_length);
2790 preserved_copy_of_name = obstack_finish (&notes);
2791
2792 #ifdef tc_canonicalize_symbol_name
2793 preserved_copy_of_name =
2794 tc_canonicalize_symbol_name (preserved_copy_of_name);
2795 #endif
2796
2797 S_SET_NAME (symbolP, preserved_copy_of_name);
2798
2799 S_SET_SEGMENT (symbolP, segment);
2800 S_SET_VALUE (symbolP, valu);
2801 symbol_clear_list_pointers (symbolP);
2802
2803 symbol_set_frag (symbolP, frag);
2804
2805 /* Link to end of symbol chain. */
2806 {
2807 extern int symbol_table_frozen;
2808
2809 if (symbol_table_frozen)
2810 abort ();
2811 }
2812
2813 symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
2814
2815 obj_symbol_new_hook (symbolP);
2816
2817 #ifdef tc_symbol_new_hook
2818 tc_symbol_new_hook (symbolP);
2819 #endif
2820
2821 #ifdef DEBUG_SYMS
2822 verify_symbol_chain (symbol_rootP, symbol_lastP);
2823 #endif /* DEBUG_SYMS */
2824 }
2825
2826
2827 static void
2828 s_ltorg (int ignored ATTRIBUTE_UNUSED)
2829 {
2830 unsigned int entry;
2831 literal_pool * pool;
2832 char sym_name[20];
2833
2834 pool = find_literal_pool ();
2835 if (pool == NULL
2836 || pool->symbol == NULL
2837 || pool->next_free_entry == 0)
2838 return;
2839
2840 mapping_state (MAP_DATA);
2841
2842 /* Align the pool, since we have word accesses.
2843 Only make a frag if we have to. */
2844 if (!need_pass_2)
2845 frag_align (2, 0, 0);
2846
2847 record_alignment (now_seg, 2);
2848
2849 sprintf (sym_name, "$$lit_\002%x", pool->id);
2850
2851 symbol_locate (pool->symbol, sym_name, now_seg,
2852 (valueT) frag_now_fix (), frag_now);
2853 symbol_table_insert (pool->symbol);
2854
2855 ARM_SET_THUMB (pool->symbol, thumb_mode);
2856
2857 #if defined OBJ_COFF || defined OBJ_ELF
2858 ARM_SET_INTERWORK (pool->symbol, support_interwork);
2859 #endif
2860
2861 for (entry = 0; entry < pool->next_free_entry; entry ++)
2862 /* First output the expression in the instruction to the pool. */
2863 emit_expr (&(pool->literals[entry]), 4); /* .word */
2864
2865 /* Mark the pool as empty. */
2866 pool->next_free_entry = 0;
2867 pool->symbol = NULL;
2868 }
2869
2870 #ifdef OBJ_ELF
2871 /* Forward declarations for functions below, in the MD interface
2872 section. */
2873 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
2874 static valueT create_unwind_entry (int);
2875 static void start_unwind_section (const segT, int);
2876 static void add_unwind_opcode (valueT, int);
2877 static void flush_pending_unwind (void);
2878
2879 /* Directives: Data. */
2880
2881 static void
2882 s_arm_elf_cons (int nbytes)
2883 {
2884 expressionS exp;
2885
2886 #ifdef md_flush_pending_output
2887 md_flush_pending_output ();
2888 #endif
2889
2890 if (is_it_end_of_statement ())
2891 {
2892 demand_empty_rest_of_line ();
2893 return;
2894 }
2895
2896 #ifdef md_cons_align
2897 md_cons_align (nbytes);
2898 #endif
2899
2900 mapping_state (MAP_DATA);
2901 do
2902 {
2903 int reloc;
2904 char *base = input_line_pointer;
2905
2906 expression (& exp);
2907
2908 if (exp.X_op != O_symbol)
2909 emit_expr (&exp, (unsigned int) nbytes);
2910 else
2911 {
2912 char *before_reloc = input_line_pointer;
2913 reloc = parse_reloc (&input_line_pointer);
2914 if (reloc == -1)
2915 {
2916 as_bad (_("unrecognized relocation suffix"));
2917 ignore_rest_of_line ();
2918 return;
2919 }
2920 else if (reloc == BFD_RELOC_UNUSED)
2921 emit_expr (&exp, (unsigned int) nbytes);
2922 else
2923 {
2924 reloc_howto_type *howto = bfd_reloc_type_lookup (stdoutput, reloc);
2925 int size = bfd_get_reloc_size (howto);
2926
2927 if (reloc == BFD_RELOC_ARM_PLT32)
2928 {
2929 as_bad (_("(plt) is only valid on branch targets"));
2930 reloc = BFD_RELOC_UNUSED;
2931 size = 0;
2932 }
2933
2934 if (size > nbytes)
2935 as_bad (_("%s relocations do not fit in %d bytes"),
2936 howto->name, nbytes);
2937 else
2938 {
2939 /* We've parsed an expression stopping at O_symbol.
2940 But there may be more expression left now that we
2941 have parsed the relocation marker. Parse it again.
2942 XXX Surely there is a cleaner way to do this. */
2943 char *p = input_line_pointer;
2944 int offset;
2945 char *save_buf = alloca (input_line_pointer - base);
2946 memcpy (save_buf, base, input_line_pointer - base);
2947 memmove (base + (input_line_pointer - before_reloc),
2948 base, before_reloc - base);
2949
2950 input_line_pointer = base + (input_line_pointer-before_reloc);
2951 expression (&exp);
2952 memcpy (base, save_buf, p - base);
2953
2954 offset = nbytes - size;
2955 p = frag_more ((int) nbytes);
2956 fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
2957 size, &exp, 0, reloc);
2958 }
2959 }
2960 }
2961 }
2962 while (*input_line_pointer++ == ',');
2963
2964 /* Put terminator back into stream. */
2965 input_line_pointer --;
2966 demand_empty_rest_of_line ();
2967 }
2968
2969
2970 /* Parse a .rel31 directive. */
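/* A sketch of the accepted form, based purely on the parsing below: the
   first operand is a literal 0 or 1 selecting the high bit, followed by a
   comma and an expression, e.g. (illustrative only)
       .rel31 1, some_label  */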
2971
2972 static void
2973 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
2974 {
2975 expressionS exp;
2976 char *p;
2977 valueT highbit;
2978
2979 highbit = 0;
2980 if (*input_line_pointer == '1')
2981 highbit = 0x80000000;
2982 else if (*input_line_pointer != '0')
2983 as_bad (_("expected 0 or 1"));
2984
2985 input_line_pointer++;
2986 if (*input_line_pointer != ',')
2987 as_bad (_("missing comma"));
2988 input_line_pointer++;
2989
2990 #ifdef md_flush_pending_output
2991 md_flush_pending_output ();
2992 #endif
2993
2994 #ifdef md_cons_align
2995 md_cons_align (4);
2996 #endif
2997
2998 mapping_state (MAP_DATA);
2999
3000 expression (&exp);
3001
3002 p = frag_more (4);
3003 md_number_to_chars (p, highbit, 4);
3004 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3005 BFD_RELOC_ARM_PREL31);
3006
3007 demand_empty_rest_of_line ();
3008 }
3009
3010 /* Directives: AEABI stack-unwind tables. */
3011
3012 /* Parse an unwind_fnstart directive. Simply records the current location. */
3013
3014 static void
3015 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3016 {
3017 demand_empty_rest_of_line ();
3018 /* Mark the start of the function. */
3019 unwind.proc_start = expr_build_dot ();
3020
3021 /* Reset the rest of the unwind info. */
3022 unwind.opcode_count = 0;
3023 unwind.table_entry = NULL;
3024 unwind.personality_routine = NULL;
3025 unwind.personality_index = -1;
3026 unwind.frame_size = 0;
3027 unwind.fp_offset = 0;
3028 unwind.fp_reg = 13;
3029 unwind.fp_used = 0;
3030 unwind.sp_restored = 0;
3031 }
3032
3033
3034 /* Parse a handlerdata directive. Creates the exception handling table entry
3035 for the function. */
3036
3037 static void
3038 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3039 {
3040 demand_empty_rest_of_line ();
3041 if (unwind.table_entry)
3042 as_bad (_("duplicate .handlerdata directive"));
3043
3044 create_unwind_entry (1);
3045 }
3046
3047 /* Parse an unwind_fnend directive. Generates the index table entry. */
3048
3049 static void
3050 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
3051 {
3052 long where;
3053 char *ptr;
3054 valueT val;
3055
3056 demand_empty_rest_of_line ();
3057
3058 /* Add eh table entry. */
3059 if (unwind.table_entry == NULL)
3060 val = create_unwind_entry (0);
3061 else
3062 val = 0;
3063
3064 /* Add index table entry. This is two words. */
3065 start_unwind_section (unwind.saved_seg, 1);
3066 frag_align (2, 0, 0);
3067 record_alignment (now_seg, 2);
3068
3069 ptr = frag_more (8);
3070 where = frag_now_fix () - 8;
3071
3072 /* Self relative offset of the function start. */
3073 fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
3074 BFD_RELOC_ARM_PREL31);
3075
3076 /* Indicate dependency on EHABI-defined personality routines to the
3077 linker, if it hasn't been done already. */
3078 if (unwind.personality_index >= 0 && unwind.personality_index < 3
3079 && !(marked_pr_dependency & (1 << unwind.personality_index)))
3080 {
3081 static const char *const name[] = {
3082 "__aeabi_unwind_cpp_pr0",
3083 "__aeabi_unwind_cpp_pr1",
3084 "__aeabi_unwind_cpp_pr2"
3085 };
3086 symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
3087 fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
3088 marked_pr_dependency |= 1 << unwind.personality_index;
3089 seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
3090 = marked_pr_dependency;
3091 }
3092
3093 if (val)
3094 /* Inline exception table entry. */
3095 md_number_to_chars (ptr + 4, val, 4);
3096 else
3097 /* Self relative offset of the table entry. */
3098 fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
3099 BFD_RELOC_ARM_PREL31);
3100
3101 /* Restore the original section. */
3102 subseg_set (unwind.saved_seg, unwind.saved_subseg);
3103 }
3104
3105
3106 /* Parse an unwind_cantunwind directive. */
3107
3108 static void
3109 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3110 {
3111 demand_empty_rest_of_line ();
3112 if (unwind.personality_routine || unwind.personality_index != -1)
3113 as_bad (_("personality routine specified for cantunwind frame"));
3114
3115 unwind.personality_index = -2;
3116 }
3117
3118
3119 /* Parse a personalityindex directive. */
3120
3121 static void
3122 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3123 {
3124 expressionS exp;
3125
3126 if (unwind.personality_routine || unwind.personality_index != -1)
3127 as_bad (_("duplicate .personalityindex directive"));
3128
3129 expression (&exp);
3130
3131 if (exp.X_op != O_constant
3132 || exp.X_add_number < 0 || exp.X_add_number > 15)
3133 {
3134 as_bad (_("bad personality routine number"));
3135 ignore_rest_of_line ();
3136 return;
3137 }
3138
3139 unwind.personality_index = exp.X_add_number;
3140
3141 demand_empty_rest_of_line ();
3142 }
3143
3144
3145 /* Parse a personality directive. */
3146
3147 static void
3148 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3149 {
3150 char *name, *p, c;
3151
3152 if (unwind.personality_routine || unwind.personality_index != -1)
3153 as_bad (_("duplicate .personality directive"));
3154
3155 name = input_line_pointer;
3156 c = get_symbol_end ();
3157 p = input_line_pointer;
3158 unwind.personality_routine = symbol_find_or_make (name);
3159 *p = c;
3160 demand_empty_rest_of_line ();
3161 }
3162
3163
3164 /* Parse a directive saving core registers. */
3165
3166 static void
3167 s_arm_unwind_save_core (void)
3168 {
3169 valueT op;
3170 long range;
3171 int n;
3172
3173 range = parse_reg_list (&input_line_pointer);
3174 if (range == FAIL)
3175 {
3176 as_bad (_("expected register list"));
3177 ignore_rest_of_line ();
3178 return;
3179 }
3180
3181 demand_empty_rest_of_line ();
3182
3183 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3184 into .unwind_save {..., sp...}. We aren't bothered about the value of
3185 ip because it is clobbered by calls. */
3186 if (unwind.sp_restored && unwind.fp_reg == 12
3187 && (range & 0x3000) == 0x1000)
3188 {
3189 unwind.opcode_count--;
3190 unwind.sp_restored = 0;
3191 range = (range | 0x2000) & ~0x1000;
3192 unwind.pending_offset = 0;
3193 }
3194
3195 /* Pop r4-r15. */
3196 if (range & 0xfff0)
3197 {
3198 /* See if we can use the short opcodes. These pop a block of up to 8
3199 registers starting with r4, plus maybe r14. */
3200 for (n = 0; n < 8; n++)
3201 {
3202 /* Break at the first non-saved register. */
3203 if ((range & (1 << (n + 4))) == 0)
3204 break;
3205 }
3206 /* See if there are any other bits set. */
3207 if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
3208 {
3209 /* Use the long form. */
3210 op = 0x8000 | ((range >> 4) & 0xfff);
3211 add_unwind_opcode (op, 2);
3212 }
3213 else
3214 {
3215 /* Use the short form. */
3216 if (range & 0x4000)
3217 op = 0xa8; /* Pop r14. */
3218 else
3219 op = 0xa0; /* Do not pop r14. */
3220 op |= (n - 1);
3221 add_unwind_opcode (op, 1);
3222 }
3223 }
3224
3225 /* Pop r0-r3. */
3226 if (range & 0xf)
3227 {
3228 op = 0xb100 | (range & 0xf);
3229 add_unwind_opcode (op, 2);
3230 }
3231
3232 /* Record the number of bytes pushed. */
3233 for (n = 0; n < 16; n++)
3234 {
3235 if (range & (1 << n))
3236 unwind.frame_size += 4;
3237 }
3238 }
3239
3240
3241 /* Parse a directive saving FPA registers. */
3242
3243 static void
3244 s_arm_unwind_save_fpa (int reg)
3245 {
3246 expressionS exp;
3247 int num_regs;
3248 valueT op;
3249
3250 /* Get the number of registers to transfer. */
3251 if (skip_past_comma (&input_line_pointer) != FAIL)
3252 expression (&exp);
3253 else
3254 exp.X_op = O_illegal;
3255
3256 if (exp.X_op != O_constant)
3257 {
3258 as_bad (_("expected , <constant>"));
3259 ignore_rest_of_line ();
3260 return;
3261 }
3262
3263 num_regs = exp.X_add_number;
3264
3265 if (num_regs < 1 || num_regs > 4)
3266 {
3267 as_bad (_("number of registers must be in the range [1:4]"));
3268 ignore_rest_of_line ();
3269 return;
3270 }
3271
3272 demand_empty_rest_of_line ();
3273
3274 if (reg == 4)
3275 {
3276 /* Short form. */
3277 op = 0xb4 | (num_regs - 1);
3278 add_unwind_opcode (op, 1);
3279 }
3280 else
3281 {
3282 /* Long form. */
3283 op = 0xc800 | (reg << 4) | (num_regs - 1);
3284 add_unwind_opcode (op, 2);
3285 }
3286 unwind.frame_size += num_regs * 12;
3287 }
3288
3289
3290 /* Parse a directive saving VFP registers for ARMv6 and above. */
3291
3292 static void
3293 s_arm_unwind_save_vfp_armv6 (void)
3294 {
3295 int count;
3296 unsigned int start;
3297 valueT op;
3298 int num_vfpv3_regs = 0;
3299 int num_regs_below_16;
3300
3301 count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
3302 if (count == FAIL)
3303 {
3304 as_bad (_("expected register list"));
3305 ignore_rest_of_line ();
3306 return;
3307 }
3308
3309 demand_empty_rest_of_line ();
3310
3311 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
3312 than FSTMX/FLDMX-style ones). */
3313
3314 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
3315 if (start >= 16)
3316 num_vfpv3_regs = count;
3317 else if (start + count > 16)
3318 num_vfpv3_regs = start + count - 16;
3319
3320 if (num_vfpv3_regs > 0)
3321 {
3322 int start_offset = start > 16 ? start - 16 : 0;
3323 op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
3324 add_unwind_opcode (op, 2);
3325 }
3326
3327 /* Generate opcode for registers numbered in the range 0 .. 15. */
3328 num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
3329 assert (num_regs_below_16 + num_vfpv3_regs == count);
3330 if (num_regs_below_16 > 0)
3331 {
3332 op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
3333 add_unwind_opcode (op, 2);
3334 }
3335
3336 unwind.frame_size += count * 8;
3337 }
3338
3339
3340 /* Parse a directive saving VFP registers for pre-ARMv6. */
3341
3342 static void
3343 s_arm_unwind_save_vfp (void)
3344 {
3345 int count;
3346 unsigned int reg;
3347 valueT op;
3348
3349 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
3350 if (count == FAIL)
3351 {
3352 as_bad (_("expected register list"));
3353 ignore_rest_of_line ();
3354 return;
3355 }
3356
3357 demand_empty_rest_of_line ();
3358
3359 if (reg == 8)
3360 {
3361 /* Short form. */
3362 op = 0xb8 | (count - 1);
3363 add_unwind_opcode (op, 1);
3364 }
3365 else
3366 {
3367 /* Long form. */
3368 op = 0xb300 | (reg << 4) | (count - 1);
3369 add_unwind_opcode (op, 2);
3370 }
3371 unwind.frame_size += count * 8 + 4;
3372 }
3373
3374
3375 /* Parse a directive saving iWMMXt data registers. */
3376
3377 static void
3378 s_arm_unwind_save_mmxwr (void)
3379 {
3380 int reg;
3381 int hi_reg;
3382 int i;
3383 unsigned mask = 0;
3384 valueT op;
3385
3386 if (*input_line_pointer == '{')
3387 input_line_pointer++;
3388
3389 do
3390 {
3391 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3392
3393 if (reg == FAIL)
3394 {
3395 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
3396 goto error;
3397 }
3398
3399 if (mask >> reg)
3400 as_tsktsk (_("register list not in ascending order"));
3401 mask |= 1 << reg;
3402
3403 if (*input_line_pointer == '-')
3404 {
3405 input_line_pointer++;
3406 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3407 if (hi_reg == FAIL)
3408 {
3409 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
3410 goto error;
3411 }
3412 else if (reg >= hi_reg)
3413 {
3414 as_bad (_("bad register range"));
3415 goto error;
3416 }
3417 for (; reg < hi_reg; reg++)
3418 mask |= 1 << reg;
3419 }
3420 }
3421 while (skip_past_comma (&input_line_pointer) != FAIL);
3422
3423 if (*input_line_pointer == '}')
3424 input_line_pointer++;
3425
3426 demand_empty_rest_of_line ();
3427
3428 /* Generate any deferred opcodes because we're going to be looking at
3429 the list. */
3430 flush_pending_unwind ();
3431
3432 for (i = 0; i < 16; i++)
3433 {
3434 if (mask & (1 << i))
3435 unwind.frame_size += 8;
3436 }
3437
3438 /* Attempt to combine with a previous opcode. We do this because gcc
3439 likes to output separate unwind directives for a single block of
3440 registers. */
3441 if (unwind.opcode_count > 0)
3442 {
3443 i = unwind.opcodes[unwind.opcode_count - 1];
3444 if ((i & 0xf8) == 0xc0)
3445 {
3446 i &= 7;
3447 /* Only merge if the blocks are contiguous. */
3448 if (i < 6)
3449 {
3450 if ((mask & 0xfe00) == (1 << 9))
3451 {
3452 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
3453 unwind.opcode_count--;
3454 }
3455 }
3456 else if (i == 6 && unwind.opcode_count >= 2)
3457 {
3458 i = unwind.opcodes[unwind.opcode_count - 2];
3459 reg = i >> 4;
3460 i &= 0xf;
3461
3462 op = 0xffff << (reg - 1);
3463 if (reg > 0
3464 && ((mask & op) == (1u << (reg - 1))))
3465 {
3466 op = (1 << (reg + i + 1)) - 1;
3467 op &= ~((1 << reg) - 1);
3468 mask |= op;
3469 unwind.opcode_count -= 2;
3470 }
3471 }
3472 }
3473 }
3474
3475 hi_reg = 15;
3476 /* We want to generate opcodes in the order the registers have been
3477 saved, i.e. descending order. */
3478 for (reg = 15; reg >= -1; reg--)
3479 {
3480 /* Save registers in blocks. */
3481 if (reg < 0
3482 || !(mask & (1 << reg)))
3483 {
3484 /* We found an unsaved reg. Generate opcodes to save the
3485 preceding block. */
3486 if (reg != hi_reg)
3487 {
3488 if (reg == 9)
3489 {
3490 /* Short form. */
3491 op = 0xc0 | (hi_reg - 10);
3492 add_unwind_opcode (op, 1);
3493 }
3494 else
3495 {
3496 /* Long form. */
3497 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
3498 add_unwind_opcode (op, 2);
3499 }
3500 }
3501 hi_reg = reg - 1;
3502 }
3503 }
3504
3505 return;
3506 error:
3507 ignore_rest_of_line ();
3508 }
3509
3510 static void
3511 s_arm_unwind_save_mmxwcg (void)
3512 {
3513 int reg;
3514 int hi_reg;
3515 unsigned mask = 0;
3516 valueT op;
3517
3518 if (*input_line_pointer == '{')
3519 input_line_pointer++;
3520
3521 do
3522 {
3523 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3524
3525 if (reg == FAIL)
3526 {
3527 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG]));
3528 goto error;
3529 }
3530
3531 reg -= 8;
3532 if (mask >> reg)
3533 as_tsktsk (_("register list not in ascending order"));
3534 mask |= 1 << reg;
3535
3536 if (*input_line_pointer == '-')
3537 {
3538 input_line_pointer++;
3539 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3540 if (hi_reg == FAIL)
3541 {
3542 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG]));
3543 goto error;
3544 }
3545 else if (reg >= hi_reg)
3546 {
3547 as_bad (_("bad register range"));
3548 goto error;
3549 }
3550 for (; reg < hi_reg; reg++)
3551 mask |= 1 << reg;
3552 }
3553 }
3554 while (skip_past_comma (&input_line_pointer) != FAIL);
3555
3556 if (*input_line_pointer == '}')
3557 input_line_pointer++;
3558
3559 demand_empty_rest_of_line ();
3560
3561 /* Generate any deferred opcodes because we're going to be looking at
3562 the list. */
3563 flush_pending_unwind ();
3564
3565 for (reg = 0; reg < 16; reg++)
3566 {
3567 if (mask & (1 << reg))
3568 unwind.frame_size += 4;
3569 }
3570 op = 0xc700 | mask;
3571 add_unwind_opcode (op, 2);
3572 return;
3573 error:
3574 ignore_rest_of_line ();
3575 }
3576
3577
3578 /* Parse an unwind_save directive.
3579 If the argument is non-zero, this is a .vsave directive. */
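/* Typical uses (illustrative only):
       .save  {r4-r7, lr}     @ core registers -> s_arm_unwind_save_core
       .vsave {d8-d11}        @ VFP d-registers -> s_arm_unwind_save_vfp_armv6  */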
3580
3581 static void
3582 s_arm_unwind_save (int arch_v6)
3583 {
3584 char *peek;
3585 struct reg_entry *reg;
3586 bfd_boolean had_brace = FALSE;
3587
3588 /* Figure out what sort of save we have. */
3589 peek = input_line_pointer;
3590
3591 if (*peek == '{')
3592 {
3593 had_brace = TRUE;
3594 peek++;
3595 }
3596
3597 reg = arm_reg_parse_multi (&peek);
3598
3599 if (!reg)
3600 {
3601 as_bad (_("register expected"));
3602 ignore_rest_of_line ();
3603 return;
3604 }
3605
3606 switch (reg->type)
3607 {
3608 case REG_TYPE_FN:
3609 if (had_brace)
3610 {
3611 as_bad (_("FPA .unwind_save does not take a register list"));
3612 ignore_rest_of_line ();
3613 return;
3614 }
3615 s_arm_unwind_save_fpa (reg->number);
3616 return;
3617
3618 case REG_TYPE_RN: s_arm_unwind_save_core (); return;
3619 case REG_TYPE_VFD:
3620 if (arch_v6)
3621 s_arm_unwind_save_vfp_armv6 ();
3622 else
3623 s_arm_unwind_save_vfp ();
3624 return;
3625 case REG_TYPE_MMXWR: s_arm_unwind_save_mmxwr (); return;
3626 case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;
3627
3628 default:
3629 as_bad (_(".unwind_save does not support this kind of register"));
3630 ignore_rest_of_line ();
3631 }
3632 }
3633
3634
3635 /* Parse an unwind_movsp directive. */
3636
3637 static void
3638 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
3639 {
3640 int reg;
3641 valueT op;
3642 int offset;
3643
3644 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3645 if (reg == FAIL)
3646 {
3647 as_bad (_(reg_expected_msgs[REG_TYPE_RN]));
3648 ignore_rest_of_line ();
3649 return;
3650 }
3651
3652 /* Optional constant. */
3653 if (skip_past_comma (&input_line_pointer) != FAIL)
3654 {
3655 if (immediate_for_directive (&offset) == FAIL)
3656 return;
3657 }
3658 else
3659 offset = 0;
3660
3661 demand_empty_rest_of_line ();
3662
3663 if (reg == REG_SP || reg == REG_PC)
3664 {
3665 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
3666 return;
3667 }
3668
3669 if (unwind.fp_reg != REG_SP)
3670 as_bad (_("unexpected .unwind_movsp directive"));
3671
3672 /* Generate opcode to restore the value. */
3673 op = 0x90 | reg;
3674 add_unwind_opcode (op, 1);
3675
3676 /* Record the information for later. */
3677 unwind.fp_reg = reg;
3678 unwind.fp_offset = unwind.frame_size - offset;
3679 unwind.sp_restored = 1;
3680 }
3681
3682 /* Parse an unwind_pad directive. */
3683
3684 static void
3685 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
3686 {
3687 int offset;
3688
3689 if (immediate_for_directive (&offset) == FAIL)
3690 return;
3691
3692 if (offset & 3)
3693 {
3694 as_bad (_("stack increment must be multiple of 4"));
3695 ignore_rest_of_line ();
3696 return;
3697 }
3698
3699 /* Don't generate any opcodes, just record the details for later. */
3700 unwind.frame_size += offset;
3701 unwind.pending_offset += offset;
3702
3703 demand_empty_rest_of_line ();
3704 }
3705
3706 /* Parse an unwind_setfp directive. */
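/* Typical use (illustrative only):
       .setfp fp, sp, #4
   records that the frame pointer was established as sp plus 4; as noted
   below, no unwind opcode is emitted here, only the information needed to
   restore sp later. */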
3707
3708 static void
3709 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
3710 {
3711 int sp_reg;
3712 int fp_reg;
3713 int offset;
3714
3715 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3716 if (skip_past_comma (&input_line_pointer) == FAIL)
3717 sp_reg = FAIL;
3718 else
3719 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3720
3721 if (fp_reg == FAIL || sp_reg == FAIL)
3722 {
3723 as_bad (_("expected <reg>, <reg>"));
3724 ignore_rest_of_line ();
3725 return;
3726 }
3727
3728 /* Optional constant. */
3729 if (skip_past_comma (&input_line_pointer) != FAIL)
3730 {
3731 if (immediate_for_directive (&offset) == FAIL)
3732 return;
3733 }
3734 else
3735 offset = 0;
3736
3737 demand_empty_rest_of_line ();
3738
3739 if (sp_reg != 13 && sp_reg != unwind.fp_reg)
3740 {
3741 as_bad (_("register must be either sp or set by a previous "
3742 "unwind_movsp directive"));
3743 return;
3744 }
3745
3746 /* Don't generate any opcodes, just record the information for later. */
3747 unwind.fp_reg = fp_reg;
3748 unwind.fp_used = 1;
3749 if (sp_reg == 13)
3750 unwind.fp_offset = unwind.frame_size - offset;
3751 else
3752 unwind.fp_offset -= offset;
3753 }
3754
3755 /* Parse an unwind_raw directive. */
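/* Based on the parsing below, the directive takes a frame-size adjustment
   followed by one or more raw opcode bytes, e.g. (illustrative only):
       .unwind_raw 4, 0xb1, 0x01  */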
3756
3757 static void
3758 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
3759 {
3760 expressionS exp;
3761 /* This is an arbitrary limit. */
3762 unsigned char op[16];
3763 int count;
3764
3765 expression (&exp);
3766 if (exp.X_op == O_constant
3767 && skip_past_comma (&input_line_pointer) != FAIL)
3768 {
3769 unwind.frame_size += exp.X_add_number;
3770 expression (&exp);
3771 }
3772 else
3773 exp.X_op = O_illegal;
3774
3775 if (exp.X_op != O_constant)
3776 {
3777 as_bad (_("expected <offset>, <opcode>"));
3778 ignore_rest_of_line ();
3779 return;
3780 }
3781
3782 count = 0;
3783
3784 /* Parse the opcode. */
3785 for (;;)
3786 {
3787 if (count >= 16)
3788 {
3789 as_bad (_("unwind opcode too long"));
3790 ignore_rest_of_line ();
 return;
3791 }
3792 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
3793 {
3794 as_bad (_("invalid unwind opcode"));
3795 ignore_rest_of_line ();
3796 return;
3797 }
3798 op[count++] = exp.X_add_number;
3799
3800 /* Parse the next byte. */
3801 if (skip_past_comma (&input_line_pointer) == FAIL)
3802 break;
3803
3804 expression (&exp);
3805 }
3806
3807 /* Add the opcode bytes in reverse order. */
3808 while (count--)
3809 add_unwind_opcode (op[count], 1);
3810
3811 demand_empty_rest_of_line ();
3812 }
3813
3814
3815 /* Parse a .eabi_attribute directive. */
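/* Examples (illustrative only):
       .eabi_attribute 24, 1          @ integer-valued tag
       .eabi_attribute 65, "gnu"      @ odd-numbered tags above 32 take strings
   Tag_compatibility (32) takes an integer followed by a string. */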
3816
3817 static void
3818 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
3819 {
3820 expressionS exp;
3821 bfd_boolean is_string;
3822 int tag;
3823 unsigned int i = 0;
3824 char *s = NULL;
3825 char saved_char;
3826
3827 expression (& exp);
3828 if (exp.X_op != O_constant)
3829 goto bad;
3830
3831 tag = exp.X_add_number;
3832 if (tag == 4 || tag == 5 || tag == 32 || (tag > 32 && (tag & 1) != 0))
3833 is_string = 1;
3834 else
3835 is_string = 0;
3836
3837 if (skip_past_comma (&input_line_pointer) == FAIL)
3838 goto bad;
3839 if (tag == 32 || !is_string)
3840 {
3841 expression (& exp);
3842 if (exp.X_op != O_constant)
3843 {
3844 as_bad (_("expected numeric constant"));
3845 ignore_rest_of_line ();
3846 return;
3847 }
3848 i = exp.X_add_number;
3849 }
3850 if (tag == Tag_compatibility
3851 && skip_past_comma (&input_line_pointer) == FAIL)
3852 {
3853 as_bad (_("expected comma"));
3854 ignore_rest_of_line ();
3855 return;
3856 }
3857 if (is_string)
3858 {
3859 skip_whitespace(input_line_pointer);
3860 if (*input_line_pointer != '"')
3861 goto bad_string;
3862 input_line_pointer++;
3863 s = input_line_pointer;
3864 while (*input_line_pointer && *input_line_pointer != '"')
3865 input_line_pointer++;
3866 if (*input_line_pointer != '"')
3867 goto bad_string;
3868 saved_char = *input_line_pointer;
3869 *input_line_pointer = 0;
3870 }
3871 else
3872 {
3873 s = NULL;
3874 saved_char = 0;
3875 }
3876
3877 if (tag == Tag_compatibility)
3878 elf32_arm_add_eabi_attr_compat (stdoutput, i, s);
3879 else if (is_string)
3880 elf32_arm_add_eabi_attr_string (stdoutput, tag, s);
3881 else
3882 elf32_arm_add_eabi_attr_int (stdoutput, tag, i);
3883
3884 if (s)
3885 {
3886 *input_line_pointer = saved_char;
3887 input_line_pointer++;
3888 }
3889 demand_empty_rest_of_line ();
3890 return;
3891 bad_string:
3892 as_bad (_("bad string constant"));
3893 ignore_rest_of_line ();
3894 return;
3895 bad:
3896 as_bad (_("expected <tag> , <value>"));
3897 ignore_rest_of_line ();
3898 }
3899 #endif /* OBJ_ELF */
3900
3901 static void s_arm_arch (int);
3902 static void s_arm_object_arch (int);
3903 static void s_arm_cpu (int);
3904 static void s_arm_fpu (int);
3905
3906 #ifdef TE_PE
3907
3908 static void
3909 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
3910 {
3911 expressionS exp;
3912
3913 do
3914 {
3915 expression (&exp);
3916 if (exp.X_op == O_symbol)
3917 exp.X_op = O_secrel;
3918
3919 emit_expr (&exp, 4);
3920 }
3921 while (*input_line_pointer++ == ',');
3922
3923 input_line_pointer--;
3924 demand_empty_rest_of_line ();
3925 }
3926 #endif /* TE_PE */
3927
3928 /* This table describes all the machine specific pseudo-ops the assembler
3929 has to support. The fields are:
3930 pseudo-op name without dot
3931 function to call to execute this pseudo-op
3932 Integer arg to pass to the function. */
3933
3934 const pseudo_typeS md_pseudo_table[] =
3935 {
3936 /* Never called because '.req' does not start a line. */
3937 { "req", s_req, 0 },
3938 /* Following two are likewise never called. */
3939 { "dn", s_dn, 0 },
3940 { "qn", s_qn, 0 },
3941 { "unreq", s_unreq, 0 },
3942 { "bss", s_bss, 0 },
3943 { "align", s_align, 0 },
3944 { "arm", s_arm, 0 },
3945 { "thumb", s_thumb, 0 },
3946 { "code", s_code, 0 },
3947 { "force_thumb", s_force_thumb, 0 },
3948 { "thumb_func", s_thumb_func, 0 },
3949 { "thumb_set", s_thumb_set, 0 },
3950 { "even", s_even, 0 },
3951 { "ltorg", s_ltorg, 0 },
3952 { "pool", s_ltorg, 0 },
3953 { "syntax", s_syntax, 0 },
3954 { "cpu", s_arm_cpu, 0 },
3955 { "arch", s_arm_arch, 0 },
3956 { "object_arch", s_arm_object_arch, 0 },
3957 { "fpu", s_arm_fpu, 0 },
3958 #ifdef OBJ_ELF
3959 { "word", s_arm_elf_cons, 4 },
3960 { "long", s_arm_elf_cons, 4 },
3961 { "rel31", s_arm_rel31, 0 },
3962 { "fnstart", s_arm_unwind_fnstart, 0 },
3963 { "fnend", s_arm_unwind_fnend, 0 },
3964 { "cantunwind", s_arm_unwind_cantunwind, 0 },
3965 { "personality", s_arm_unwind_personality, 0 },
3966 { "personalityindex", s_arm_unwind_personalityindex, 0 },
3967 { "handlerdata", s_arm_unwind_handlerdata, 0 },
3968 { "save", s_arm_unwind_save, 0 },
3969 { "vsave", s_arm_unwind_save, 1 },
3970 { "movsp", s_arm_unwind_movsp, 0 },
3971 { "pad", s_arm_unwind_pad, 0 },
3972 { "setfp", s_arm_unwind_setfp, 0 },
3973 { "unwind_raw", s_arm_unwind_raw, 0 },
3974 { "eabi_attribute", s_arm_eabi_attribute, 0 },
3975 #else
3976 { "word", cons, 4},
3977
3978 /* These are used for dwarf. */
3979 {"2byte", cons, 2},
3980 {"4byte", cons, 4},
3981 {"8byte", cons, 8},
3982 /* These are used for dwarf2. */
3983 { "file", (void (*) (int)) dwarf2_directive_file, 0 },
3984 { "loc", dwarf2_directive_loc, 0 },
3985 { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
3986 #endif
3987 { "extend", float_cons, 'x' },
3988 { "ldouble", float_cons, 'x' },
3989 { "packed", float_cons, 'p' },
3990 #ifdef TE_PE
3991 {"secrel32", pe_directive_secrel, 0},
3992 #endif
3993 { 0, 0, 0 }
3994 };
3995 \f
3996 /* Parser functions used exclusively in instruction operands. */
3997
3998 /* Generic immediate-value read function for use in insn parsing.
3999 STR points to the beginning of the immediate (the leading #);
4000 VAL receives the value; if the value is outside [MIN, MAX]
4001 issue an error. PREFIX_OPT is true if the immediate prefix is
4002 optional. */
4003
4004 static int
4005 parse_immediate (char **str, int *val, int min, int max,
4006 bfd_boolean prefix_opt)
4007 {
4008 expressionS exp;
4009 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4010 if (exp.X_op != O_constant)
4011 {
4012 inst.error = _("constant expression required");
4013 return FAIL;
4014 }
4015
4016 if (exp.X_add_number < min || exp.X_add_number > max)
4017 {
4018 inst.error = _("immediate value out of range");
4019 return FAIL;
4020 }
4021
4022 *val = exp.X_add_number;
4023 return SUCCESS;
4024 }
4025
4026 /* Less-generic immediate-value read function with the possibility of loading a
4027 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4028 instructions. Puts the result directly in inst.operands[i]. */
4029
4030 static int
4031 parse_big_immediate (char **str, int i)
4032 {
4033 expressionS exp;
4034 char *ptr = *str;
4035
4036 my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);
4037
4038 if (exp.X_op == O_constant)
4039 {
4040 inst.operands[i].imm = exp.X_add_number & 0xffffffff;
4041 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4042 O_constant. We have to be careful not to break compilation for
4043 32-bit X_add_number, though. */
4044 if ((exp.X_add_number & ~0xffffffffl) != 0)
4045 {
4046 /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4. */
4047 inst.operands[i].reg = ((exp.X_add_number >> 16) >> 16) & 0xffffffff;
4048 inst.operands[i].regisimm = 1;
4049 }
4050 }
4051 else if (exp.X_op == O_big
4052 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32
4053 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number <= 64)
4054 {
4055 unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
4056 /* Bignums have their least significant bits in
4057 generic_bignum[0]. Make sure we put 32 bits in imm and
4058 32 bits in reg, in a (hopefully) portable way. */
4059 assert (parts != 0);
4060 inst.operands[i].imm = 0;
4061 for (j = 0; j < parts; j++, idx++)
4062 inst.operands[i].imm |= generic_bignum[idx]
4063 << (LITTLENUM_NUMBER_OF_BITS * j);
4064 inst.operands[i].reg = 0;
4065 for (j = 0; j < parts; j++, idx++)
4066 inst.operands[i].reg |= generic_bignum[idx]
4067 << (LITTLENUM_NUMBER_OF_BITS * j);
4068 inst.operands[i].regisimm = 1;
4069 }
4070 else
4071 return FAIL;
4072
4073 *str = ptr;
4074
4075 return SUCCESS;
4076 }
4077
4078 /* Returns the pseudo-register number of an FPA immediate constant,
4079 or FAIL if there isn't a valid constant here. */
4080
4081 static int
4082 parse_fpa_immediate (char ** str)
4083 {
4084 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4085 char * save_in;
4086 expressionS exp;
4087 int i;
4088 int j;
4089
4090 /* First try to match exact strings; this is to guarantee
4091 that some formats will work even for cross assembly. */
4092
4093 for (i = 0; fp_const[i]; i++)
4094 {
4095 if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
4096 {
4097 char *start = *str;
4098
4099 *str += strlen (fp_const[i]);
4100 if (is_end_of_line[(unsigned char) **str])
4101 return i + 8;
4102 *str = start;
4103 }
4104 }
4105
4106 /* Just because we didn't get a match doesn't mean that the constant
4107 isn't valid, just that it is in a format that we don't
4108 automatically recognize. Try parsing it with the standard
4109 expression routines. */
4110
4111 memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
4112
4113 /* Look for a raw floating point number. */
4114 if ((save_in = atof_ieee (*str, 'x', words)) != NULL
4115 && is_end_of_line[(unsigned char) *save_in])
4116 {
4117 for (i = 0; i < NUM_FLOAT_VALS; i++)
4118 {
4119 for (j = 0; j < MAX_LITTLENUMS; j++)
4120 {
4121 if (words[j] != fp_values[i][j])
4122 break;
4123 }
4124
4125 if (j == MAX_LITTLENUMS)
4126 {
4127 *str = save_in;
4128 return i + 8;
4129 }
4130 }
4131 }
4132
4133 /* Try to parse a more complex expression; this will probably fail
4134 unless the code uses a floating point prefix (e.g. "0f").
4135 save_in = input_line_pointer;
4136 input_line_pointer = *str;
4137 if (expression (&exp) == absolute_section
4138 && exp.X_op == O_big
4139 && exp.X_add_number < 0)
4140 {
4141 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4142 Ditto for 15. */
4143 if (gen_to_words (words, 5, (long) 15) == 0)
4144 {
4145 for (i = 0; i < NUM_FLOAT_VALS; i++)
4146 {
4147 for (j = 0; j < MAX_LITTLENUMS; j++)
4148 {
4149 if (words[j] != fp_values[i][j])
4150 break;
4151 }
4152
4153 if (j == MAX_LITTLENUMS)
4154 {
4155 *str = input_line_pointer;
4156 input_line_pointer = save_in;
4157 return i + 8;
4158 }
4159 }
4160 }
4161 }
4162
4163 *str = input_line_pointer;
4164 input_line_pointer = save_in;
4165 inst.error = _("invalid FPA immediate expression");
4166 return FAIL;
4167 }
4168
4169 /* Returns 1 if a number has "quarter-precision" float format
4170 0baBbbbbbc defgh000 00000000 00000000. */
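/* Worked example: 1.0f is 0x3f800000 (sign 0, exponent 0x7f, zero low-order
   mantissa bits), which fits the pattern above, so is_quarter_float returns 1
   for it. */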
4171
4172 static int
4173 is_quarter_float (unsigned imm)
4174 {
4175 int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
4176 return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
4177 }
4178
4179 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4180 0baBbbbbbc defgh000 00000000 00000000.
4181 The zero and minus-zero cases need special handling, since they can't be
4182 encoded in the "quarter-precision" float format, but can nonetheless be
4183 loaded as integer constants. */
4184
4185 static unsigned
4186 parse_qfloat_immediate (char **ccp, int *immed)
4187 {
4188 char *str = *ccp;
4189 char *fpnum;
4190 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4191 int found_fpchar = 0;
4192
4193 skip_past_char (&str, '#');
4194
4195 /* We must not accidentally parse an integer as a floating-point number. Make
4196 sure that the value we parse is not an integer by checking for special
4197 characters '.' or 'e'.
4198 FIXME: This is a horrible hack, but doing better is tricky because type
4199 information isn't in a very usable state at parse time. */
4200 fpnum = str;
4201 skip_whitespace (fpnum);
4202
4203 if (strncmp (fpnum, "0x", 2) == 0)
4204 return FAIL;
4205 else
4206 {
4207 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
4208 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
4209 {
4210 found_fpchar = 1;
4211 break;
4212 }
4213
4214 if (!found_fpchar)
4215 return FAIL;
4216 }
4217
4218 if ((str = atof_ieee (str, 's', words)) != NULL)
4219 {
4220 unsigned fpword = 0;
4221 int i;
4222
4223 /* Our FP word must be 32 bits (single-precision FP). */
4224 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
4225 {
4226 fpword <<= LITTLENUM_NUMBER_OF_BITS;
4227 fpword |= words[i];
4228 }
4229
4230 if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
4231 *immed = fpword;
4232 else
4233 return FAIL;
4234
4235 *ccp = str;
4236
4237 return SUCCESS;
4238 }
4239
4240 return FAIL;
4241 }
4242
4243 /* Shift operands. */
4244 enum shift_kind
4245 {
4246 SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
4247 };
4248
4249 struct asm_shift_name
4250 {
4251 const char *name;
4252 enum shift_kind kind;
4253 };
4254
4255 /* Third argument to parse_shift. */
4256 enum parse_shift_mode
4257 {
4258 NO_SHIFT_RESTRICT, /* Any kind of shift is accepted. */
4259 SHIFT_IMMEDIATE, /* Shift operand must be an immediate. */
4260 SHIFT_LSL_OR_ASR_IMMEDIATE, /* Shift must be LSL or ASR immediate. */
4261 SHIFT_ASR_IMMEDIATE, /* Shift must be ASR immediate. */
4262 SHIFT_LSL_IMMEDIATE, /* Shift must be LSL immediate. */
4263 };
4264
4265 /* Parse a <shift> specifier on an ARM data processing instruction.
4266 This has three forms:
4267
4268 (LSL|LSR|ASL|ASR|ROR) Rs
4269 (LSL|LSR|ASL|ASR|ROR) #imm
4270 RRX
4271
4272 Note that ASL is assimilated to LSL in the instruction encoding, and
4273 RRX to ROR #0 (which cannot be written as such). */
4274
4275 static int
4276 parse_shift (char **str, int i, enum parse_shift_mode mode)
4277 {
4278 const struct asm_shift_name *shift_name;
4279 enum shift_kind shift;
4280 char *s = *str;
4281 char *p = s;
4282 int reg;
4283
4284 for (p = *str; ISALPHA (*p); p++)
4285 ;
4286
4287 if (p == *str)
4288 {
4289 inst.error = _("shift expression expected");
4290 return FAIL;
4291 }
4292
4293 shift_name = hash_find_n (arm_shift_hsh, *str, p - *str);
4294
4295 if (shift_name == NULL)
4296 {
4297 inst.error = _("shift expression expected");
4298 return FAIL;
4299 }
4300
4301 shift = shift_name->kind;
4302
4303 switch (mode)
4304 {
4305 case NO_SHIFT_RESTRICT:
4306 case SHIFT_IMMEDIATE: break;
4307
4308 case SHIFT_LSL_OR_ASR_IMMEDIATE:
4309 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
4310 {
4311 inst.error = _("'LSL' or 'ASR' required");
4312 return FAIL;
4313 }
4314 break;
4315
4316 case SHIFT_LSL_IMMEDIATE:
4317 if (shift != SHIFT_LSL)
4318 {
4319 inst.error = _("'LSL' required");
4320 return FAIL;
4321 }
4322 break;
4323
4324 case SHIFT_ASR_IMMEDIATE:
4325 if (shift != SHIFT_ASR)
4326 {
4327 inst.error = _("'ASR' required");
4328 return FAIL;
4329 }
4330 break;
4331
4332 default: abort ();
4333 }
4334
4335 if (shift != SHIFT_RRX)
4336 {
4337 /* Whitespace can appear here if the next thing is a bare digit. */
4338 skip_whitespace (p);
4339
4340 if (mode == NO_SHIFT_RESTRICT
4341 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4342 {
4343 inst.operands[i].imm = reg;
4344 inst.operands[i].immisreg = 1;
4345 }
4346 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4347 return FAIL;
4348 }
4349 inst.operands[i].shift_kind = shift;
4350 inst.operands[i].shifted = 1;
4351 *str = p;
4352 return SUCCESS;
4353 }
4354
4355 /* Parse a <shifter_operand> for an ARM data processing instruction:
4356
4357 #<immediate>
4358 #<immediate>, <rotate>
4359 <Rm>
4360 <Rm>, <shift>
4361
4362 where <shift> is defined by parse_shift above, and <rotate> is a
4363 multiple of 2 between 0 and 30. Validation of immediate operands
4364 is deferred to md_apply_fix. */
4365
4366 static int
4367 parse_shifter_operand (char **str, int i)
4368 {
4369 int value;
4370 expressionS expr;
4371
4372 if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
4373 {
4374 inst.operands[i].reg = value;
4375 inst.operands[i].isreg = 1;
4376
4377 /* parse_shift will override this if appropriate */
4378 inst.reloc.exp.X_op = O_constant;
4379 inst.reloc.exp.X_add_number = 0;
4380
4381 if (skip_past_comma (str) == FAIL)
4382 return SUCCESS;
4383
4384 /* Shift operation on register. */
4385 return parse_shift (str, i, NO_SHIFT_RESTRICT);
4386 }
4387
4388 if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
4389 return FAIL;
4390
4391 if (skip_past_comma (str) == SUCCESS)
4392 {
4393 	  /* #x, y -- i.e. explicit rotation by y.  */
4394 if (my_get_expression (&expr, str, GE_NO_PREFIX))
4395 return FAIL;
4396
4397 if (expr.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
4398 {
4399 inst.error = _("constant expression expected");
4400 return FAIL;
4401 }
4402
4403 value = expr.X_add_number;
4404 if (value < 0 || value > 30 || value % 2 != 0)
4405 {
4406 inst.error = _("invalid rotation");
4407 return FAIL;
4408 }
4409 if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
4410 {
4411 inst.error = _("invalid constant");
4412 return FAIL;
4413 }
4414
4415 /* Convert to decoded value. md_apply_fix will put it back. */
4416 inst.reloc.exp.X_add_number
4417 = (((inst.reloc.exp.X_add_number << (32 - value))
4418 | (inst.reloc.exp.X_add_number >> value)) & 0xffffffff);
4419 }
4420
4421 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
4422 inst.reloc.pc_rel = 0;
4423 return SUCCESS;
4424 }
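
/* For example, the operand "#0xff, 8" denotes 0xff rotated right by 8, so
   the decoded value stored in inst.reloc.exp.X_add_number above is
   0xff000000; md_apply_fix later re-derives the 8-bit constant and the
   rotation when it processes BFD_RELOC_ARM_IMMEDIATE.  */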
4425
4426 /* Group relocation information. Each entry in the table contains the
4427    textual name of the relocation as it may appear in assembler source,
4428    which must be followed there by a colon.
4429 Along with this textual name are the relocation codes to be used if
4430 the corresponding instruction is an ALU instruction (ADD or SUB only),
4431 an LDR, an LDRS, or an LDC. */
4432
4433 struct group_reloc_table_entry
4434 {
4435 const char *name;
4436 int alu_code;
4437 int ldr_code;
4438 int ldrs_code;
4439 int ldc_code;
4440 };
4441
4442 typedef enum
4443 {
4444 /* Varieties of non-ALU group relocation. */
4445
4446 GROUP_LDR,
4447 GROUP_LDRS,
4448 GROUP_LDC
4449 } group_reloc_type;
4450
4451 static struct group_reloc_table_entry group_reloc_table[] =
4452 { /* Program counter relative: */
4453 { "pc_g0_nc",
4454 BFD_RELOC_ARM_ALU_PC_G0_NC, /* ALU */
4455 0, /* LDR */
4456 0, /* LDRS */
4457 0 }, /* LDC */
4458 { "pc_g0",
4459 BFD_RELOC_ARM_ALU_PC_G0, /* ALU */
4460 BFD_RELOC_ARM_LDR_PC_G0, /* LDR */
4461 BFD_RELOC_ARM_LDRS_PC_G0, /* LDRS */
4462 BFD_RELOC_ARM_LDC_PC_G0 }, /* LDC */
4463 { "pc_g1_nc",
4464 BFD_RELOC_ARM_ALU_PC_G1_NC, /* ALU */
4465 0, /* LDR */
4466 0, /* LDRS */
4467 0 }, /* LDC */
4468 { "pc_g1",
4469 BFD_RELOC_ARM_ALU_PC_G1, /* ALU */
4470 BFD_RELOC_ARM_LDR_PC_G1, /* LDR */
4471 BFD_RELOC_ARM_LDRS_PC_G1, /* LDRS */
4472 BFD_RELOC_ARM_LDC_PC_G1 }, /* LDC */
4473 { "pc_g2",
4474 BFD_RELOC_ARM_ALU_PC_G2, /* ALU */
4475 BFD_RELOC_ARM_LDR_PC_G2, /* LDR */
4476 BFD_RELOC_ARM_LDRS_PC_G2, /* LDRS */
4477 BFD_RELOC_ARM_LDC_PC_G2 }, /* LDC */
4478 /* Section base relative */
4479 { "sb_g0_nc",
4480 BFD_RELOC_ARM_ALU_SB_G0_NC, /* ALU */
4481 0, /* LDR */
4482 0, /* LDRS */
4483 0 }, /* LDC */
4484 { "sb_g0",
4485 BFD_RELOC_ARM_ALU_SB_G0, /* ALU */
4486 BFD_RELOC_ARM_LDR_SB_G0, /* LDR */
4487 BFD_RELOC_ARM_LDRS_SB_G0, /* LDRS */
4488 BFD_RELOC_ARM_LDC_SB_G0 }, /* LDC */
4489 { "sb_g1_nc",
4490 BFD_RELOC_ARM_ALU_SB_G1_NC, /* ALU */
4491 0, /* LDR */
4492 0, /* LDRS */
4493 0 }, /* LDC */
4494 { "sb_g1",
4495 BFD_RELOC_ARM_ALU_SB_G1, /* ALU */
4496 BFD_RELOC_ARM_LDR_SB_G1, /* LDR */
4497 BFD_RELOC_ARM_LDRS_SB_G1, /* LDRS */
4498 BFD_RELOC_ARM_LDC_SB_G1 }, /* LDC */
4499 { "sb_g2",
4500 BFD_RELOC_ARM_ALU_SB_G2, /* ALU */
4501 BFD_RELOC_ARM_LDR_SB_G2, /* LDR */
4502 BFD_RELOC_ARM_LDRS_SB_G2, /* LDRS */
4503 BFD_RELOC_ARM_LDC_SB_G2 } }; /* LDC */
4504
4505 /* Given the address of a pointer pointing to the textual name of a group
4506 relocation as may appear in assembler source, attempt to find its details
4507 in group_reloc_table. The pointer will be updated to the character after
4508 the trailing colon. On failure, FAIL will be returned; SUCCESS
4509 otherwise. On success, *entry will be updated to point at the relevant
4510 group_reloc_table entry. */
4511
4512 static int
4513 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
4514 {
4515 unsigned int i;
4516 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
4517 {
4518 int length = strlen (group_reloc_table[i].name);
4519
4520 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0 &&
4521 (*str)[length] == ':')
4522 {
4523 *out = &group_reloc_table[i];
4524 *str += (length + 1);
4525 return SUCCESS;
4526 }
4527 }
4528
4529 return FAIL;
4530 }
4531
4532 /* Parse a <shifter_operand> for an ARM data processing instruction
4533 (as for parse_shifter_operand) where group relocations are allowed:
4534
4535 #<immediate>
4536 #<immediate>, <rotate>
4537 #:<group_reloc>:<expression>
4538 <Rm>
4539 <Rm>, <shift>
4540
4541 where <group_reloc> is one of the strings defined in group_reloc_table.
4542 The hashes are optional.
4543
4544 Everything else is as for parse_shifter_operand. */
4545
4546 static parse_operand_result
4547 parse_shifter_operand_group_reloc (char **str, int i)
4548 {
4549 /* Determine if we have the sequence of characters #: or just :
4550 coming next. If we do, then we check for a group relocation.
4551 If we don't, punt the whole lot to parse_shifter_operand. */
4552
4553 if (((*str)[0] == '#' && (*str)[1] == ':')
4554 || (*str)[0] == ':')
4555 {
4556 struct group_reloc_table_entry *entry;
4557
4558 if ((*str)[0] == '#')
4559 (*str) += 2;
4560 else
4561 (*str)++;
4562
4563 /* Try to parse a group relocation. Anything else is an error. */
4564 if (find_group_reloc_table_entry (str, &entry) == FAIL)
4565 {
4566 inst.error = _("unknown group relocation");
4567 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4568 }
4569
4570 /* We now have the group relocation table entry corresponding to
4571 the name in the assembler source. Next, we parse the expression. */
4572 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
4573 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4574
4575 /* Record the relocation type (always the ALU variant here). */
4576 inst.reloc.type = entry->alu_code;
4577 assert (inst.reloc.type != 0);
4578
4579 return PARSE_OPERAND_SUCCESS;
4580 }
4581 else
4582 return parse_shifter_operand (str, i) == SUCCESS
4583 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
4584
4585 /* Never reached. */
4586 }
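
/* For example, "add r0, r1, #:pc_g0_nc:(sym)" is handled here: the
   pc_g0_nc entry of group_reloc_table supplies the ALU relocation
   BFD_RELOC_ARM_ALU_PC_G0_NC and the expression (sym) goes into
   inst.reloc.exp.  */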
4587
4588 /* Parse all forms of an ARM address expression. Information is written
4589 to inst.operands[i] and/or inst.reloc.
4590
4591 Preindexed addressing (.preind=1):
4592
4593 [Rn, #offset] .reg=Rn .reloc.exp=offset
4594 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4595 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4596 .shift_kind=shift .reloc.exp=shift_imm
4597
4598 These three may have a trailing ! which causes .writeback to be set also.
4599
4600 Postindexed addressing (.postind=1, .writeback=1):
4601
4602 [Rn], #offset .reg=Rn .reloc.exp=offset
4603 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4604 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4605 .shift_kind=shift .reloc.exp=shift_imm
4606
4607 Unindexed addressing (.preind=0, .postind=0):
4608
4609 [Rn], {option} .reg=Rn .imm=option .immisreg=0
4610
4611 Other:
4612
4613 [Rn]{!} shorthand for [Rn,#0]{!}
4614 =immediate .isreg=0 .reloc.exp=immediate
4615 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
4616
4617 It is the caller's responsibility to check for addressing modes not
4618 supported by the instruction, and to set inst.reloc.type. */
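
/* As a concrete example, "[r2, #4]!" sets .reg=2, .preind=1, .writeback=1
   and .reloc.exp=4, while "[r2], r3, lsl #2" sets .reg=2, .postind=1,
   .writeback=1, .imm=3, .immisreg=1 and records the shift in
   .shift_kind/.reloc.exp.  */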
4619
4620 static parse_operand_result
4621 parse_address_main (char **str, int i, int group_relocations,
4622 group_reloc_type group_type)
4623 {
4624 char *p = *str;
4625 int reg;
4626
4627 if (skip_past_char (&p, '[') == FAIL)
4628 {
4629 if (skip_past_char (&p, '=') == FAIL)
4630 {
4631 /* bare address - translate to PC-relative offset */
4632 inst.reloc.pc_rel = 1;
4633 inst.operands[i].reg = REG_PC;
4634 inst.operands[i].isreg = 1;
4635 inst.operands[i].preind = 1;
4636 }
4637 /* else a load-constant pseudo op, no special treatment needed here */
4638
4639 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4640 return PARSE_OPERAND_FAIL;
4641
4642 *str = p;
4643 return PARSE_OPERAND_SUCCESS;
4644 }
4645
4646 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
4647 {
4648 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
4649 return PARSE_OPERAND_FAIL;
4650 }
4651 inst.operands[i].reg = reg;
4652 inst.operands[i].isreg = 1;
4653
4654 if (skip_past_comma (&p) == SUCCESS)
4655 {
4656 inst.operands[i].preind = 1;
4657
4658 if (*p == '+') p++;
4659 else if (*p == '-') p++, inst.operands[i].negative = 1;
4660
4661 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4662 {
4663 inst.operands[i].imm = reg;
4664 inst.operands[i].immisreg = 1;
4665
4666 if (skip_past_comma (&p) == SUCCESS)
4667 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
4668 return PARSE_OPERAND_FAIL;
4669 }
4670 else if (skip_past_char (&p, ':') == SUCCESS)
4671 {
4672 /* FIXME: '@' should be used here, but it's filtered out by generic
4673 code before we get to see it here. This may be subject to
4674 change. */
4675 expressionS exp;
4676 my_get_expression (&exp, &p, GE_NO_PREFIX);
4677 if (exp.X_op != O_constant)
4678 {
4679 inst.error = _("alignment must be constant");
4680 return PARSE_OPERAND_FAIL;
4681 }
4682 inst.operands[i].imm = exp.X_add_number << 8;
4683 inst.operands[i].immisalign = 1;
4684 /* Alignments are not pre-indexes. */
4685 inst.operands[i].preind = 0;
4686 }
4687 else
4688 {
4689 if (inst.operands[i].negative)
4690 {
4691 inst.operands[i].negative = 0;
4692 p--;
4693 }
4694
4695 if (group_relocations &&
4696 ((*p == '#' && *(p + 1) == ':') || *p == ':'))
4697
4698 {
4699 struct group_reloc_table_entry *entry;
4700
4701 /* Skip over the #: or : sequence. */
4702 if (*p == '#')
4703 p += 2;
4704 else
4705 p++;
4706
4707 /* Try to parse a group relocation. Anything else is an
4708 error. */
4709 if (find_group_reloc_table_entry (&p, &entry) == FAIL)
4710 {
4711 inst.error = _("unknown group relocation");
4712 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4713 }
4714
4715 /* We now have the group relocation table entry corresponding to
4716 the name in the assembler source. Next, we parse the
4717 expression. */
4718 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4719 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4720
4721 /* Record the relocation type. */
4722 switch (group_type)
4723 {
4724 case GROUP_LDR:
4725 inst.reloc.type = entry->ldr_code;
4726 break;
4727
4728 case GROUP_LDRS:
4729 inst.reloc.type = entry->ldrs_code;
4730 break;
4731
4732 case GROUP_LDC:
4733 inst.reloc.type = entry->ldc_code;
4734 break;
4735
4736 default:
4737 assert (0);
4738 }
4739
4740 if (inst.reloc.type == 0)
4741 {
4742 inst.error = _("this group relocation is not allowed on this instruction");
4743 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4744 }
4745 }
4746 else
4747 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4748 return PARSE_OPERAND_FAIL;
4749 }
4750 }
4751
4752 if (skip_past_char (&p, ']') == FAIL)
4753 {
4754 inst.error = _("']' expected");
4755 return PARSE_OPERAND_FAIL;
4756 }
4757
4758 if (skip_past_char (&p, '!') == SUCCESS)
4759 inst.operands[i].writeback = 1;
4760
4761 else if (skip_past_comma (&p) == SUCCESS)
4762 {
4763 if (skip_past_char (&p, '{') == SUCCESS)
4764 {
4765 /* [Rn], {expr} - unindexed, with option */
4766 if (parse_immediate (&p, &inst.operands[i].imm,
4767 0, 255, TRUE) == FAIL)
4768 return PARSE_OPERAND_FAIL;
4769
4770 if (skip_past_char (&p, '}') == FAIL)
4771 {
4772 inst.error = _("'}' expected at end of 'option' field");
4773 return PARSE_OPERAND_FAIL;
4774 }
4775 if (inst.operands[i].preind)
4776 {
4777 inst.error = _("cannot combine index with option");
4778 return PARSE_OPERAND_FAIL;
4779 }
4780 *str = p;
4781 return PARSE_OPERAND_SUCCESS;
4782 }
4783 else
4784 {
4785 inst.operands[i].postind = 1;
4786 inst.operands[i].writeback = 1;
4787
4788 if (inst.operands[i].preind)
4789 {
4790 inst.error = _("cannot combine pre- and post-indexing");
4791 return PARSE_OPERAND_FAIL;
4792 }
4793
4794 if (*p == '+') p++;
4795 else if (*p == '-') p++, inst.operands[i].negative = 1;
4796
4797 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4798 {
4799 /* We might be using the immediate for alignment already. If we
4800 are, OR the register number into the low-order bits. */
4801 if (inst.operands[i].immisalign)
4802 inst.operands[i].imm |= reg;
4803 else
4804 inst.operands[i].imm = reg;
4805 inst.operands[i].immisreg = 1;
4806
4807 if (skip_past_comma (&p) == SUCCESS)
4808 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
4809 return PARSE_OPERAND_FAIL;
4810 }
4811 else
4812 {
4813 if (inst.operands[i].negative)
4814 {
4815 inst.operands[i].negative = 0;
4816 p--;
4817 }
4818 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4819 return PARSE_OPERAND_FAIL;
4820 }
4821 }
4822 }
4823
4824 /* If at this point neither .preind nor .postind is set, we have a
4825 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
4826 if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
4827 {
4828 inst.operands[i].preind = 1;
4829 inst.reloc.exp.X_op = O_constant;
4830 inst.reloc.exp.X_add_number = 0;
4831 }
4832 *str = p;
4833 return PARSE_OPERAND_SUCCESS;
4834 }
4835
4836 static int
4837 parse_address (char **str, int i)
4838 {
4839 return parse_address_main (str, i, 0, 0) == PARSE_OPERAND_SUCCESS
4840 ? SUCCESS : FAIL;
4841 }
4842
4843 static parse_operand_result
4844 parse_address_group_reloc (char **str, int i, group_reloc_type type)
4845 {
4846 return parse_address_main (str, i, 1, type);
4847 }
4848
4849 /* Parse an operand for a MOVW or MOVT instruction. */
4850 static int
4851 parse_half (char **str)
4852 {
4853 char * p;
4854
4855 p = *str;
4856 skip_past_char (&p, '#');
4857 if (strncasecmp (p, ":lower16:", 9) == 0)
4858 inst.reloc.type = BFD_RELOC_ARM_MOVW;
4859 else if (strncasecmp (p, ":upper16:", 9) == 0)
4860 inst.reloc.type = BFD_RELOC_ARM_MOVT;
4861
4862 if (inst.reloc.type != BFD_RELOC_UNUSED)
4863 {
4864 p += 9;
4865 skip_whitespace(p);
4866 }
4867
4868 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4869 return FAIL;
4870
4871 if (inst.reloc.type == BFD_RELOC_UNUSED)
4872 {
4873 if (inst.reloc.exp.X_op != O_constant)
4874 {
4875 inst.error = _("constant expression expected");
4876 return FAIL;
4877 }
4878 if (inst.reloc.exp.X_add_number < 0
4879 || inst.reloc.exp.X_add_number > 0xffff)
4880 {
4881 inst.error = _("immediate value out of range");
4882 return FAIL;
4883 }
4884 }
4885 *str = p;
4886 return SUCCESS;
4887 }
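
/* For example, "movw r0, #:lower16:sym" selects BFD_RELOC_ARM_MOVW and
   defers the value to the relocation, whereas "movw r0, #0x1234" must be
   a constant in the range 0..0xffff and is rejected otherwise.  */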
4888
4889 /* Miscellaneous. */
4890
4891 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
4892 or a bitmask suitable to be or-ed into the ARM msr instruction. */
4893 static int
4894 parse_psr (char **str)
4895 {
4896 char *p;
4897 unsigned long psr_field;
4898 const struct asm_psr *psr;
4899 char *start;
4900
4901   /* CPSR and SPSR may also be written in lower case.  This is just a
4902      convenience feature for ease of use and backwards compatibility. */
4903 p = *str;
4904 if (strncasecmp (p, "SPSR", 4) == 0)
4905 psr_field = SPSR_BIT;
4906 else if (strncasecmp (p, "CPSR", 4) == 0)
4907 psr_field = 0;
4908 else
4909 {
4910 start = p;
4911 do
4912 p++;
4913 while (ISALNUM (*p) || *p == '_');
4914
4915 psr = hash_find_n (arm_v7m_psr_hsh, start, p - start);
4916 if (!psr)
4917 return FAIL;
4918
4919 *str = p;
4920 return psr->field;
4921 }
4922
4923 p += 4;
4924 if (*p == '_')
4925 {
4926 /* A suffix follows. */
4927 p++;
4928 start = p;
4929
4930 do
4931 p++;
4932 while (ISALNUM (*p) || *p == '_');
4933
4934 psr = hash_find_n (arm_psr_hsh, start, p - start);
4935 if (!psr)
4936 goto error;
4937
4938 psr_field |= psr->field;
4939 }
4940 else
4941 {
4942 if (ISALNUM (*p))
4943 goto error; /* Garbage after "[CS]PSR". */
4944
4945 psr_field |= (PSR_c | PSR_f);
4946 }
4947 *str = p;
4948 return psr_field;
4949
4950 error:
4951 inst.error = _("flag for {c}psr instruction expected");
4952 return FAIL;
4953 }
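
/* The suffix after '_' (e.g. "fc") is looked up in arm_psr_hsh and OR-ed
   into psr_field, so "SPSR_f" combines SPSR_BIT with the f field, while a
   bare "CPSR" or "SPSR" defaults to PSR_c | PSR_f as above.  */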
4954
4955 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
4956 value suitable for splatting into the AIF field of the instruction. */
4957
4958 static int
4959 parse_cps_flags (char **str)
4960 {
4961 int val = 0;
4962 int saw_a_flag = 0;
4963 char *s = *str;
4964
4965 for (;;)
4966 switch (*s++)
4967 {
4968 case '\0': case ',':
4969 goto done;
4970
4971 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
4972 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
4973 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
4974
4975 default:
4976 inst.error = _("unrecognized CPS flag");
4977 return FAIL;
4978 }
4979
4980 done:
4981 if (saw_a_flag == 0)
4982 {
4983 inst.error = _("missing CPS flags");
4984 return FAIL;
4985 }
4986
4987 *str = s - 1;
4988 return val;
4989 }
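
/* The a, i and f letters may appear in any order and combination, so for
   example "cpsie if" parses the flag string "if" to 0x3 and "cpsid aif"
   parses "aif" to 0x7.  */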
4990
4991 /* Parse an endian specifier ("BE" or "LE", case insensitive);
4992 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
4993
4994 static int
4995 parse_endian_specifier (char **str)
4996 {
4997 int little_endian;
4998 char *s = *str;
4999
5000 if (strncasecmp (s, "BE", 2))
5001 little_endian = 0;
5002 else if (strncasecmp (s, "LE", 2))
5003 little_endian = 1;
5004 else
5005 {
5006 inst.error = _("valid endian specifiers are be or le");
5007 return FAIL;
5008 }
5009
5010 if (ISALNUM (s[2]) || s[2] == '_')
5011 {
5012 inst.error = _("valid endian specifiers are be or le");
5013 return FAIL;
5014 }
5015
5016 *str = s + 2;
5017 return little_endian;
5018 }
5019
5020 /* Parse a rotation specifier: ROR #0, #8, #16, #24.  Returns a value
5021    suitable for poking into the rotate field of an sxt or sxta
5022    instruction, or FAIL on error.  */
5023
5024 static int
5025 parse_ror (char **str)
5026 {
5027 int rot;
5028 char *s = *str;
5029
5030 if (strncasecmp (s, "ROR", 3) == 0)
5031 s += 3;
5032 else
5033 {
5034 inst.error = _("missing rotation field after comma");
5035 return FAIL;
5036 }
5037
5038 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
5039 return FAIL;
5040
5041 switch (rot)
5042 {
5043 case 0: *str = s; return 0x0;
5044 case 8: *str = s; return 0x1;
5045 case 16: *str = s; return 0x2;
5046 case 24: *str = s; return 0x3;
5047
5048 default:
5049 inst.error = _("rotation can only be 0, 8, 16, or 24");
5050 return FAIL;
5051 }
5052 }
5053
5054 /* Parse a conditional code (from conds[] below). The value returned is in the
5055 range 0 .. 14, or FAIL. */
5056 static int
5057 parse_cond (char **str)
5058 {
5059 char *p, *q;
5060 const struct asm_cond *c;
5061
5062 p = q = *str;
5063 while (ISALPHA (*q))
5064 q++;
5065
5066 c = hash_find_n (arm_cond_hsh, p, q - p);
5067 if (!c)
5068 {
5069 inst.error = _("condition required");
5070 return FAIL;
5071 }
5072
5073 *str = q;
5074 return c->value;
5075 }
5076
5077 /* Parse an option for a barrier instruction. Returns the encoding for the
5078 option, or FAIL. */
5079 static int
5080 parse_barrier (char **str)
5081 {
5082 char *p, *q;
5083 const struct asm_barrier_opt *o;
5084
5085 p = q = *str;
5086 while (ISALPHA (*q))
5087 q++;
5088
5089 o = hash_find_n (arm_barrier_opt_hsh, p, q - p);
5090 if (!o)
5091 return FAIL;
5092
5093 *str = q;
5094 return o->value;
5095 }
5096
5097 /* Parse the operands of a table branch instruction. Similar to a memory
5098 operand. */
5099 static int
5100 parse_tb (char **str)
5101 {
5102 char * p = *str;
5103 int reg;
5104
5105 if (skip_past_char (&p, '[') == FAIL)
5106 {
5107 inst.error = _("'[' expected");
5108 return FAIL;
5109 }
5110
5111 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5112 {
5113 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5114 return FAIL;
5115 }
5116 inst.operands[0].reg = reg;
5117
5118 if (skip_past_comma (&p) == FAIL)
5119 {
5120 inst.error = _("',' expected");
5121 return FAIL;
5122 }
5123
5124 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5125 {
5126 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5127 return FAIL;
5128 }
5129 inst.operands[0].imm = reg;
5130
5131 if (skip_past_comma (&p) == SUCCESS)
5132 {
5133 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
5134 return FAIL;
5135 if (inst.reloc.exp.X_add_number != 1)
5136 {
5137 inst.error = _("invalid shift");
5138 return FAIL;
5139 }
5140 inst.operands[0].shifted = 1;
5141 }
5142
5143 if (skip_past_char (&p, ']') == FAIL)
5144 {
5145 inst.error = _("']' expected");
5146 return FAIL;
5147 }
5148 *str = p;
5149 return SUCCESS;
5150 }
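
/* For example, "tbh [r0, r1, lsl #1]" parses r0 into operands[0].reg and
   r1 into operands[0].imm, and the shift is accepted only because its
   immediate is exactly 1; "tbb [r0, r1]" takes the unshifted form.  */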
5151
5152 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
5153 information on the types the operands can take and how they are encoded.
5154 Up to four operands may be read; this function handles setting the
5155 ".present" field for each read operand itself.
5156 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
5157 else returns FAIL. */
5158
5159 static int
5160 parse_neon_mov (char **str, int *which_operand)
5161 {
5162 int i = *which_operand, val;
5163 enum arm_reg_type rtype;
5164 char *ptr = *str;
5165 struct neon_type_el optype;
5166
5167 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
5168 {
5169 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
5170 inst.operands[i].reg = val;
5171 inst.operands[i].isscalar = 1;
5172 inst.operands[i].vectype = optype;
5173 inst.operands[i++].present = 1;
5174
5175 if (skip_past_comma (&ptr) == FAIL)
5176 goto wanted_comma;
5177
5178 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5179 goto wanted_arm;
5180
5181 inst.operands[i].reg = val;
5182 inst.operands[i].isreg = 1;
5183 inst.operands[i].present = 1;
5184 }
5185 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
5186 != FAIL)
5187 {
5188 /* Cases 0, 1, 2, 3, 5 (D only). */
5189 if (skip_past_comma (&ptr) == FAIL)
5190 goto wanted_comma;
5191
5192 inst.operands[i].reg = val;
5193 inst.operands[i].isreg = 1;
5194 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
5195 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5196 inst.operands[i].isvec = 1;
5197 inst.operands[i].vectype = optype;
5198 inst.operands[i++].present = 1;
5199
5200 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5201 {
5202 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
5203 Case 13: VMOV <Sd>, <Rm> */
5204 inst.operands[i].reg = val;
5205 inst.operands[i].isreg = 1;
5206 inst.operands[i].present = 1;
5207
5208 if (rtype == REG_TYPE_NQ)
5209 {
5210 first_error (_("can't use Neon quad register here"));
5211 return FAIL;
5212 }
5213 else if (rtype != REG_TYPE_VFS)
5214 {
5215 i++;
5216 if (skip_past_comma (&ptr) == FAIL)
5217 goto wanted_comma;
5218 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5219 goto wanted_arm;
5220 inst.operands[i].reg = val;
5221 inst.operands[i].isreg = 1;
5222 inst.operands[i].present = 1;
5223 }
5224 }
5225 else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
5226 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
5227 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
5228 Case 10: VMOV.F32 <Sd>, #<imm>
5229 Case 11: VMOV.F64 <Dd>, #<imm> */
5230 inst.operands[i].immisfloat = 1;
5231 else if (parse_big_immediate (&ptr, i) == SUCCESS)
5232 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
5233 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
5234 ;
5235 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
5236 &optype)) != FAIL)
5237 {
5238 /* Case 0: VMOV<c><q> <Qd>, <Qm>
5239 Case 1: VMOV<c><q> <Dd>, <Dm>
5240 Case 8: VMOV.F32 <Sd>, <Sm>
5241 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
5242
5243 inst.operands[i].reg = val;
5244 inst.operands[i].isreg = 1;
5245 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
5246 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5247 inst.operands[i].isvec = 1;
5248 inst.operands[i].vectype = optype;
5249 inst.operands[i].present = 1;
5250
5251 if (skip_past_comma (&ptr) == SUCCESS)
5252 {
5253 /* Case 15. */
5254 i++;
5255
5256 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5257 goto wanted_arm;
5258
5259 inst.operands[i].reg = val;
5260 inst.operands[i].isreg = 1;
5261 inst.operands[i++].present = 1;
5262
5263 if (skip_past_comma (&ptr) == FAIL)
5264 goto wanted_comma;
5265
5266 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5267 goto wanted_arm;
5268
5269 inst.operands[i].reg = val;
5270 inst.operands[i].isreg = 1;
5271 inst.operands[i++].present = 1;
5272 }
5273 }
5274 else
5275 {
5276 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
5277 return FAIL;
5278 }
5279 }
5280 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5281 {
5282 /* Cases 6, 7. */
5283 inst.operands[i].reg = val;
5284 inst.operands[i].isreg = 1;
5285 inst.operands[i++].present = 1;
5286
5287 if (skip_past_comma (&ptr) == FAIL)
5288 goto wanted_comma;
5289
5290 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
5291 {
5292 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
5293 inst.operands[i].reg = val;
5294 inst.operands[i].isscalar = 1;
5295 inst.operands[i].present = 1;
5296 inst.operands[i].vectype = optype;
5297 }
5298 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5299 {
5300 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
5301 inst.operands[i].reg = val;
5302 inst.operands[i].isreg = 1;
5303 inst.operands[i++].present = 1;
5304
5305 if (skip_past_comma (&ptr) == FAIL)
5306 goto wanted_comma;
5307
5308 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
5309 == FAIL)
5310 {
5311 first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
5312 return FAIL;
5313 }
5314
5315 inst.operands[i].reg = val;
5316 inst.operands[i].isreg = 1;
5317 inst.operands[i].isvec = 1;
5318 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5319 inst.operands[i].vectype = optype;
5320 inst.operands[i].present = 1;
5321
5322 if (rtype == REG_TYPE_VFS)
5323 {
5324 /* Case 14. */
5325 i++;
5326 if (skip_past_comma (&ptr) == FAIL)
5327 goto wanted_comma;
5328 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
5329 &optype)) == FAIL)
5330 {
5331 first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
5332 return FAIL;
5333 }
5334 inst.operands[i].reg = val;
5335 inst.operands[i].isreg = 1;
5336 inst.operands[i].isvec = 1;
5337 inst.operands[i].issingle = 1;
5338 inst.operands[i].vectype = optype;
5339 inst.operands[i].present = 1;
5340 }
5341 }
5342 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
5343 != FAIL)
5344 {
5345 /* Case 13. */
5346 inst.operands[i].reg = val;
5347 inst.operands[i].isreg = 1;
5348 inst.operands[i].isvec = 1;
5349 inst.operands[i].issingle = 1;
5350 inst.operands[i].vectype = optype;
5351 inst.operands[i++].present = 1;
5352 }
5353 }
5354 else
5355 {
5356 first_error (_("parse error"));
5357 return FAIL;
5358 }
5359
5360 /* Successfully parsed the operands. Update args. */
5361 *which_operand = i;
5362 *str = ptr;
5363 return SUCCESS;
5364
5365 wanted_comma:
5366 first_error (_("expected comma"));
5367 return FAIL;
5368
5369 wanted_arm:
5370 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
5371 return FAIL;
5372 }
5373
5374 /* Matcher codes for parse_operands. */
5375 enum operand_parse_code
5376 {
5377 OP_stop, /* end of line */
5378
5379 OP_RR, /* ARM register */
5380 OP_RRnpc, /* ARM register, not r15 */
5381 OP_RRnpcb, /* ARM register, not r15, in square brackets */
5382 OP_RRw, /* ARM register, not r15, optional trailing ! */
5383 OP_RCP, /* Coprocessor number */
5384 OP_RCN, /* Coprocessor register */
5385 OP_RF, /* FPA register */
5386 OP_RVS, /* VFP single precision register */
5387 OP_RVD, /* VFP double precision register (0..15) */
5388 OP_RND, /* Neon double precision register (0..31) */
5389 OP_RNQ, /* Neon quad precision register */
5390 OP_RVSD, /* VFP single or double precision register */
5391 OP_RNDQ, /* Neon double or quad precision register */
5392 OP_RNSDQ, /* Neon single, double or quad precision register */
5393 OP_RNSC, /* Neon scalar D[X] */
5394 OP_RVC, /* VFP control register */
5395 OP_RMF, /* Maverick F register */
5396 OP_RMD, /* Maverick D register */
5397 OP_RMFX, /* Maverick FX register */
5398 OP_RMDX, /* Maverick DX register */
5399 OP_RMAX, /* Maverick AX register */
5400 OP_RMDS, /* Maverick DSPSC register */
5401 OP_RIWR, /* iWMMXt wR register */
5402 OP_RIWC, /* iWMMXt wC register */
5403 OP_RIWG, /* iWMMXt wCG register */
5404 OP_RXA, /* XScale accumulator register */
5405
5406 OP_REGLST, /* ARM register list */
5407 OP_VRSLST, /* VFP single-precision register list */
5408 OP_VRDLST, /* VFP double-precision register list */
5409 OP_VRSDLST, /* VFP single or double-precision register list (& quad) */
5410 OP_NRDLST, /* Neon double-precision register list (d0-d31, qN aliases) */
5411 OP_NSTRLST, /* Neon element/structure list */
5412
5413 OP_NILO, /* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...) */
5414 OP_RNDQ_I0, /* Neon D or Q reg, or immediate zero. */
5415 OP_RVSD_I0, /* VFP S or D reg, or immediate zero. */
5416 OP_RR_RNSC, /* ARM reg or Neon scalar. */
5417 OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar. */
5418 OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar. */
5419 OP_RND_RNSC, /* Neon D reg, or Neon scalar. */
5420 OP_VMOV, /* Neon VMOV operands. */
5421 OP_RNDQ_IMVNb,/* Neon D or Q reg, or immediate good for VMVN. */
5422 OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift. */
5423 OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */
5424
5425 OP_I0, /* immediate zero */
5426 OP_I7, /* immediate value 0 .. 7 */
5427 OP_I15, /* 0 .. 15 */
5428 OP_I16, /* 1 .. 16 */
5429 OP_I16z, /* 0 .. 16 */
5430 OP_I31, /* 0 .. 31 */
5431 OP_I31w, /* 0 .. 31, optional trailing ! */
5432 OP_I32, /* 1 .. 32 */
5433 OP_I32z, /* 0 .. 32 */
5434 OP_I63, /* 0 .. 63 */
5435 OP_I63s, /* -64 .. 63 */
5436 OP_I64, /* 1 .. 64 */
5437 OP_I64z, /* 0 .. 64 */
5438 OP_I255, /* 0 .. 255 */
5439
5440 OP_I4b, /* immediate, prefix optional, 1 .. 4 */
5441 OP_I7b, /* 0 .. 7 */
5442 OP_I15b, /* 0 .. 15 */
5443 OP_I31b, /* 0 .. 31 */
5444
5445 OP_SH, /* shifter operand */
5446 OP_SHG, /* shifter operand with possible group relocation */
5447 OP_ADDR, /* Memory address expression (any mode) */
5448 OP_ADDRGLDR, /* Mem addr expr (any mode) with possible LDR group reloc */
5449 OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
5450 OP_ADDRGLDC, /* Mem addr expr (any mode) with possible LDC group reloc */
5451 OP_EXP, /* arbitrary expression */
5452 OP_EXPi, /* same, with optional immediate prefix */
5453 OP_EXPr, /* same, with optional relocation suffix */
5454 OP_HALF, /* 0 .. 65535 or low/high reloc. */
5455
5456 OP_CPSF, /* CPS flags */
5457 OP_ENDI, /* Endianness specifier */
5458 OP_PSR, /* CPSR/SPSR mask for msr */
5459 OP_COND, /* conditional code */
5460 OP_TB, /* Table branch. */
5461
5462 OP_RVC_PSR, /* CPSR/SPSR mask for msr, or VFP control register. */
5463 OP_APSR_RR, /* ARM register or "APSR_nzcv". */
5464
5465 OP_RRnpc_I0, /* ARM register or literal 0 */
5466 OP_RR_EXr, /* ARM register or expression with opt. reloc suff. */
5467 OP_RR_EXi, /* ARM register or expression with imm prefix */
5468 OP_RF_IF, /* FPA register or immediate */
5469 OP_RIWR_RIWC, /* iWMMXt R or C reg */
5470 OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */
5471
5472 /* Optional operands. */
5473 OP_oI7b, /* immediate, prefix optional, 0 .. 7 */
5474 OP_oI31b, /* 0 .. 31 */
5475 OP_oI32b, /* 1 .. 32 */
5476 OP_oIffffb, /* 0 .. 65535 */
5477 OP_oI255c, /* curly-brace enclosed, 0 .. 255 */
5478
5479 OP_oRR, /* ARM register */
5480 OP_oRRnpc, /* ARM register, not the PC */
5481 OP_oRRw, /* ARM register, not r15, optional trailing ! */
5482 OP_oRND, /* Optional Neon double precision register */
5483 OP_oRNQ, /* Optional Neon quad precision register */
5484 OP_oRNDQ, /* Optional Neon double or quad precision register */
5485 OP_oRNSDQ, /* Optional single, double or quad precision vector register */
5486 OP_oSHll, /* LSL immediate */
5487 OP_oSHar, /* ASR immediate */
5488 OP_oSHllar, /* LSL or ASR immediate */
5489 OP_oROR, /* ROR 0/8/16/24 */
5490 OP_oBARRIER, /* Option argument for a barrier instruction. */
5491
5492 OP_FIRST_OPTIONAL = OP_oI7b
5493 };
5494
5495 /* Generic instruction operand parser. This does no encoding and no
5496 semantic validation; it merely squirrels values away in the inst
5497 structure. Returns SUCCESS or FAIL depending on whether the
5498 specified grammar matched. */
5499 static int
5500 parse_operands (char *str, const unsigned char *pattern)
5501 {
5502 unsigned const char *upat = pattern;
5503 char *backtrack_pos = 0;
5504 const char *backtrack_error = 0;
5505 int i, val, backtrack_index = 0;
5506 enum arm_reg_type rtype;
5507 parse_operand_result result;
5508
5509 #define po_char_or_fail(chr) do { \
5510 if (skip_past_char (&str, chr) == FAIL) \
5511 goto bad_args; \
5512 } while (0)
5513
5514 #define po_reg_or_fail(regtype) do { \
5515 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5516 &inst.operands[i].vectype); \
5517 if (val == FAIL) \
5518 { \
5519 first_error (_(reg_expected_msgs[regtype])); \
5520 goto failure; \
5521 } \
5522 inst.operands[i].reg = val; \
5523 inst.operands[i].isreg = 1; \
5524 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5525 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
5526 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
5527 || rtype == REG_TYPE_VFD \
5528 || rtype == REG_TYPE_NQ); \
5529 } while (0)
5530
5531 #define po_reg_or_goto(regtype, label) do { \
5532 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5533 &inst.operands[i].vectype); \
5534 if (val == FAIL) \
5535 goto label; \
5536 \
5537 inst.operands[i].reg = val; \
5538 inst.operands[i].isreg = 1; \
5539 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5540 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
5541 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
5542 || rtype == REG_TYPE_VFD \
5543 || rtype == REG_TYPE_NQ); \
5544 } while (0)
5545
5546 #define po_imm_or_fail(min, max, popt) do { \
5547 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
5548 goto failure; \
5549 inst.operands[i].imm = val; \
5550 } while (0)
5551
5552 #define po_scalar_or_goto(elsz, label) do { \
5553 val = parse_scalar (&str, elsz, &inst.operands[i].vectype); \
5554 if (val == FAIL) \
5555 goto label; \
5556 inst.operands[i].reg = val; \
5557 inst.operands[i].isscalar = 1; \
5558 } while (0)
5559
5560 #define po_misc_or_fail(expr) do { \
5561 if (expr) \
5562 goto failure; \
5563 } while (0)
5564
5565 #define po_misc_or_fail_no_backtrack(expr) do { \
5566 result = expr; \
5567 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)\
5568 backtrack_pos = 0; \
5569 if (result != PARSE_OPERAND_SUCCESS) \
5570 goto failure; \
5571 } while (0)
5572
5573 skip_whitespace (str);
5574
5575 for (i = 0; upat[i] != OP_stop; i++)
5576 {
5577 if (upat[i] >= OP_FIRST_OPTIONAL)
5578 {
5579 /* Remember where we are in case we need to backtrack. */
5580 assert (!backtrack_pos);
5581 backtrack_pos = str;
5582 backtrack_error = inst.error;
5583 backtrack_index = i;
5584 }
5585
5586 if (i > 0 && (i > 1 || inst.operands[0].present))
5587 po_char_or_fail (',');
5588
5589 switch (upat[i])
5590 {
5591 /* Registers */
5592 case OP_oRRnpc:
5593 case OP_RRnpc:
5594 case OP_oRR:
5595 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
5596 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
5597 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
5598 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
5599 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
5600 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
5601 case OP_oRND:
5602 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
5603 case OP_RVC: po_reg_or_fail (REG_TYPE_VFC); break;
5604 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
5605 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
5606 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
5607 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
5608 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
5609 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
5610 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
5611 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
5612 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
5613 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
5614 case OP_oRNQ:
5615 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
5616 case OP_oRNDQ:
5617 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
5618 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
5619 case OP_oRNSDQ:
5620 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
5621
5622 /* Neon scalar. Using an element size of 8 means that some invalid
5623 scalars are accepted here, so deal with those in later code. */
5624 case OP_RNSC: po_scalar_or_goto (8, failure); break;
5625
5626 /* WARNING: We can expand to two operands here. This has the potential
5627 to totally confuse the backtracking mechanism! It will be OK at
5628 least as long as we don't try to use optional args as well,
5629 though. */
5630 case OP_NILO:
5631 {
5632 po_reg_or_goto (REG_TYPE_NDQ, try_imm);
5633 inst.operands[i].present = 1;
5634 i++;
5635 skip_past_comma (&str);
5636 po_reg_or_goto (REG_TYPE_NDQ, one_reg_only);
5637 break;
5638 one_reg_only:
5639 /* Optional register operand was omitted. Unfortunately, it's in
5640 operands[i-1] and we need it to be in inst.operands[i]. Fix that
5641 here (this is a bit grotty). */
5642 inst.operands[i] = inst.operands[i-1];
5643 inst.operands[i-1].present = 0;
5644 break;
5645 try_imm:
5646 /* There's a possibility of getting a 64-bit immediate here, so
5647 we need special handling. */
5648 if (parse_big_immediate (&str, i) == FAIL)
5649 {
5650 inst.error = _("immediate value is out of range");
5651 goto failure;
5652 }
5653 }
5654 break;
5655
5656 case OP_RNDQ_I0:
5657 {
5658 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
5659 break;
5660 try_imm0:
5661 po_imm_or_fail (0, 0, TRUE);
5662 }
5663 break;
5664
5665 case OP_RVSD_I0:
5666 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
5667 break;
5668
5669 case OP_RR_RNSC:
5670 {
5671 po_scalar_or_goto (8, try_rr);
5672 break;
5673 try_rr:
5674 po_reg_or_fail (REG_TYPE_RN);
5675 }
5676 break;
5677
5678 case OP_RNSDQ_RNSC:
5679 {
5680 po_scalar_or_goto (8, try_nsdq);
5681 break;
5682 try_nsdq:
5683 po_reg_or_fail (REG_TYPE_NSDQ);
5684 }
5685 break;
5686
5687 case OP_RNDQ_RNSC:
5688 {
5689 po_scalar_or_goto (8, try_ndq);
5690 break;
5691 try_ndq:
5692 po_reg_or_fail (REG_TYPE_NDQ);
5693 }
5694 break;
5695
5696 case OP_RND_RNSC:
5697 {
5698 po_scalar_or_goto (8, try_vfd);
5699 break;
5700 try_vfd:
5701 po_reg_or_fail (REG_TYPE_VFD);
5702 }
5703 break;
5704
5705 case OP_VMOV:
5706 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
5707 not careful then bad things might happen. */
5708 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
5709 break;
5710
5711 case OP_RNDQ_IMVNb:
5712 {
5713 po_reg_or_goto (REG_TYPE_NDQ, try_mvnimm);
5714 break;
5715 try_mvnimm:
5716 /* There's a possibility of getting a 64-bit immediate here, so
5717 we need special handling. */
5718 if (parse_big_immediate (&str, i) == FAIL)
5719 {
5720 inst.error = _("immediate value is out of range");
5721 goto failure;
5722 }
5723 }
5724 break;
5725
5726 case OP_RNDQ_I63b:
5727 {
5728 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
5729 break;
5730 try_shimm:
5731 po_imm_or_fail (0, 63, TRUE);
5732 }
5733 break;
5734
5735 case OP_RRnpcb:
5736 po_char_or_fail ('[');
5737 po_reg_or_fail (REG_TYPE_RN);
5738 po_char_or_fail (']');
5739 break;
5740
5741 case OP_RRw:
5742 case OP_oRRw:
5743 po_reg_or_fail (REG_TYPE_RN);
5744 if (skip_past_char (&str, '!') == SUCCESS)
5745 inst.operands[i].writeback = 1;
5746 break;
5747
5748 /* Immediates */
5749 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
5750 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
5751 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
5752 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
5753 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
5754 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
5755 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
5756 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
5757 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
5758 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
5759 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
5760 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
5761
5762 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
5763 case OP_oI7b:
5764 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
5765 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
5766 case OP_oI31b:
5767 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
5768 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
5769 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
5770
5771 /* Immediate variants */
5772 case OP_oI255c:
5773 po_char_or_fail ('{');
5774 po_imm_or_fail (0, 255, TRUE);
5775 po_char_or_fail ('}');
5776 break;
5777
5778 case OP_I31w:
5779 /* The expression parser chokes on a trailing !, so we have
5780 to find it first and zap it. */
5781 {
5782 char *s = str;
5783 while (*s && *s != ',')
5784 s++;
5785 if (s[-1] == '!')
5786 {
5787 s[-1] = '\0';
5788 inst.operands[i].writeback = 1;
5789 }
5790 po_imm_or_fail (0, 31, TRUE);
5791 if (str == s - 1)
5792 str = s;
5793 }
5794 break;
5795
5796 /* Expressions */
5797 case OP_EXPi: EXPi:
5798 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5799 GE_OPT_PREFIX));
5800 break;
5801
5802 case OP_EXP:
5803 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5804 GE_NO_PREFIX));
5805 break;
5806
5807 case OP_EXPr: EXPr:
5808 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5809 GE_NO_PREFIX));
5810 if (inst.reloc.exp.X_op == O_symbol)
5811 {
5812 val = parse_reloc (&str);
5813 if (val == -1)
5814 {
5815 inst.error = _("unrecognized relocation suffix");
5816 goto failure;
5817 }
5818 else if (val != BFD_RELOC_UNUSED)
5819 {
5820 inst.operands[i].imm = val;
5821 inst.operands[i].hasreloc = 1;
5822 }
5823 }
5824 break;
5825
5826 /* Operand for MOVW or MOVT. */
5827 case OP_HALF:
5828 po_misc_or_fail (parse_half (&str));
5829 break;
5830
5831 /* Register or expression */
5832 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
5833 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
5834
5835 /* Register or immediate */
5836 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
5837 I0: po_imm_or_fail (0, 0, FALSE); break;
5838
5839 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
5840 IF:
5841 if (!is_immediate_prefix (*str))
5842 goto bad_args;
5843 str++;
5844 val = parse_fpa_immediate (&str);
5845 if (val == FAIL)
5846 goto failure;
5847 /* FPA immediates are encoded as registers 8-15.
5848 parse_fpa_immediate has already applied the offset. */
5849 inst.operands[i].reg = val;
5850 inst.operands[i].isreg = 1;
5851 break;
5852
5853 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
5854 I32z: po_imm_or_fail (0, 32, FALSE); break;
5855
5856 /* Two kinds of register */
5857 case OP_RIWR_RIWC:
5858 {
5859 struct reg_entry *rege = arm_reg_parse_multi (&str);
5860 if (!rege
5861 || (rege->type != REG_TYPE_MMXWR
5862 && rege->type != REG_TYPE_MMXWC
5863 && rege->type != REG_TYPE_MMXWCG))
5864 {
5865 inst.error = _("iWMMXt data or control register expected");
5866 goto failure;
5867 }
5868 inst.operands[i].reg = rege->number;
5869 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
5870 }
5871 break;
5872
5873 case OP_RIWC_RIWG:
5874 {
5875 struct reg_entry *rege = arm_reg_parse_multi (&str);
5876 if (!rege
5877 || (rege->type != REG_TYPE_MMXWC
5878 && rege->type != REG_TYPE_MMXWCG))
5879 {
5880 inst.error = _("iWMMXt control register expected");
5881 goto failure;
5882 }
5883 inst.operands[i].reg = rege->number;
5884 inst.operands[i].isreg = 1;
5885 }
5886 break;
5887
5888 /* Misc */
5889 case OP_CPSF: val = parse_cps_flags (&str); break;
5890 case OP_ENDI: val = parse_endian_specifier (&str); break;
5891 case OP_oROR: val = parse_ror (&str); break;
5892 case OP_PSR: val = parse_psr (&str); break;
5893 case OP_COND: val = parse_cond (&str); break;
5894 case OP_oBARRIER:val = parse_barrier (&str); break;
5895
5896 case OP_RVC_PSR:
5897 po_reg_or_goto (REG_TYPE_VFC, try_psr);
5898 inst.operands[i].isvec = 1; /* Mark VFP control reg as vector. */
5899 break;
5900 try_psr:
5901 val = parse_psr (&str);
5902 break;
5903
5904 case OP_APSR_RR:
5905 po_reg_or_goto (REG_TYPE_RN, try_apsr);
5906 break;
5907 try_apsr:
5908 	  /* Parse "APSR_nzcv" operand (for FMSTAT-equivalent MRS
5909 instruction). */
5910 if (strncasecmp (str, "APSR_", 5) == 0)
5911 {
5912 unsigned found = 0;
5913 str += 5;
5914 while (found < 15)
5915 switch (*str++)
5916 {
5917 case 'c': found = (found & 1) ? 16 : found | 1; break;
5918 case 'n': found = (found & 2) ? 16 : found | 2; break;
5919 case 'z': found = (found & 4) ? 16 : found | 4; break;
5920 case 'v': found = (found & 8) ? 16 : found | 8; break;
5921 default: found = 16;
5922 }
5923 if (found != 15)
5924 goto failure;
5925 inst.operands[i].isvec = 1;
5926 }
5927 else
5928 goto failure;
5929 break;
5930
5931 case OP_TB:
5932 po_misc_or_fail (parse_tb (&str));
5933 break;
5934
5935 /* Register lists */
5936 case OP_REGLST:
5937 val = parse_reg_list (&str);
5938 if (*str == '^')
5939 {
5940 inst.operands[1].writeback = 1;
5941 str++;
5942 }
5943 break;
5944
5945 case OP_VRSLST:
5946 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
5947 break;
5948
5949 case OP_VRDLST:
5950 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
5951 break;
5952
5953 case OP_VRSDLST:
5954 /* Allow Q registers too. */
5955 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5956 REGLIST_NEON_D);
5957 if (val == FAIL)
5958 {
5959 inst.error = NULL;
5960 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5961 REGLIST_VFP_S);
5962 inst.operands[i].issingle = 1;
5963 }
5964 break;
5965
5966 case OP_NRDLST:
5967 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5968 REGLIST_NEON_D);
5969 break;
5970
5971 case OP_NSTRLST:
5972 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
5973 &inst.operands[i].vectype);
5974 break;
5975
5976 /* Addressing modes */
5977 case OP_ADDR:
5978 po_misc_or_fail (parse_address (&str, i));
5979 break;
5980
5981 case OP_ADDRGLDR:
5982 po_misc_or_fail_no_backtrack (
5983 parse_address_group_reloc (&str, i, GROUP_LDR));
5984 break;
5985
5986 case OP_ADDRGLDRS:
5987 po_misc_or_fail_no_backtrack (
5988 parse_address_group_reloc (&str, i, GROUP_LDRS));
5989 break;
5990
5991 case OP_ADDRGLDC:
5992 po_misc_or_fail_no_backtrack (
5993 parse_address_group_reloc (&str, i, GROUP_LDC));
5994 break;
5995
5996 case OP_SH:
5997 po_misc_or_fail (parse_shifter_operand (&str, i));
5998 break;
5999
6000 case OP_SHG:
6001 po_misc_or_fail_no_backtrack (
6002 parse_shifter_operand_group_reloc (&str, i));
6003 break;
6004
6005 case OP_oSHll:
6006 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
6007 break;
6008
6009 case OP_oSHar:
6010 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
6011 break;
6012
6013 case OP_oSHllar:
6014 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
6015 break;
6016
6017 default:
6018 as_fatal ("unhandled operand code %d", upat[i]);
6019 }
6020
6021 /* Various value-based sanity checks and shared operations. We
6022 do not signal immediate failures for the register constraints;
6023 this allows a syntax error to take precedence. */
6024 switch (upat[i])
6025 {
6026 case OP_oRRnpc:
6027 case OP_RRnpc:
6028 case OP_RRnpcb:
6029 case OP_RRw:
6030 case OP_oRRw:
6031 case OP_RRnpc_I0:
6032 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
6033 inst.error = BAD_PC;
6034 break;
6035
6036 case OP_CPSF:
6037 case OP_ENDI:
6038 case OP_oROR:
6039 case OP_PSR:
6040 case OP_RVC_PSR:
6041 case OP_COND:
6042 case OP_oBARRIER:
6043 case OP_REGLST:
6044 case OP_VRSLST:
6045 case OP_VRDLST:
6046 case OP_VRSDLST:
6047 case OP_NRDLST:
6048 case OP_NSTRLST:
6049 if (val == FAIL)
6050 goto failure;
6051 inst.operands[i].imm = val;
6052 break;
6053
6054 default:
6055 break;
6056 }
6057
6058 /* If we get here, this operand was successfully parsed. */
6059 inst.operands[i].present = 1;
6060 continue;
6061
6062 bad_args:
6063 inst.error = BAD_ARGS;
6064
6065 failure:
6066 if (!backtrack_pos)
6067 {
6068 /* The parse routine should already have set inst.error, but set a
6069 	     default here just in case. */
6070 if (!inst.error)
6071 inst.error = _("syntax error");
6072 return FAIL;
6073 }
6074
6075 /* Do not backtrack over a trailing optional argument that
6076 absorbed some text. We will only fail again, with the
6077 'garbage following instruction' error message, which is
6078 probably less helpful than the current one. */
6079 if (backtrack_index == i && backtrack_pos != str
6080 && upat[i+1] == OP_stop)
6081 {
6082 if (!inst.error)
6083 inst.error = _("syntax error");
6084 return FAIL;
6085 }
6086
6087 /* Try again, skipping the optional argument at backtrack_pos. */
6088 str = backtrack_pos;
6089 inst.error = backtrack_error;
6090 inst.operands[backtrack_index].present = 0;
6091 i = backtrack_index;
6092 backtrack_pos = 0;
6093 }
6094
6095 /* Check that we have parsed all the arguments. */
6096 if (*str != '\0' && !inst.error)
6097 inst.error = _("garbage following instruction");
6098
6099 return inst.error ? FAIL : SUCCESS;
6100 }
6101
6102 #undef po_char_or_fail
6103 #undef po_reg_or_fail
6104 #undef po_reg_or_goto
6105 #undef po_imm_or_fail
6106 #undef po_scalar_or_goto
6107 \f
6108 /* Shorthand macro for instruction encoding functions issuing errors. */
6109 #define constraint(expr, err) do { \
6110 if (expr) \
6111 { \
6112 inst.error = err; \
6113 return; \
6114 } \
6115 } while (0)
6116
6117 /* Functions for operand encoding. ARM, then Thumb. */
6118
6119 /* The masks avoid an undefined shift by 32 when n is 0.  */
#define rotate_left(v, n) ((v) << ((n) & 31) | (v) >> ((32 - (n)) & 31))
6120
6121 /* If VAL can be encoded in the immediate field of an ARM instruction,
6122 return the encoded form. Otherwise, return FAIL. */
6123
6124 static unsigned int
6125 encode_arm_immediate (unsigned int val)
6126 {
6127 unsigned int a, i;
6128
6129 for (i = 0; i < 32; i += 2)
6130 if ((a = rotate_left (val, i)) <= 0xff)
6131 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
6132
6133 return FAIL;
6134 }
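
/* For example, 0xab00 is 0xab rotated left by 24 bits, so the loop returns
   0xab | (24 << 7) = 0xcab, i.e. rotate field 12 and constant 0xab (the
   instruction rotates the 8-bit constant right by twice the rotate field).
   0x12345 has more than 8 significant bits under every even rotation and
   yields FAIL.  */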
6135
6136 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
6137 return the encoded form. Otherwise, return FAIL. */
6138 static unsigned int
6139 encode_thumb32_immediate (unsigned int val)
6140 {
6141 unsigned int a, i;
6142
6143 if (val <= 0xff)
6144 return val;
6145
6146 for (i = 1; i <= 24; i++)
6147 {
6148 a = val >> i;
6149 if ((val & ~(0xff << i)) == 0)
6150 return ((val >> i) & 0x7f) | ((32 - i) << 7);
6151 }
6152
6153 a = val & 0xff;
6154 if (val == ((a << 16) | a))
6155 return 0x100 | a;
6156 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
6157 return 0x300 | a;
6158
6159 a = val & 0xff00;
6160 if (val == ((a << 16) | a))
6161 return 0x200 | (a >> 8);
6162
6163 return FAIL;
6164 }
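
/* Worked examples for the replicated patterns above: 0x00ab00ab matches
   the (a << 16) | a test and encodes as 0x100 | 0xab = 0x1ab, while
   0xabababab matches the four-byte replication and encodes as 0x3ab.
   A value such as 0x00012345 fits none of the patterns and yields FAIL.  */
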
6165 /* Encode a VFP SP or DP register number into inst.instruction. */
6166
6167 static void
6168 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
6169 {
6170 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
6171 && reg > 15)
6172 {
6173 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
6174 {
6175 if (thumb_mode)
6176 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
6177 fpu_vfp_ext_v3);
6178 else
6179 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
6180 fpu_vfp_ext_v3);
6181 }
6182 else
6183 {
6184 first_error (_("D register out of range for selected VFP version"));
6185 return;
6186 }
6187 }
6188
6189 switch (pos)
6190 {
6191 case VFP_REG_Sd:
6192 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
6193 break;
6194
6195 case VFP_REG_Sn:
6196 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
6197 break;
6198
6199 case VFP_REG_Sm:
6200 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
6201 break;
6202
6203 case VFP_REG_Dd:
6204 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
6205 break;
6206
6207 case VFP_REG_Dn:
6208 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
6209 break;
6210
6211 case VFP_REG_Dm:
6212 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
6213 break;
6214
6215 default:
6216 abort ();
6217 }
6218 }
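
/* For instance, encoding d17 in the Dd position places the low four bits
   (1) in bits 12-15 and the fifth bit (1) in bit 22; because the register
   number is above 15 this also requires, and records the use of, the
   VFPv3 D16-D31 extension handled above.  */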
6219
6220 /* Encode a <shift> in an ARM-format instruction. The immediate,
6221 if any, is handled by md_apply_fix. */
6222 static void
6223 encode_arm_shift (int i)
6224 {
6225 if (inst.operands[i].shift_kind == SHIFT_RRX)
6226 inst.instruction |= SHIFT_ROR << 5;
6227 else
6228 {
6229 inst.instruction |= inst.operands[i].shift_kind << 5;
6230 if (inst.operands[i].immisreg)
6231 {
6232 inst.instruction |= SHIFT_BY_REG;
6233 inst.instruction |= inst.operands[i].imm << 8;
6234 }
6235 else
6236 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6237 }
6238 }
6239
6240 static void
6241 encode_arm_shifter_operand (int i)
6242 {
6243 if (inst.operands[i].isreg)
6244 {
6245 inst.instruction |= inst.operands[i].reg;
6246 encode_arm_shift (i);
6247 }
6248 else
6249 inst.instruction |= INST_IMMEDIATE;
6250 }
6251
6252 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
6253 static void
6254 encode_arm_addr_mode_common (int i, bfd_boolean is_t)
6255 {
6256 assert (inst.operands[i].isreg);
6257 inst.instruction |= inst.operands[i].reg << 16;
6258
6259 if (inst.operands[i].preind)
6260 {
6261 if (is_t)
6262 {
6263 inst.error = _("instruction does not accept preindexed addressing");
6264 return;
6265 }
6266 inst.instruction |= PRE_INDEX;
6267 if (inst.operands[i].writeback)
6268 inst.instruction |= WRITE_BACK;
6269
6270 }
6271 else if (inst.operands[i].postind)
6272 {
6273 assert (inst.operands[i].writeback);
6274 if (is_t)
6275 inst.instruction |= WRITE_BACK;
6276 }
6277 else /* unindexed - only for coprocessor */
6278 {
6279 inst.error = _("instruction does not accept unindexed addressing");
6280 return;
6281 }
6282
6283 if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
6284 && (((inst.instruction & 0x000f0000) >> 16)
6285 == ((inst.instruction & 0x0000f000) >> 12)))
6286 as_warn ((inst.instruction & LOAD_BIT)
6287 ? _("destination register same as write-back base")
6288 : _("source register same as write-back base"));
6289 }
6290
6291 /* inst.operands[i] was set up by parse_address. Encode it into an
6292 ARM-format mode 2 load or store instruction. If is_t is true,
6293 reject forms that cannot be used with a T instruction (i.e. not
6294 post-indexed). */
6295 static void
6296 encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
6297 {
6298 encode_arm_addr_mode_common (i, is_t);
6299
6300 if (inst.operands[i].immisreg)
6301 {
6302 inst.instruction |= INST_IMMEDIATE; /* yes, this is backwards */
6303 inst.instruction |= inst.operands[i].imm;
6304 if (!inst.operands[i].negative)
6305 inst.instruction |= INDEX_UP;
6306 if (inst.operands[i].shifted)
6307 {
6308 if (inst.operands[i].shift_kind == SHIFT_RRX)
6309 inst.instruction |= SHIFT_ROR << 5;
6310 else
6311 {
6312 inst.instruction |= inst.operands[i].shift_kind << 5;
6313 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6314 }
6315 }
6316 }
6317 else /* immediate offset in inst.reloc */
6318 {
6319 if (inst.reloc.type == BFD_RELOC_UNUSED)
6320 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
6321 }
6322 }
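/* Illustrative note: for a register offset such as [r1, r2, lsl #2]
   the code above sets the (counter-intuitive) I bit, puts Rm in bits
   0-3, sets U unless the offset was negative, and leaves the shift
   amount to be filled in by the BFD_RELOC_ARM_SHIFT_IMM fixup; a plain
   immediate offset is instead handled entirely through
   BFD_RELOC_ARM_OFFSET_IMM.  */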
6323
6324 /* inst.operands[i] was set up by parse_address. Encode it into an
6325 ARM-format mode 3 load or store instruction. Reject forms that
6326 cannot be used with such instructions. If is_t is true, reject
6327 forms that cannot be used with a T instruction (i.e. not
6328 post-indexed). */
6329 static void
6330 encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
6331 {
6332 if (inst.operands[i].immisreg && inst.operands[i].shifted)
6333 {
6334 inst.error = _("instruction does not accept scaled register index");
6335 return;
6336 }
6337
6338 encode_arm_addr_mode_common (i, is_t);
6339
6340 if (inst.operands[i].immisreg)
6341 {
6342 inst.instruction |= inst.operands[i].imm;
6343 if (!inst.operands[i].negative)
6344 inst.instruction |= INDEX_UP;
6345 }
6346 else /* immediate offset in inst.reloc */
6347 {
6348 inst.instruction |= HWOFFSET_IMM;
6349 if (inst.reloc.type == BFD_RELOC_UNUSED)
6350 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
6351 }
6352 }
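/* Illustrative note: mode 3 (ldrh/strh/ldrsb/ldrd and friends) only
   allows an unscaled register offset or an 8-bit immediate, which the
   BFD_RELOC_ARM_OFFSET_IMM8 fixup later splits across bits 8-11 and
   bits 0-3; a scaled index such as [r1, r2, lsl #2] is rejected
   above.  */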
6353
6354 /* inst.operands[i] was set up by parse_address. Encode it into an
6355 ARM-format instruction. Reject all forms which cannot be encoded
6356 into a coprocessor load/store instruction. If wb_ok is false,
6357 reject use of writeback; if unind_ok is false, reject use of
6358 unindexed addressing. If reloc_override is not 0, use it instead
6359 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
6360 (in which case it is preserved). */
6361
6362 static int
6363 encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
6364 {
6365 inst.instruction |= inst.operands[i].reg << 16;
6366
6367 assert (!(inst.operands[i].preind && inst.operands[i].postind));
6368
6369 if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
6370 {
6371 assert (!inst.operands[i].writeback);
6372 if (!unind_ok)
6373 {
6374 inst.error = _("instruction does not support unindexed addressing");
6375 return FAIL;
6376 }
6377 inst.instruction |= inst.operands[i].imm;
6378 inst.instruction |= INDEX_UP;
6379 return SUCCESS;
6380 }
6381
6382 if (inst.operands[i].preind)
6383 inst.instruction |= PRE_INDEX;
6384
6385 if (inst.operands[i].writeback)
6386 {
6387 if (inst.operands[i].reg == REG_PC)
6388 {
6389 inst.error = _("pc may not be used with write-back");
6390 return FAIL;
6391 }
6392 if (!wb_ok)
6393 {
6394 inst.error = _("instruction does not support writeback");
6395 return FAIL;
6396 }
6397 inst.instruction |= WRITE_BACK;
6398 }
6399
6400 if (reloc_override)
6401 inst.reloc.type = reloc_override;
6402 else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
6403 || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
6404 && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
6405 {
6406 if (thumb_mode)
6407 inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
6408 else
6409 inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
6410 }
6411
6412 return SUCCESS;
6413 }
6414
6415 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
6416 Determine whether it can be performed with a move instruction; if
6417 it can, convert inst.instruction to that move instruction and
6418 return 1; if it can't, convert inst.instruction to a literal-pool
6419 load and return 0. If this is not a valid thing to do in the
6420 current context, set inst.error and return 1.
6421
6422 inst.operands[i] describes the destination register. */
6423
6424 static int
6425 move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
6426 {
6427 unsigned long tbit;
6428
6429 if (thumb_p)
6430 tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
6431 else
6432 tbit = LOAD_BIT;
6433
6434 if ((inst.instruction & tbit) == 0)
6435 {
6436 inst.error = _("invalid pseudo operation");
6437 return 1;
6438 }
6439 if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
6440 {
6441 inst.error = _("constant expression expected");
6442 return 1;
6443 }
6444 if (inst.reloc.exp.X_op == O_constant)
6445 {
6446 if (thumb_p)
6447 {
6448 if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
6449 {
6450 /* This can be done with a mov(1) instruction. */
6451 inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
6452 inst.instruction |= inst.reloc.exp.X_add_number;
6453 return 1;
6454 }
6455 }
6456 else
6457 {
6458 int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
6459 if (value != FAIL)
6460 {
6461 /* This can be done with a mov instruction. */
6462 inst.instruction &= LITERAL_MASK;
6463 inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
6464 inst.instruction |= value & 0xfff;
6465 return 1;
6466 }
6467
6468 value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
6469 if (value != FAIL)
6470 {
6471 /* This can be done with a mvn instruction. */
6472 inst.instruction &= LITERAL_MASK;
6473 inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
6474 inst.instruction |= value & 0xfff;
6475 return 1;
6476 }
6477 }
6478 }
6479
6480 if (add_to_lit_pool () == FAIL)
6481 {
6482 inst.error = _("literal pool insertion failed");
6483 return 1;
6484 }
6485 inst.operands[1].reg = REG_PC;
6486 inst.operands[1].isreg = 1;
6487 inst.operands[1].preind = 1;
6488 inst.reloc.pc_rel = 1;
6489 inst.reloc.type = (thumb_p
6490 ? BFD_RELOC_ARM_THUMB_OFFSET
6491 : (mode_3
6492 ? BFD_RELOC_ARM_HWLITERAL
6493 : BFD_RELOC_ARM_LITERAL));
6494 return 0;
6495 }
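/* Illustrative note: given "ldr r0, =0xff" the code above rewrites the
   instruction as "mov r0, #0xff", and "ldr r0, =0xffffff00" becomes
   "mvn r0, #0xff"; a constant with no mov/mvn encoding (or a symbolic
   expression) is placed in the literal pool and loaded PC-relative
   instead.  */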
6496
6497 /* Functions for instruction encoding, sorted by subarchitecture.
6498 First some generics; their names are taken from the conventional
6499 bit positions for register arguments in ARM format instructions. */
6500
6501 static void
6502 do_noargs (void)
6503 {
6504 }
6505
6506 static void
6507 do_rd (void)
6508 {
6509 inst.instruction |= inst.operands[0].reg << 12;
6510 }
6511
6512 static void
6513 do_rd_rm (void)
6514 {
6515 inst.instruction |= inst.operands[0].reg << 12;
6516 inst.instruction |= inst.operands[1].reg;
6517 }
6518
6519 static void
6520 do_rd_rn (void)
6521 {
6522 inst.instruction |= inst.operands[0].reg << 12;
6523 inst.instruction |= inst.operands[1].reg << 16;
6524 }
6525
6526 static void
6527 do_rn_rd (void)
6528 {
6529 inst.instruction |= inst.operands[0].reg << 16;
6530 inst.instruction |= inst.operands[1].reg << 12;
6531 }
6532
6533 static void
6534 do_rd_rm_rn (void)
6535 {
6536 unsigned Rn = inst.operands[2].reg;
6537 /* Enforce restrictions on SWP instruction. */
6538 if ((inst.instruction & 0x0fbfffff) == 0x01000090)
6539 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
6540 _("Rn must not overlap other operands"));
6541 inst.instruction |= inst.operands[0].reg << 12;
6542 inst.instruction |= inst.operands[1].reg;
6543 inst.instruction |= Rn << 16;
6544 }
6545
6546 static void
6547 do_rd_rn_rm (void)
6548 {
6549 inst.instruction |= inst.operands[0].reg << 12;
6550 inst.instruction |= inst.operands[1].reg << 16;
6551 inst.instruction |= inst.operands[2].reg;
6552 }
6553
6554 static void
6555 do_rm_rd_rn (void)
6556 {
6557 inst.instruction |= inst.operands[0].reg;
6558 inst.instruction |= inst.operands[1].reg << 12;
6559 inst.instruction |= inst.operands[2].reg << 16;
6560 }
6561
6562 static void
6563 do_imm0 (void)
6564 {
6565 inst.instruction |= inst.operands[0].imm;
6566 }
6567
6568 static void
6569 do_rd_cpaddr (void)
6570 {
6571 inst.instruction |= inst.operands[0].reg << 12;
6572 encode_arm_cp_address (1, TRUE, TRUE, 0);
6573 }
6574
6575 /* ARM instructions, in alphabetical order by function name (except
6576 that wrapper functions appear immediately after the function they
6577 wrap). */
6578
6579 /* This is a pseudo-op of the form "adr rd, label" to be converted
6580 into a relative address of the form "add rd, pc, #label-.-8". */
6581
6582 static void
6583 do_adr (void)
6584 {
6585 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
6586
6587 /* Frag hacking will turn this into a sub instruction if the offset turns
6588 out to be negative. */
6589 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
6590 inst.reloc.pc_rel = 1;
6591 inst.reloc.exp.X_add_number -= 8;
6592 }
6593
6594 /* This is a pseudo-op of the form "adrl rd, label" to be converted
6595 into a relative address of the form:
6596 add rd, pc, #low(label-.-8)
6597 add rd, rd, #high(label-.-8). */
6598
6599 static void
6600 do_adrl (void)
6601 {
6602 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
6603
6604 /* Frag hacking will turn this into a sub instruction if the offset turns
6605 out to be negative. */
6606 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
6607 inst.reloc.pc_rel = 1;
6608 inst.size = INSN_SIZE * 2;
6609 inst.reloc.exp.X_add_number -= 8;
6610 }
6611
6612 static void
6613 do_arit (void)
6614 {
6615 if (!inst.operands[1].present)
6616 inst.operands[1].reg = inst.operands[0].reg;
6617 inst.instruction |= inst.operands[0].reg << 12;
6618 inst.instruction |= inst.operands[1].reg << 16;
6619 encode_arm_shifter_operand (2);
6620 }
6621
6622 static void
6623 do_barrier (void)
6624 {
6625 if (inst.operands[0].present)
6626 {
6627 constraint ((inst.instruction & 0xf0) != 0x40
6628 && inst.operands[0].imm != 0xf,
6629 "bad barrier type");
6630 inst.instruction |= inst.operands[0].imm;
6631 }
6632 else
6633 inst.instruction |= 0xf;
6634 }
6635
6636 static void
6637 do_bfc (void)
6638 {
6639 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
6640 constraint (msb > 32, _("bit-field extends past end of register"));
6641 /* The instruction encoding stores the LSB and MSB,
6642 not the LSB and width. */
6643 inst.instruction |= inst.operands[0].reg << 12;
6644 inst.instruction |= inst.operands[1].imm << 7;
6645 inst.instruction |= (msb - 1) << 16;
6646 }
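/* Illustrative note: "bfc r0, #8, #8" therefore encodes lsb == 8 in
   bits 7-11 and msb == 15 (lsb + width - 1) in bits 16-20, matching
   the architectural lsb/msb form rather than the assembler's
   lsb/width syntax.  */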
6647
6648 static void
6649 do_bfi (void)
6650 {
6651 unsigned int msb;
6652
6653 /* #0 in second position is alternative syntax for bfc, which is
6654 the same instruction but with REG_PC in the Rm field. */
6655 if (!inst.operands[1].isreg)
6656 inst.operands[1].reg = REG_PC;
6657
6658 msb = inst.operands[2].imm + inst.operands[3].imm;
6659 constraint (msb > 32, _("bit-field extends past end of register"));
6660 /* The instruction encoding stores the LSB and MSB,
6661 not the LSB and width. */
6662 inst.instruction |= inst.operands[0].reg << 12;
6663 inst.instruction |= inst.operands[1].reg;
6664 inst.instruction |= inst.operands[2].imm << 7;
6665 inst.instruction |= (msb - 1) << 16;
6666 }
6667
6668 static void
6669 do_bfx (void)
6670 {
6671 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
6672 _("bit-field extends past end of register"));
6673 inst.instruction |= inst.operands[0].reg << 12;
6674 inst.instruction |= inst.operands[1].reg;
6675 inst.instruction |= inst.operands[2].imm << 7;
6676 inst.instruction |= (inst.operands[3].imm - 1) << 16;
6677 }
6678
6679 /* ARM V5 breakpoint instruction (argument parse)
6680 BKPT <16 bit unsigned immediate>
6681 Instruction is not conditional.
6682 The bit pattern given in insns[] has the COND_ALWAYS condition,
6683 and it is an error if the caller tried to override that. */
6684
6685 static void
6686 do_bkpt (void)
6687 {
6688 /* Top 12 of 16 bits to bits 19:8. */
6689 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
6690
6691 /* Bottom 4 of 16 bits to bits 3:0. */
6692 inst.instruction |= inst.operands[0].imm & 0xf;
6693 }
6694
6695 static void
6696 encode_branch (int default_reloc)
6697 {
6698 if (inst.operands[0].hasreloc)
6699 {
6700 constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32,
6701 _("the only suffix valid here is '(plt)'"));
6702 inst.reloc.type = BFD_RELOC_ARM_PLT32;
6703 }
6704 else
6705 {
6706 inst.reloc.type = default_reloc;
6707 }
6708 inst.reloc.pc_rel = 1;
6709 }
6710
6711 static void
6712 do_branch (void)
6713 {
6714 #ifdef OBJ_ELF
6715 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6716 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
6717 else
6718 #endif
6719 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
6720 }
6721
6722 static void
6723 do_bl (void)
6724 {
6725 #ifdef OBJ_ELF
6726 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6727 {
6728 if (inst.cond == COND_ALWAYS)
6729 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
6730 else
6731 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
6732 }
6733 else
6734 #endif
6735 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
6736 }
6737
6738 /* ARM V5 branch-link-exchange instruction (argument parse)
6739 BLX <target_addr> ie BLX(1)
6740 BLX{<condition>} <Rm> ie BLX(2)
6741 Unfortunately, there are two different opcodes for this mnemonic.
6742 So, the insns[].value is not used, and the code here zaps values
6743 into inst.instruction.
6744 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
6745
6746 static void
6747 do_blx (void)
6748 {
6749 if (inst.operands[0].isreg)
6750 {
6751 /* Arg is a register; the opcode provided by insns[] is correct.
6752 It is not illegal to do "blx pc", just useless. */
6753 if (inst.operands[0].reg == REG_PC)
6754 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
6755
6756 inst.instruction |= inst.operands[0].reg;
6757 }
6758 else
6759 {
6760 /* Arg is an address; this instruction cannot be executed
6761 conditionally, and the opcode must be adjusted. */
6762 constraint (inst.cond != COND_ALWAYS, BAD_COND);
6763 inst.instruction = 0xfa000000;
6764 #ifdef OBJ_ELF
6765 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6766 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
6767 else
6768 #endif
6769 encode_branch (BFD_RELOC_ARM_PCREL_BLX);
6770 }
6771 }
6772
6773 static void
6774 do_bx (void)
6775 {
6776 if (inst.operands[0].reg == REG_PC)
6777 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
6778
6779 inst.instruction |= inst.operands[0].reg;
6780 }
6781
6782
6783 /* ARM v5TEJ. Jump to Jazelle code. */
6784
6785 static void
6786 do_bxj (void)
6787 {
6788 if (inst.operands[0].reg == REG_PC)
6789 as_tsktsk (_("use of r15 in bxj is not really useful"));
6790
6791 inst.instruction |= inst.operands[0].reg;
6792 }
6793
6794 /* Co-processor data operation:
6795 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
6796 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
6797 static void
6798 do_cdp (void)
6799 {
6800 inst.instruction |= inst.operands[0].reg << 8;
6801 inst.instruction |= inst.operands[1].imm << 20;
6802 inst.instruction |= inst.operands[2].reg << 12;
6803 inst.instruction |= inst.operands[3].reg << 16;
6804 inst.instruction |= inst.operands[4].reg;
6805 inst.instruction |= inst.operands[5].imm << 5;
6806 }
6807
6808 static void
6809 do_cmp (void)
6810 {
6811 inst.instruction |= inst.operands[0].reg << 16;
6812 encode_arm_shifter_operand (1);
6813 }
6814
6815 /* Transfer between coprocessor and ARM registers.
6816 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
6817 MRC2
6818 MCR{cond}
6819 MCR2
6820
6821 No special properties. */
6822
6823 static void
6824 do_co_reg (void)
6825 {
6826 inst.instruction |= inst.operands[0].reg << 8;
6827 inst.instruction |= inst.operands[1].imm << 21;
6828 inst.instruction |= inst.operands[2].reg << 12;
6829 inst.instruction |= inst.operands[3].reg << 16;
6830 inst.instruction |= inst.operands[4].reg;
6831 inst.instruction |= inst.operands[5].imm << 5;
6832 }
6833
6834 /* Transfer between coprocessor register and pair of ARM registers.
6835 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
6836 MCRR2
6837 MRRC{cond}
6838 MRRC2
6839
6840 Two XScale instructions are special cases of these:
6841
6842 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
6843 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
6844
6845 Result unpredictable if Rd or Rn is R15. */
6846
6847 static void
6848 do_co_reg2c (void)
6849 {
6850 inst.instruction |= inst.operands[0].reg << 8;
6851 inst.instruction |= inst.operands[1].imm << 4;
6852 inst.instruction |= inst.operands[2].reg << 12;
6853 inst.instruction |= inst.operands[3].reg << 16;
6854 inst.instruction |= inst.operands[4].reg;
6855 }
6856
6857 static void
6858 do_cpsi (void)
6859 {
6860 inst.instruction |= inst.operands[0].imm << 6;
6861 if (inst.operands[1].present)
6862 {
6863 inst.instruction |= CPSI_MMOD;
6864 inst.instruction |= inst.operands[1].imm;
6865 }
6866 }
6867
6868 static void
6869 do_dbg (void)
6870 {
6871 inst.instruction |= inst.operands[0].imm;
6872 }
6873
6874 static void
6875 do_it (void)
6876 {
6877 /* There is no IT instruction in ARM mode. We
6878 process it but do not generate code for it. */
6879 inst.size = 0;
6880 }
6881
6882 static void
6883 do_ldmstm (void)
6884 {
6885 int base_reg = inst.operands[0].reg;
6886 int range = inst.operands[1].imm;
6887
6888 inst.instruction |= base_reg << 16;
6889 inst.instruction |= range;
6890
6891 if (inst.operands[1].writeback)
6892 inst.instruction |= LDM_TYPE_2_OR_3;
6893
6894 if (inst.operands[0].writeback)
6895 {
6896 inst.instruction |= WRITE_BACK;
6897 /* Check for unpredictable uses of writeback. */
6898 if (inst.instruction & LOAD_BIT)
6899 {
6900 /* Not allowed in LDM type 2. */
6901 if ((inst.instruction & LDM_TYPE_2_OR_3)
6902 && ((range & (1 << REG_PC)) == 0))
6903 as_warn (_("writeback of base register is UNPREDICTABLE"));
6904 /* Only allowed if base reg not in list for other types. */
6905 else if (range & (1 << base_reg))
6906 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
6907 }
6908 else /* STM. */
6909 {
6910 /* Not allowed for type 2. */
6911 if (inst.instruction & LDM_TYPE_2_OR_3)
6912 as_warn (_("writeback of base register is UNPREDICTABLE"));
6913 /* Only allowed if base reg not in list, or first in list. */
6914 else if ((range & (1 << base_reg))
6915 && (range & ((1 << base_reg) - 1)))
6916 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
6917 }
6918 }
6919 }
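/* Illustrative note: the writeback checks above mean that, for example,
   "ldmia r0!, {r0, r1}" draws the "base register when in register
   list" warning, and "stmia r1!, {r0, r1}" warns because the stored
   base is not the lowest register in the list.  */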
6920
6921 /* ARMv5TE load-consecutive (argument parse)
6922 Mode is like LDRH.
6923
6924 LDRccD R, mode
6925 STRccD R, mode. */
6926
6927 static void
6928 do_ldrd (void)
6929 {
6930 constraint (inst.operands[0].reg % 2 != 0,
6931 _("first destination register must be even"));
6932 constraint (inst.operands[1].present
6933 && inst.operands[1].reg != inst.operands[0].reg + 1,
6934 _("can only load two consecutive registers"));
6935 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
6936 constraint (!inst.operands[2].isreg, _("'[' expected"));
6937
6938 if (!inst.operands[1].present)
6939 inst.operands[1].reg = inst.operands[0].reg + 1;
6940
6941 if (inst.instruction & LOAD_BIT)
6942 {
6943 /* encode_arm_addr_mode_3 will diagnose overlap between the base
6944 register and the first register written; we have to diagnose
6945 overlap between the base and the second register written here. */
6946
6947 if (inst.operands[2].reg == inst.operands[1].reg
6948 && (inst.operands[2].writeback || inst.operands[2].postind))
6949 as_warn (_("base register written back, and overlaps "
6950 "second destination register"));
6951
6952 /* For an index-register load, the index register must not overlap the
6953 destination (even if not write-back). */
6954 else if (inst.operands[2].immisreg
6955 && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
6956 || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
6957 as_warn (_("index register overlaps destination register"));
6958 }
6959
6960 inst.instruction |= inst.operands[0].reg << 12;
6961 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
6962 }
6963
6964 static void
6965 do_ldrex (void)
6966 {
6967 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
6968 || inst.operands[1].postind || inst.operands[1].writeback
6969 || inst.operands[1].immisreg || inst.operands[1].shifted
6970 || inst.operands[1].negative
6971 /* This can arise if the programmer has written
6972 strex rN, rM, foo
6973 or if they have mistakenly used a register name as the last
6974 operand, eg:
6975 strex rN, rM, rX
6976 It is very difficult to distinguish between these two cases
6977 because "rX" might actually be a label. ie the register
6978 name has been occluded by a symbol of the same name. So we
6979 just generate a general 'bad addressing mode' type error
6980 message and leave it up to the programmer to discover the
6981 true cause and fix their mistake. */
6982 || (inst.operands[1].reg == REG_PC),
6983 BAD_ADDR_MODE);
6984
6985 constraint (inst.reloc.exp.X_op != O_constant
6986 || inst.reloc.exp.X_add_number != 0,
6987 _("offset must be zero in ARM encoding"));
6988
6989 inst.instruction |= inst.operands[0].reg << 12;
6990 inst.instruction |= inst.operands[1].reg << 16;
6991 inst.reloc.type = BFD_RELOC_UNUSED;
6992 }
6993
6994 static void
6995 do_ldrexd (void)
6996 {
6997 constraint (inst.operands[0].reg % 2 != 0,
6998 _("even register required"));
6999 constraint (inst.operands[1].present
7000 && inst.operands[1].reg != inst.operands[0].reg + 1,
7001 _("can only load two consecutive registers"));
7002 /* If op 1 were present and equal to PC, this function wouldn't
7003 have been called in the first place. */
7004 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
7005
7006 inst.instruction |= inst.operands[0].reg << 12;
7007 inst.instruction |= inst.operands[2].reg << 16;
7008 }
7009
7010 static void
7011 do_ldst (void)
7012 {
7013 inst.instruction |= inst.operands[0].reg << 12;
7014 if (!inst.operands[1].isreg)
7015 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
7016 return;
7017 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
7018 }
7019
7020 static void
7021 do_ldstt (void)
7022 {
7023 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
7024 reject [Rn,...]. */
7025 if (inst.operands[1].preind)
7026 {
7027 constraint (inst.reloc.exp.X_op != O_constant ||
7028 inst.reloc.exp.X_add_number != 0,
7029 _("this instruction requires a post-indexed address"));
7030
7031 inst.operands[1].preind = 0;
7032 inst.operands[1].postind = 1;
7033 inst.operands[1].writeback = 1;
7034 }
7035 inst.instruction |= inst.operands[0].reg << 12;
7036 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
7037 }
7038
7039 /* Halfword and signed-byte load/store operations. */
7040
7041 static void
7042 do_ldstv4 (void)
7043 {
7044 inst.instruction |= inst.operands[0].reg << 12;
7045 if (!inst.operands[1].isreg)
7046 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
7047 return;
7048 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
7049 }
7050
7051 static void
7052 do_ldsttv4 (void)
7053 {
7054 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
7055 reject [Rn,...]. */
7056 if (inst.operands[1].preind)
7057 {
7058 constraint (inst.reloc.exp.X_op != O_constant ||
7059 inst.reloc.exp.X_add_number != 0,
7060 _("this instruction requires a post-indexed address"));
7061
7062 inst.operands[1].preind = 0;
7063 inst.operands[1].postind = 1;
7064 inst.operands[1].writeback = 1;
7065 }
7066 inst.instruction |= inst.operands[0].reg << 12;
7067 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
7068 }
7069
7070 /* Co-processor register load/store.
7071 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
7072 static void
7073 do_lstc (void)
7074 {
7075 inst.instruction |= inst.operands[0].reg << 8;
7076 inst.instruction |= inst.operands[1].reg << 12;
7077 encode_arm_cp_address (2, TRUE, TRUE, 0);
7078 }
7079
7080 static void
7081 do_mlas (void)
7082 {
7083 /* This restriction does not apply to mls (nor to mla in v6 or later). */
7084 if (inst.operands[0].reg == inst.operands[1].reg
7085 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
7086 && !(inst.instruction & 0x00400000))
7087 as_tsktsk (_("Rd and Rm should be different in mla"));
7088
7089 inst.instruction |= inst.operands[0].reg << 16;
7090 inst.instruction |= inst.operands[1].reg;
7091 inst.instruction |= inst.operands[2].reg << 8;
7092 inst.instruction |= inst.operands[3].reg << 12;
7093 }
7094
7095 static void
7096 do_mov (void)
7097 {
7098 inst.instruction |= inst.operands[0].reg << 12;
7099 encode_arm_shifter_operand (1);
7100 }
7101
7102 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
7103 static void
7104 do_mov16 (void)
7105 {
7106 bfd_vma imm;
7107 bfd_boolean top;
7108
7109 top = (inst.instruction & 0x00400000) != 0;
7110 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
7111 _(":lower16: not allowed this instruction"));
7112 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
7113 _(":upper16: not allowed instruction"));
7114 inst.instruction |= inst.operands[0].reg << 12;
7115 if (inst.reloc.type == BFD_RELOC_UNUSED)
7116 {
7117 imm = inst.reloc.exp.X_add_number;
7118 /* The value is in two pieces: 0:11, 16:19. */
7119 inst.instruction |= (imm & 0x00000fff);
7120 inst.instruction |= (imm & 0x0000f000) << 4;
7121 }
7122 }
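/* Illustrative note: for "movw r0, #0x1234" with no :lower16:/:upper16:
   relocation the constant is split as described above: 0x234 lands in
   bits 0-11 and the top nibble 0x1 in bits 16-19 of the
   instruction.  */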
7123
7124 static void do_vfp_nsyn_opcode (const char *);
7125
7126 static int
7127 do_vfp_nsyn_mrs (void)
7128 {
7129 if (inst.operands[0].isvec)
7130 {
7131 if (inst.operands[1].reg != 1)
7132 first_error (_("operand 1 must be FPSCR"));
7133 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
7134 memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
7135 do_vfp_nsyn_opcode ("fmstat");
7136 }
7137 else if (inst.operands[1].isvec)
7138 do_vfp_nsyn_opcode ("fmrx");
7139 else
7140 return FAIL;
7141
7142 return SUCCESS;
7143 }
7144
7145 static int
7146 do_vfp_nsyn_msr (void)
7147 {
7148 if (inst.operands[0].isvec)
7149 do_vfp_nsyn_opcode ("fmxr");
7150 else
7151 return FAIL;
7152
7153 return SUCCESS;
7154 }
7155
7156 static void
7157 do_mrs (void)
7158 {
7159 if (do_vfp_nsyn_mrs () == SUCCESS)
7160 return;
7161
7162 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
7163 constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
7164 != (PSR_c|PSR_f),
7165 _("'CPSR' or 'SPSR' expected"));
7166 inst.instruction |= inst.operands[0].reg << 12;
7167 inst.instruction |= (inst.operands[1].imm & SPSR_BIT);
7168 }
7169
7170 /* Two possible forms:
7171 "{C|S}PSR_<field>, Rm",
7172 "{C|S}PSR_f, #expression". */
7173
7174 static void
7175 do_msr (void)
7176 {
7177 if (do_vfp_nsyn_msr () == SUCCESS)
7178 return;
7179
7180 inst.instruction |= inst.operands[0].imm;
7181 if (inst.operands[1].isreg)
7182 inst.instruction |= inst.operands[1].reg;
7183 else
7184 {
7185 inst.instruction |= INST_IMMEDIATE;
7186 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
7187 inst.reloc.pc_rel = 0;
7188 }
7189 }
7190
7191 static void
7192 do_mul (void)
7193 {
7194 if (!inst.operands[2].present)
7195 inst.operands[2].reg = inst.operands[0].reg;
7196 inst.instruction |= inst.operands[0].reg << 16;
7197 inst.instruction |= inst.operands[1].reg;
7198 inst.instruction |= inst.operands[2].reg << 8;
7199
7200 if (inst.operands[0].reg == inst.operands[1].reg
7201 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
7202 as_tsktsk (_("Rd and Rm should be different in mul"));
7203 }
7204
7205 /* Long Multiply Parser
7206 UMULL RdLo, RdHi, Rm, Rs
7207 SMULL RdLo, RdHi, Rm, Rs
7208 UMLAL RdLo, RdHi, Rm, Rs
7209 SMLAL RdLo, RdHi, Rm, Rs. */
7210
7211 static void
7212 do_mull (void)
7213 {
7214 inst.instruction |= inst.operands[0].reg << 12;
7215 inst.instruction |= inst.operands[1].reg << 16;
7216 inst.instruction |= inst.operands[2].reg;
7217 inst.instruction |= inst.operands[3].reg << 8;
7218
7219 /* rdhi, rdlo and rm must all be different. */
7220 if (inst.operands[0].reg == inst.operands[1].reg
7221 || inst.operands[0].reg == inst.operands[2].reg
7222 || inst.operands[1].reg == inst.operands[2].reg)
7223 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
7224 }
7225
7226 static void
7227 do_nop (void)
7228 {
7229 if (inst.operands[0].present)
7230 {
7231 /* Architectural NOP hints are CPSR sets with no bits selected. */
7232 inst.instruction &= 0xf0000000;
7233 inst.instruction |= 0x0320f000 + inst.operands[0].imm;
7234 }
7235 }
7236
7237 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
7238 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
7239 Condition defaults to COND_ALWAYS.
7240 Error if Rd, Rn or Rm are R15. */
7241
7242 static void
7243 do_pkhbt (void)
7244 {
7245 inst.instruction |= inst.operands[0].reg << 12;
7246 inst.instruction |= inst.operands[1].reg << 16;
7247 inst.instruction |= inst.operands[2].reg;
7248 if (inst.operands[3].present)
7249 encode_arm_shift (3);
7250 }
7251
7252 /* ARM V6 PKHTB (Argument Parse). */
7253
7254 static void
7255 do_pkhtb (void)
7256 {
7257 if (!inst.operands[3].present)
7258 {
7259 /* If the shift specifier is omitted, turn the instruction
7260 into pkhbt rd, rm, rn. */
7261 inst.instruction &= 0xfff00010;
7262 inst.instruction |= inst.operands[0].reg << 12;
7263 inst.instruction |= inst.operands[1].reg;
7264 inst.instruction |= inst.operands[2].reg << 16;
7265 }
7266 else
7267 {
7268 inst.instruction |= inst.operands[0].reg << 12;
7269 inst.instruction |= inst.operands[1].reg << 16;
7270 inst.instruction |= inst.operands[2].reg;
7271 encode_arm_shift (3);
7272 }
7273 }
7274
7275 /* ARMv5TE: Preload-Cache
7276
7277 PLD <addr_mode>
7278
7279 Syntactically, like LDR with B=1, W=0, L=1. */
7280
7281 static void
7282 do_pld (void)
7283 {
7284 constraint (!inst.operands[0].isreg,
7285 _("'[' expected after PLD mnemonic"));
7286 constraint (inst.operands[0].postind,
7287 _("post-indexed expression used in preload instruction"));
7288 constraint (inst.operands[0].writeback,
7289 _("writeback used in preload instruction"));
7290 constraint (!inst.operands[0].preind,
7291 _("unindexed addressing used in preload instruction"));
7292 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
7293 }
7294
7295 /* ARMv7: PLI <addr_mode> */
7296 static void
7297 do_pli (void)
7298 {
7299 constraint (!inst.operands[0].isreg,
7300 _("'[' expected after PLI mnemonic"));
7301 constraint (inst.operands[0].postind,
7302 _("post-indexed expression used in preload instruction"));
7303 constraint (inst.operands[0].writeback,
7304 _("writeback used in preload instruction"));
7305 constraint (!inst.operands[0].preind,
7306 _("unindexed addressing used in preload instruction"));
7307 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
7308 inst.instruction &= ~PRE_INDEX;
7309 }
7310
7311 static void
7312 do_push_pop (void)
7313 {
7314 inst.operands[1] = inst.operands[0];
7315 memset (&inst.operands[0], 0, sizeof inst.operands[0]);
7316 inst.operands[0].isreg = 1;
7317 inst.operands[0].writeback = 1;
7318 inst.operands[0].reg = REG_SP;
7319 do_ldmstm ();
7320 }
7321
7322 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
7323 word at the specified address and the following word
7324 respectively.
7325 Unconditionally executed.
7326 Error if Rn is R15. */
7327
7328 static void
7329 do_rfe (void)
7330 {
7331 inst.instruction |= inst.operands[0].reg << 16;
7332 if (inst.operands[0].writeback)
7333 inst.instruction |= WRITE_BACK;
7334 }
7335
7336 /* ARM V6 ssat (argument parse). */
7337
7338 static void
7339 do_ssat (void)
7340 {
7341 inst.instruction |= inst.operands[0].reg << 12;
7342 inst.instruction |= (inst.operands[1].imm - 1) << 16;
7343 inst.instruction |= inst.operands[2].reg;
7344
7345 if (inst.operands[3].present)
7346 encode_arm_shift (3);
7347 }
7348
7349 /* ARM V6 usat (argument parse). */
7350
7351 static void
7352 do_usat (void)
7353 {
7354 inst.instruction |= inst.operands[0].reg << 12;
7355 inst.instruction |= inst.operands[1].imm << 16;
7356 inst.instruction |= inst.operands[2].reg;
7357
7358 if (inst.operands[3].present)
7359 encode_arm_shift (3);
7360 }
7361
7362 /* ARM V6 ssat16 (argument parse). */
7363
7364 static void
7365 do_ssat16 (void)
7366 {
7367 inst.instruction |= inst.operands[0].reg << 12;
7368 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
7369 inst.instruction |= inst.operands[2].reg;
7370 }
7371
7372 static void
7373 do_usat16 (void)
7374 {
7375 inst.instruction |= inst.operands[0].reg << 12;
7376 inst.instruction |= inst.operands[1].imm << 16;
7377 inst.instruction |= inst.operands[2].reg;
7378 }
7379
7380 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
7381 preserving the other bits.
7382
7383 setend <endian_specifier>, where <endian_specifier> is either
7384 BE or LE. */
7385
7386 static void
7387 do_setend (void)
7388 {
7389 if (inst.operands[0].imm)
7390 inst.instruction |= 0x200;
7391 }
7392
7393 static void
7394 do_shift (void)
7395 {
7396 unsigned int Rm = (inst.operands[1].present
7397 ? inst.operands[1].reg
7398 : inst.operands[0].reg);
7399
7400 inst.instruction |= inst.operands[0].reg << 12;
7401 inst.instruction |= Rm;
7402 if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
7403 {
7404 inst.instruction |= inst.operands[2].reg << 8;
7405 inst.instruction |= SHIFT_BY_REG;
7406 }
7407 else
7408 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7409 }
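/* Illustrative note: "lsl r0, r1, #3" takes the immediate path above
   (the shift amount is applied by the BFD_RELOC_ARM_SHIFT_IMM fixup),
   whereas "lsl r0, r1, r2" places r2 in bits 8-11 and sets the
   shift-by-register bit.  */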
7410
7411 static void
7412 do_smc (void)
7413 {
7414 inst.reloc.type = BFD_RELOC_ARM_SMC;
7415 inst.reloc.pc_rel = 0;
7416 }
7417
7418 static void
7419 do_swi (void)
7420 {
7421 inst.reloc.type = BFD_RELOC_ARM_SWI;
7422 inst.reloc.pc_rel = 0;
7423 }
7424
7425 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
7426 SMLAxy{cond} Rd,Rm,Rs,Rn
7427 SMLAWy{cond} Rd,Rm,Rs,Rn
7428 Error if any register is R15. */
7429
7430 static void
7431 do_smla (void)
7432 {
7433 inst.instruction |= inst.operands[0].reg << 16;
7434 inst.instruction |= inst.operands[1].reg;
7435 inst.instruction |= inst.operands[2].reg << 8;
7436 inst.instruction |= inst.operands[3].reg << 12;
7437 }
7438
7439 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
7440 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
7441 Error if any register is R15.
7442 Warning if Rdlo == Rdhi. */
7443
7444 static void
7445 do_smlal (void)
7446 {
7447 inst.instruction |= inst.operands[0].reg << 12;
7448 inst.instruction |= inst.operands[1].reg << 16;
7449 inst.instruction |= inst.operands[2].reg;
7450 inst.instruction |= inst.operands[3].reg << 8;
7451
7452 if (inst.operands[0].reg == inst.operands[1].reg)
7453 as_tsktsk (_("rdhi and rdlo must be different"));
7454 }
7455
7456 /* ARM V5E (El Segundo) signed-multiply (argument parse)
7457 SMULxy{cond} Rd,Rm,Rs
7458 Error if any register is R15. */
7459
7460 static void
7461 do_smul (void)
7462 {
7463 inst.instruction |= inst.operands[0].reg << 16;
7464 inst.instruction |= inst.operands[1].reg;
7465 inst.instruction |= inst.operands[2].reg << 8;
7466 }
7467
7468 /* ARM V6 srs (argument parse). The variable fields in the encoding are
7469 the same for both ARM and Thumb-2. */
7470
7471 static void
7472 do_srs (void)
7473 {
7474 int reg;
7475
7476 if (inst.operands[0].present)
7477 {
7478 reg = inst.operands[0].reg;
7479 constraint (reg != 13, _("SRS base register must be r13"));
7480 }
7481 else
7482 reg = 13;
7483
7484 inst.instruction |= reg << 16;
7485 inst.instruction |= inst.operands[1].imm;
7486 if (inst.operands[0].writeback || inst.operands[1].writeback)
7487 inst.instruction |= WRITE_BACK;
7488 }
7489
7490 /* ARM V6 strex (argument parse). */
7491
7492 static void
7493 do_strex (void)
7494 {
7495 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
7496 || inst.operands[2].postind || inst.operands[2].writeback
7497 || inst.operands[2].immisreg || inst.operands[2].shifted
7498 || inst.operands[2].negative
7499 /* See comment in do_ldrex(). */
7500 || (inst.operands[2].reg == REG_PC),
7501 BAD_ADDR_MODE);
7502
7503 constraint (inst.operands[0].reg == inst.operands[1].reg
7504 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
7505
7506 constraint (inst.reloc.exp.X_op != O_constant
7507 || inst.reloc.exp.X_add_number != 0,
7508 _("offset must be zero in ARM encoding"));
7509
7510 inst.instruction |= inst.operands[0].reg << 12;
7511 inst.instruction |= inst.operands[1].reg;
7512 inst.instruction |= inst.operands[2].reg << 16;
7513 inst.reloc.type = BFD_RELOC_UNUSED;
7514 }
7515
7516 static void
7517 do_strexd (void)
7518 {
7519 constraint (inst.operands[1].reg % 2 != 0,
7520 _("even register required"));
7521 constraint (inst.operands[2].present
7522 && inst.operands[2].reg != inst.operands[1].reg + 1,
7523 _("can only store two consecutive registers"));
7524 /* If op 2 were present and equal to PC, this function wouldn't
7525 have been called in the first place. */
7526 constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));
7527
7528 constraint (inst.operands[0].reg == inst.operands[1].reg
7529 || inst.operands[0].reg == inst.operands[1].reg + 1
7530 || inst.operands[0].reg == inst.operands[3].reg,
7531 BAD_OVERLAP);
7532
7533 inst.instruction |= inst.operands[0].reg << 12;
7534 inst.instruction |= inst.operands[1].reg;
7535 inst.instruction |= inst.operands[3].reg << 16;
7536 }
7537
7538 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
7539 extends it to 32-bits, and adds the result to a value in another
7540 register. You can specify a rotation by 0, 8, 16, or 24 bits
7541 before extracting the 16-bit value.
7542 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
7543 Condition defaults to COND_ALWAYS.
7544 Error if any register uses R15. */
7545
7546 static void
7547 do_sxtah (void)
7548 {
7549 inst.instruction |= inst.operands[0].reg << 12;
7550 inst.instruction |= inst.operands[1].reg << 16;
7551 inst.instruction |= inst.operands[2].reg;
7552 inst.instruction |= inst.operands[3].imm << 10;
7553 }
7554
7555 /* ARM V6 SXTH.
7556
7557 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
7558 Condition defaults to COND_ALWAYS.
7559 Error if any register uses R15. */
7560
7561 static void
7562 do_sxth (void)
7563 {
7564 inst.instruction |= inst.operands[0].reg << 12;
7565 inst.instruction |= inst.operands[1].reg;
7566 inst.instruction |= inst.operands[2].imm << 10;
7567 }
7568 \f
7569 /* VFP instructions. In a logical order: SP variant first, monad
7570 before dyad, arithmetic then move then load/store. */
7571
7572 static void
7573 do_vfp_sp_monadic (void)
7574 {
7575 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7576 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
7577 }
7578
7579 static void
7580 do_vfp_sp_dyadic (void)
7581 {
7582 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7583 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
7584 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
7585 }
7586
7587 static void
7588 do_vfp_sp_compare_z (void)
7589 {
7590 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7591 }
7592
7593 static void
7594 do_vfp_dp_sp_cvt (void)
7595 {
7596 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7597 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
7598 }
7599
7600 static void
7601 do_vfp_sp_dp_cvt (void)
7602 {
7603 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7604 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
7605 }
7606
7607 static void
7608 do_vfp_reg_from_sp (void)
7609 {
7610 inst.instruction |= inst.operands[0].reg << 12;
7611 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
7612 }
7613
7614 static void
7615 do_vfp_reg2_from_sp2 (void)
7616 {
7617 constraint (inst.operands[2].imm != 2,
7618 _("only two consecutive VFP SP registers allowed here"));
7619 inst.instruction |= inst.operands[0].reg << 12;
7620 inst.instruction |= inst.operands[1].reg << 16;
7621 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
7622 }
7623
7624 static void
7625 do_vfp_sp_from_reg (void)
7626 {
7627 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
7628 inst.instruction |= inst.operands[1].reg << 12;
7629 }
7630
7631 static void
7632 do_vfp_sp2_from_reg2 (void)
7633 {
7634 constraint (inst.operands[0].imm != 2,
7635 _("only two consecutive VFP SP registers allowed here"));
7636 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
7637 inst.instruction |= inst.operands[1].reg << 12;
7638 inst.instruction |= inst.operands[2].reg << 16;
7639 }
7640
7641 static void
7642 do_vfp_sp_ldst (void)
7643 {
7644 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7645 encode_arm_cp_address (1, FALSE, TRUE, 0);
7646 }
7647
7648 static void
7649 do_vfp_dp_ldst (void)
7650 {
7651 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7652 encode_arm_cp_address (1, FALSE, TRUE, 0);
7653 }
7654
7655
7656 static void
7657 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
7658 {
7659 if (inst.operands[0].writeback)
7660 inst.instruction |= WRITE_BACK;
7661 else
7662 constraint (ldstm_type != VFP_LDSTMIA,
7663 _("this addressing mode requires base-register writeback"));
7664 inst.instruction |= inst.operands[0].reg << 16;
7665 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
7666 inst.instruction |= inst.operands[1].imm;
7667 }
7668
7669 static void
7670 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
7671 {
7672 int count;
7673
7674 if (inst.operands[0].writeback)
7675 inst.instruction |= WRITE_BACK;
7676 else
7677 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
7678 _("this addressing mode requires base-register writeback"));
7679
7680 inst.instruction |= inst.operands[0].reg << 16;
7681 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7682
7683 count = inst.operands[1].imm << 1;
7684 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
7685 count += 1;
7686
7687 inst.instruction |= count;
7688 }
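/* Illustrative note: the count placed in bits 0-7 above is twice the
   number of D registers, plus one for the X (fldmiax/fstmiax style)
   variants; "fldmiad r0, {d1-d3}" therefore uses a count of 6 and
   "fldmiax r0, {d1-d3}" a count of 7.  */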
7689
7690 static void
7691 do_vfp_sp_ldstmia (void)
7692 {
7693 vfp_sp_ldstm (VFP_LDSTMIA);
7694 }
7695
7696 static void
7697 do_vfp_sp_ldstmdb (void)
7698 {
7699 vfp_sp_ldstm (VFP_LDSTMDB);
7700 }
7701
7702 static void
7703 do_vfp_dp_ldstmia (void)
7704 {
7705 vfp_dp_ldstm (VFP_LDSTMIA);
7706 }
7707
7708 static void
7709 do_vfp_dp_ldstmdb (void)
7710 {
7711 vfp_dp_ldstm (VFP_LDSTMDB);
7712 }
7713
7714 static void
7715 do_vfp_xp_ldstmia (void)
7716 {
7717 vfp_dp_ldstm (VFP_LDSTMIAX);
7718 }
7719
7720 static void
7721 do_vfp_xp_ldstmdb (void)
7722 {
7723 vfp_dp_ldstm (VFP_LDSTMDBX);
7724 }
7725
7726 static void
7727 do_vfp_dp_rd_rm (void)
7728 {
7729 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7730 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
7731 }
7732
7733 static void
7734 do_vfp_dp_rn_rd (void)
7735 {
7736 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
7737 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7738 }
7739
7740 static void
7741 do_vfp_dp_rd_rn (void)
7742 {
7743 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7744 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7745 }
7746
7747 static void
7748 do_vfp_dp_rd_rn_rm (void)
7749 {
7750 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7751 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7752 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
7753 }
7754
7755 static void
7756 do_vfp_dp_rd (void)
7757 {
7758 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7759 }
7760
7761 static void
7762 do_vfp_dp_rm_rd_rn (void)
7763 {
7764 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
7765 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7766 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
7767 }
7768
7769 /* VFPv3 instructions. */
7770 static void
7771 do_vfp_sp_const (void)
7772 {
7773 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7774 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
7775 inst.instruction |= (inst.operands[1].imm & 0x0f);
7776 }
7777
7778 static void
7779 do_vfp_dp_const (void)
7780 {
7781 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7782 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
7783 inst.instruction |= (inst.operands[1].imm & 0x0f);
7784 }
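/* Illustrative note: the VFPv3 constant loads handled above take an
   8-bit encoded immediate which is simply split in half: the upper
   nibble goes to bits 16-19 and the lower nibble to bits 0-3 of the
   instruction.  */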
7785
7786 static void
7787 vfp_conv (int srcsize)
7788 {
7789 unsigned immbits = srcsize - inst.operands[1].imm;
7790 inst.instruction |= (immbits & 1) << 5;
7791 inst.instruction |= (immbits >> 1);
7792 }
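/* Illustrative note: the fixed-point conversion immediate is stored as
   (size - #fbits) with its low bit in bit 5 and the remaining bits in
   bits 0-3, so a 32-bit conversion with 16 fraction bits stores 16:
   bit 5 clear and 8 in the low field.  */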
7793
7794 static void
7795 do_vfp_sp_conv_16 (void)
7796 {
7797 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7798 vfp_conv (16);
7799 }
7800
7801 static void
7802 do_vfp_dp_conv_16 (void)
7803 {
7804 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7805 vfp_conv (16);
7806 }
7807
7808 static void
7809 do_vfp_sp_conv_32 (void)
7810 {
7811 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7812 vfp_conv (32);
7813 }
7814
7815 static void
7816 do_vfp_dp_conv_32 (void)
7817 {
7818 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7819 vfp_conv (32);
7820 }
7821
7822 \f
7823 /* FPA instructions. Also in a logical order. */
7824
7825 static void
7826 do_fpa_cmp (void)
7827 {
7828 inst.instruction |= inst.operands[0].reg << 16;
7829 inst.instruction |= inst.operands[1].reg;
7830 }
7831
7832 static void
7833 do_fpa_ldmstm (void)
7834 {
7835 inst.instruction |= inst.operands[0].reg << 12;
7836 switch (inst.operands[1].imm)
7837 {
7838 case 1: inst.instruction |= CP_T_X; break;
7839 case 2: inst.instruction |= CP_T_Y; break;
7840 case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
7841 case 4: break;
7842 default: abort ();
7843 }
7844
7845 if (inst.instruction & (PRE_INDEX | INDEX_UP))
7846 {
7847 /* The instruction specified "ea" or "fd", so we can only accept
7848 [Rn]{!}. The instruction does not really support stacking or
7849 unstacking, so we have to emulate these by setting appropriate
7850 bits and offsets. */
7851 constraint (inst.reloc.exp.X_op != O_constant
7852 || inst.reloc.exp.X_add_number != 0,
7853 _("this instruction does not support indexing"));
7854
7855 if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
7856 inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
7857
7858 if (!(inst.instruction & INDEX_UP))
7859 inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
7860
7861 if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
7862 {
7863 inst.operands[2].preind = 0;
7864 inst.operands[2].postind = 1;
7865 }
7866 }
7867
7868 encode_arm_cp_address (2, TRUE, TRUE, 0);
7869 }
7870
7871 \f
7872 /* iWMMXt instructions: strictly in alphabetical order. */
7873
7874 static void
7875 do_iwmmxt_tandorc (void)
7876 {
7877 constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
7878 }
7879
7880 static void
7881 do_iwmmxt_textrc (void)
7882 {
7883 inst.instruction |= inst.operands[0].reg << 12;
7884 inst.instruction |= inst.operands[1].imm;
7885 }
7886
7887 static void
7888 do_iwmmxt_textrm (void)
7889 {
7890 inst.instruction |= inst.operands[0].reg << 12;
7891 inst.instruction |= inst.operands[1].reg << 16;
7892 inst.instruction |= inst.operands[2].imm;
7893 }
7894
7895 static void
7896 do_iwmmxt_tinsr (void)
7897 {
7898 inst.instruction |= inst.operands[0].reg << 16;
7899 inst.instruction |= inst.operands[1].reg << 12;
7900 inst.instruction |= inst.operands[2].imm;
7901 }
7902
7903 static void
7904 do_iwmmxt_tmia (void)
7905 {
7906 inst.instruction |= inst.operands[0].reg << 5;
7907 inst.instruction |= inst.operands[1].reg;
7908 inst.instruction |= inst.operands[2].reg << 12;
7909 }
7910
7911 static void
7912 do_iwmmxt_waligni (void)
7913 {
7914 inst.instruction |= inst.operands[0].reg << 12;
7915 inst.instruction |= inst.operands[1].reg << 16;
7916 inst.instruction |= inst.operands[2].reg;
7917 inst.instruction |= inst.operands[3].imm << 20;
7918 }
7919
7920 static void
7921 do_iwmmxt_wmerge (void)
7922 {
7923 inst.instruction |= inst.operands[0].reg << 12;
7924 inst.instruction |= inst.operands[1].reg << 16;
7925 inst.instruction |= inst.operands[2].reg;
7926 inst.instruction |= inst.operands[3].imm << 21;
7927 }
7928
7929 static void
7930 do_iwmmxt_wmov (void)
7931 {
7932 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
7933 inst.instruction |= inst.operands[0].reg << 12;
7934 inst.instruction |= inst.operands[1].reg << 16;
7935 inst.instruction |= inst.operands[1].reg;
7936 }
7937
7938 static void
7939 do_iwmmxt_wldstbh (void)
7940 {
7941 int reloc;
7942 inst.instruction |= inst.operands[0].reg << 12;
7943 if (thumb_mode)
7944 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
7945 else
7946 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
7947 encode_arm_cp_address (1, TRUE, FALSE, reloc);
7948 }
7949
7950 static void
7951 do_iwmmxt_wldstw (void)
7952 {
7953 /* RIWR_RIWC clears .isreg for a control register. */
7954 if (!inst.operands[0].isreg)
7955 {
7956 constraint (inst.cond != COND_ALWAYS, BAD_COND);
7957 inst.instruction |= 0xf0000000;
7958 }
7959
7960 inst.instruction |= inst.operands[0].reg << 12;
7961 encode_arm_cp_address (1, TRUE, TRUE, 0);
7962 }
7963
7964 static void
7965 do_iwmmxt_wldstd (void)
7966 {
7967 inst.instruction |= inst.operands[0].reg << 12;
7968 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
7969 && inst.operands[1].immisreg)
7970 {
7971 inst.instruction &= ~0x1a000ff;
7972 inst.instruction |= (0xf << 28);
7973 if (inst.operands[1].preind)
7974 inst.instruction |= PRE_INDEX;
7975 if (!inst.operands[1].negative)
7976 inst.instruction |= INDEX_UP;
7977 if (inst.operands[1].writeback)
7978 inst.instruction |= WRITE_BACK;
7979 inst.instruction |= inst.operands[1].reg << 16;
7980 inst.instruction |= inst.reloc.exp.X_add_number << 4;
7981 inst.instruction |= inst.operands[1].imm;
7982 }
7983 else
7984 encode_arm_cp_address (1, TRUE, FALSE, 0);
7985 }
7986
7987 static void
7988 do_iwmmxt_wshufh (void)
7989 {
7990 inst.instruction |= inst.operands[0].reg << 12;
7991 inst.instruction |= inst.operands[1].reg << 16;
7992 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
7993 inst.instruction |= (inst.operands[2].imm & 0x0f);
7994 }
7995
7996 static void
7997 do_iwmmxt_wzero (void)
7998 {
7999 /* WZERO reg is an alias for WANDN reg, reg, reg. */
8000 inst.instruction |= inst.operands[0].reg;
8001 inst.instruction |= inst.operands[0].reg << 12;
8002 inst.instruction |= inst.operands[0].reg << 16;
8003 }
8004
8005 static void
8006 do_iwmmxt_wrwrwr_or_imm5 (void)
8007 {
8008 if (inst.operands[2].isreg)
8009 do_rd_rn_rm ();
8010 else {
8011 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
8012 _("immediate operand requires iWMMXt2"));
8013 do_rd_rn ();
8014 if (inst.operands[2].imm == 0)
8015 {
8016 switch ((inst.instruction >> 20) & 0xf)
8017 {
8018 case 4:
8019 case 5:
8020 case 6:
8021 case 7:
8022 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
8023 inst.operands[2].imm = 16;
8024 inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
8025 break;
8026 case 8:
8027 case 9:
8028 case 10:
8029 case 11:
8030 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
8031 inst.operands[2].imm = 32;
8032 inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
8033 break;
8034 case 12:
8035 case 13:
8036 case 14:
8037 case 15:
8038 {
8039 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
8040 unsigned long wrn;
8041 wrn = (inst.instruction >> 16) & 0xf;
8042 inst.instruction &= 0xff0fff0f;
8043 inst.instruction |= wrn;
8044 /* Bail out here; the instruction is now assembled. */
8045 return;
8046 }
8047 }
8048 }
8049 /* Map 32 -> 0, etc. */
8050 inst.operands[2].imm &= 0x1f;
8051 inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
8052 }
8053 }
8054 \f
8055 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
8056 operations first, then control, shift, and load/store. */
8057
8058 /* Insns like "foo X,Y,Z". */
8059
8060 static void
8061 do_mav_triple (void)
8062 {
8063 inst.instruction |= inst.operands[0].reg << 16;
8064 inst.instruction |= inst.operands[1].reg;
8065 inst.instruction |= inst.operands[2].reg << 12;
8066 }
8067
8068 /* Insns like "foo W,X,Y,Z".
8069 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
8070
8071 static void
8072 do_mav_quad (void)
8073 {
8074 inst.instruction |= inst.operands[0].reg << 5;
8075 inst.instruction |= inst.operands[1].reg << 12;
8076 inst.instruction |= inst.operands[2].reg << 16;
8077 inst.instruction |= inst.operands[3].reg;
8078 }
8079
8080 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
8081 static void
8082 do_mav_dspsc (void)
8083 {
8084 inst.instruction |= inst.operands[1].reg << 12;
8085 }
8086
8087 /* Maverick shift immediate instructions.
8088 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
8089 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
8090
8091 static void
8092 do_mav_shift (void)
8093 {
8094 int imm = inst.operands[2].imm;
8095
8096 inst.instruction |= inst.operands[0].reg << 12;
8097 inst.instruction |= inst.operands[1].reg << 16;
8098
8099 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
8100 Bits 5-7 of the insn should have bits 4-6 of the immediate.
8101 Bit 4 should be 0. */
8102 imm = (imm & 0xf) | ((imm & 0x70) << 1);
8103
8104 inst.instruction |= imm;
8105 }
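/* Illustrative note: a Maverick shift count of 0x25 is re-packed above
   as (0x25 & 0xf) | ((0x25 & 0x70) << 1) == 0x45, i.e. bits 0-3 hold
   the low nibble, bit 4 stays clear and bits 5-7 hold the upper three
   bits of the count.  */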
8106 \f
8107 /* XScale instructions. Also sorted arithmetic before move. */
8108
8109 /* Xscale multiply-accumulate (argument parse)
8110 MIAcc acc0,Rm,Rs
8111 MIAPHcc acc0,Rm,Rs
8112 MIAxycc acc0,Rm,Rs. */
8113
8114 static void
8115 do_xsc_mia (void)
8116 {
8117 inst.instruction |= inst.operands[1].reg;
8118 inst.instruction |= inst.operands[2].reg << 12;
8119 }
8120
8121 /* Xscale move-accumulator-register (argument parse)
8122
8123 MARcc acc0,RdLo,RdHi. */
8124
8125 static void
8126 do_xsc_mar (void)
8127 {
8128 inst.instruction |= inst.operands[1].reg << 12;
8129 inst.instruction |= inst.operands[2].reg << 16;
8130 }
8131
8132 /* Xscale move-register-accumulator (argument parse)
8133
8134 MRAcc RdLo,RdHi,acc0. */
8135
8136 static void
8137 do_xsc_mra (void)
8138 {
8139 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
8140 inst.instruction |= inst.operands[0].reg << 12;
8141 inst.instruction |= inst.operands[1].reg << 16;
8142 }
8143 \f
8144 /* Encoding functions relevant only to Thumb. */
8145
8146 /* inst.operands[i] is a shifted-register operand; encode
8147 it into inst.instruction in the format used by Thumb32. */
8148
8149 static void
8150 encode_thumb32_shifted_operand (int i)
8151 {
8152 unsigned int value = inst.reloc.exp.X_add_number;
8153 unsigned int shift = inst.operands[i].shift_kind;
8154
8155 constraint (inst.operands[i].immisreg,
8156 _("shift by register not allowed in thumb mode"));
8157 inst.instruction |= inst.operands[i].reg;
8158 if (shift == SHIFT_RRX)
8159 inst.instruction |= SHIFT_ROR << 4;
8160 else
8161 {
8162 constraint (inst.reloc.exp.X_op != O_constant,
8163 _("expression too complex"));
8164
8165 constraint (value > 32
8166 || (value == 32 && (shift == SHIFT_LSL
8167 || shift == SHIFT_ROR)),
8168 _("shift expression is too large"));
8169
8170 if (value == 0)
8171 shift = SHIFT_LSL;
8172 else if (value == 32)
8173 value = 0;
8174
8175 inst.instruction |= shift << 4;
8176 inst.instruction |= (value & 0x1c) << 10;
8177 inst.instruction |= (value & 0x03) << 6;
8178 }
8179 }
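/* Illustrative note: a constant shift of #5 is split across the Thumb-2
   imm3:imm2 fields above, (5 & 0x1c) << 10 giving imm3 == 1 and
   (5 & 0x03) << 6 giving imm2 == 1; a shift amount of 32 (where legal)
   is encoded as 0.  */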
8180
8181
8182 /* inst.operands[i] was set up by parse_address. Encode it into a
8183 Thumb32 format load or store instruction. Reject forms that cannot
8184 be used with such instructions. If is_t is true, reject forms that
8185 cannot be used with a T instruction; if is_d is true, reject forms
8186 that cannot be used with a D instruction. */
8187
8188 static void
8189 encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
8190 {
8191 bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
8192
8193 constraint (!inst.operands[i].isreg,
8194 _("Instruction does not support =N addresses"));
8195
8196 inst.instruction |= inst.operands[i].reg << 16;
8197 if (inst.operands[i].immisreg)
8198 {
8199 constraint (is_pc, _("cannot use register index with PC-relative addressing"));
8200 constraint (is_t || is_d, _("cannot use register index with this instruction"));
8201 constraint (inst.operands[i].negative,
8202 _("Thumb does not support negative register indexing"));
8203 constraint (inst.operands[i].postind,
8204 _("Thumb does not support register post-indexing"));
8205 constraint (inst.operands[i].writeback,
8206 _("Thumb does not support register indexing with writeback"));
8207 constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
8208 _("Thumb supports only LSL in shifted register indexing"));
8209
8210 inst.instruction |= inst.operands[i].imm;
8211 if (inst.operands[i].shifted)
8212 {
8213 constraint (inst.reloc.exp.X_op != O_constant,
8214 _("expression too complex"));
8215 constraint (inst.reloc.exp.X_add_number < 0
8216 || inst.reloc.exp.X_add_number > 3,
8217 _("shift out of range"));
8218 inst.instruction |= inst.reloc.exp.X_add_number << 4;
8219 }
8220 inst.reloc.type = BFD_RELOC_UNUSED;
8221 }
8222 else if (inst.operands[i].preind)
8223 {
8224 constraint (is_pc && inst.operands[i].writeback,
8225 _("cannot use writeback with PC-relative addressing"));
8226 constraint (is_t && inst.operands[i].writeback,
8227 _("cannot use writeback with this instruction"));
8228
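      /* Pre-indexed addressing.  The doubleword form uses P (bit 24) and
         W (bit 21); the other forms use the 8-bit immediate encoding, with
         bit 11 set, P in bit 10 and W in bit 8.  */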
8229 if (is_d)
8230 {
8231 inst.instruction |= 0x01000000;
8232 if (inst.operands[i].writeback)
8233 inst.instruction |= 0x00200000;
8234 }
8235 else
8236 {
8237 inst.instruction |= 0x00000c00;
8238 if (inst.operands[i].writeback)
8239 inst.instruction |= 0x00000100;
8240 }
8241 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
8242 }
8243 else if (inst.operands[i].postind)
8244 {
8245 assert (inst.operands[i].writeback);
8246 constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
8247 constraint (is_t, _("cannot use post-indexing with this instruction"));
8248
8249 if (is_d)
8250 inst.instruction |= 0x00200000;
8251 else
8252 inst.instruction |= 0x00000900;
8253 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
8254 }
8255 else /* unindexed - only for coprocessor */
8256 inst.error = _("instruction does not accept unindexed addressing");
8257 }
8258
8259 /* Table of Thumb instructions which exist in both 16- and 32-bit
8260 encodings (the latter only in post-V6T2 cores). The index is the
8261 value used in the insns table below. When there is more than one
8262 possible 16-bit encoding for the instruction, this table always
8263 holds variant (1).
8264 Also contains several pseudo-instructions used during relaxation. */
8265 #define T16_32_TAB \
8266 X(adc, 4140, eb400000), \
8267 X(adcs, 4140, eb500000), \
8268 X(add, 1c00, eb000000), \
8269 X(adds, 1c00, eb100000), \
8270 X(addi, 0000, f1000000), \
8271 X(addis, 0000, f1100000), \
8272 X(add_pc,000f, f20f0000), \
8273 X(add_sp,000d, f10d0000), \
8274 X(adr, 000f, f20f0000), \
8275 X(and, 4000, ea000000), \
8276 X(ands, 4000, ea100000), \
8277 X(asr, 1000, fa40f000), \
8278 X(asrs, 1000, fa50f000), \
8279 X(b, e000, f000b000), \
8280 X(bcond, d000, f0008000), \
8281 X(bic, 4380, ea200000), \
8282 X(bics, 4380, ea300000), \
8283 X(cmn, 42c0, eb100f00), \
8284 X(cmp, 2800, ebb00f00), \
8285 X(cpsie, b660, f3af8400), \
8286 X(cpsid, b670, f3af8600), \
8287 X(cpy, 4600, ea4f0000), \
8288 X(dec_sp,80dd, f1ad0d00), \
8289 X(eor, 4040, ea800000), \
8290 X(eors, 4040, ea900000), \
8291 X(inc_sp,00dd, f10d0d00), \
8292 X(ldmia, c800, e8900000), \
8293 X(ldr, 6800, f8500000), \
8294 X(ldrb, 7800, f8100000), \
8295 X(ldrh, 8800, f8300000), \
8296 X(ldrsb, 5600, f9100000), \
8297 X(ldrsh, 5e00, f9300000), \
8298 X(ldr_pc,4800, f85f0000), \
8299 X(ldr_pc2,4800, f85f0000), \
8300 X(ldr_sp,9800, f85d0000), \
8301 X(lsl, 0000, fa00f000), \
8302 X(lsls, 0000, fa10f000), \
8303 X(lsr, 0800, fa20f000), \
8304 X(lsrs, 0800, fa30f000), \
8305 X(mov, 2000, ea4f0000), \
8306 X(movs, 2000, ea5f0000), \
8307 X(mul, 4340, fb00f000), \
8308 X(muls, 4340, ffffffff), /* no 32b muls */ \
8309 X(mvn, 43c0, ea6f0000), \
8310 X(mvns, 43c0, ea7f0000), \
8311 X(neg, 4240, f1c00000), /* rsb #0 */ \
8312 X(negs, 4240, f1d00000), /* rsbs #0 */ \
8313 X(orr, 4300, ea400000), \
8314 X(orrs, 4300, ea500000), \
8315 X(pop, bc00, e8bd0000), /* ldmia sp!,... */ \
8316 X(push, b400, e92d0000), /* stmdb sp!,... */ \
8317 X(rev, ba00, fa90f080), \
8318 X(rev16, ba40, fa90f090), \
8319 X(revsh, bac0, fa90f0b0), \
8320 X(ror, 41c0, fa60f000), \
8321 X(rors, 41c0, fa70f000), \
8322 X(sbc, 4180, eb600000), \
8323 X(sbcs, 4180, eb700000), \
8324 X(stmia, c000, e8800000), \
8325 X(str, 6000, f8400000), \
8326 X(strb, 7000, f8000000), \
8327 X(strh, 8000, f8200000), \
8328 X(str_sp,9000, f84d0000), \
8329 X(sub, 1e00, eba00000), \
8330 X(subs, 1e00, ebb00000), \
8331 X(subi, 8000, f1a00000), \
8332 X(subis, 8000, f1b00000), \
8333 X(sxtb, b240, fa4ff080), \
8334 X(sxth, b200, fa0ff080), \
8335 X(tst, 4200, ea100f00), \
8336 X(uxtb, b2c0, fa5ff080), \
8337 X(uxth, b280, fa1ff080), \
8338 X(nop, bf00, f3af8000), \
8339 X(yield, bf10, f3af8001), \
8340 X(wfe, bf20, f3af8002), \
8341 X(wfi, bf30, f3af8003), \
8342 X(sev, bf40, f3af8004),
8343
8344 /* To catch errors in encoding functions, the codes are all offset by
8345 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
8346 as 16-bit instructions. */
8347 #define X(a,b,c) T_MNEM_##a
8348 enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
8349 #undef X
8350
8351 #define X(a,b,c) 0x##b
8352 static const unsigned short thumb_op16[] = { T16_32_TAB };
8353 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
8354 #undef X
8355
8356 #define X(a,b,c) 0x##c
8357 static const unsigned int thumb_op32[] = { T16_32_TAB };
8358 #define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
8359 #define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
8360 #undef X
8361 #undef T16_32_TAB
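
/* Example of how the tables above are used: T_MNEM_adc is the first code
   after T16_32_OFFSET, i.e. 0xF800, so THUMB_OP16 (T_MNEM_adc) is
   thumb_op16[0] == 0x4140 and THUMB_OP32 (T_MNEM_adc) == 0xeb400000.
   THUMB_SETS_FLAGS tests bit 20 (the S bit) of the 32-bit encoding:
   clear for adc (0xeb400000), set for adcs (0xeb500000).  */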
8362
8363 /* Thumb instruction encoders, in alphabetical order. */
8364
8365 /* ADDW or SUBW. */
8366 static void
8367 do_t_add_sub_w (void)
8368 {
8369 int Rd, Rn;
8370
8371 Rd = inst.operands[0].reg;
8372 Rn = inst.operands[1].reg;
8373
8374 constraint (Rd == 15, _("PC not allowed as destination"));
8375 inst.instruction |= (Rn << 16) | (Rd << 8);
8376 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
8377 }
8378
8379 /* Parse an add or subtract instruction. We get here with inst.instruction
8380 equalling any of T_MNEM_add, adds, sub, or subs.  */
8381
8382 static void
8383 do_t_add_sub (void)
8384 {
8385 int Rd, Rs, Rn;
8386
8387 Rd = inst.operands[0].reg;
8388 Rs = (inst.operands[1].present
8389 ? inst.operands[1].reg /* Rd, Rs, foo */
8390 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
8391
8392 if (unified_syntax)
8393 {
8394 bfd_boolean flags;
8395 bfd_boolean narrow;
8396 int opcode;
8397
8398 flags = (inst.instruction == T_MNEM_adds
8399 || inst.instruction == T_MNEM_subs);
8400 if (flags)
8401 narrow = (current_it_mask == 0);
8402 else
8403 narrow = (current_it_mask != 0);
8404 if (!inst.operands[2].isreg)
8405 {
8406 int add;
8407
8408 add = (inst.instruction == T_MNEM_add
8409 || inst.instruction == T_MNEM_adds);
8410 opcode = 0;
8411 if (inst.size_req != 4)
8412 {
8413 /* Attempt to use a narrow opcode, with relaxation if
8414 appropriate. */
8415 if (Rd == REG_SP && Rs == REG_SP && !flags)
8416 opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
8417 else if (Rd <= 7 && Rs == REG_SP && add && !flags)
8418 opcode = T_MNEM_add_sp;
8419 else if (Rd <= 7 && Rs == REG_PC && add && !flags)
8420 opcode = T_MNEM_add_pc;
8421 else if (Rd <= 7 && Rs <= 7 && narrow)
8422 {
8423 if (flags)
8424 opcode = add ? T_MNEM_addis : T_MNEM_subis;
8425 else
8426 opcode = add ? T_MNEM_addi : T_MNEM_subi;
8427 }
8428 if (opcode)
8429 {
8430 inst.instruction = THUMB_OP16(opcode);
8431 inst.instruction |= (Rd << 4) | Rs;
8432 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
8433 if (inst.size_req != 2)
8434 inst.relax = opcode;
8435 }
8436 else
8437 constraint (inst.size_req == 2, BAD_HIREG);
8438 }
8439 if (inst.size_req == 4
8440 || (inst.size_req != 2 && !opcode))
8441 {
8442 if (Rs == REG_PC)
8443 {
8444 /* Always use addw/subw. */
8445 inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
8446 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
8447 }
8448 else
8449 {
8450 inst.instruction = THUMB_OP32 (inst.instruction);
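	      /* Convert the register form given by THUMB_OP32 into the
	         corresponding modified-immediate form, e.g. 0xeb000000
	         (ADD register) becomes 0xf1000000 (ADD immediate).  */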
8451 inst.instruction = (inst.instruction & 0xe1ffffff)
8452 | 0x10000000;
8453 if (flags)
8454 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8455 else
8456 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
8457 }
8458 inst.instruction |= Rd << 8;
8459 inst.instruction |= Rs << 16;
8460 }
8461 }
8462 else
8463 {
8464 Rn = inst.operands[2].reg;
8465 /* See if we can do this with a 16-bit instruction. */
8466 if (!inst.operands[2].shifted && inst.size_req != 4)
8467 {
8468 if (Rd > 7 || Rs > 7 || Rn > 7)
8469 narrow = FALSE;
8470
8471 if (narrow)
8472 {
8473 inst.instruction = ((inst.instruction == T_MNEM_adds
8474 || inst.instruction == T_MNEM_add)
8475 ? T_OPCODE_ADD_R3
8476 : T_OPCODE_SUB_R3);
8477 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
8478 return;
8479 }
8480
8481 if (inst.instruction == T_MNEM_add)
8482 {
8483 if (Rd == Rs)
8484 {
8485 inst.instruction = T_OPCODE_ADD_HI;
8486 inst.instruction |= (Rd & 8) << 4;
8487 inst.instruction |= (Rd & 7);
8488 inst.instruction |= Rn << 3;
8489 return;
8490 }
8491 /* ... because addition is commutative! */
8492 else if (Rd == Rn)
8493 {
8494 inst.instruction = T_OPCODE_ADD_HI;
8495 inst.instruction |= (Rd & 8) << 4;
8496 inst.instruction |= (Rd & 7);
8497 inst.instruction |= Rs << 3;
8498 return;
8499 }
8500 }
8501 }
8502 /* If we get here, it can't be done in 16 bits. */
8503 constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
8504 _("shift must be constant"));
8505 inst.instruction = THUMB_OP32 (inst.instruction);
8506 inst.instruction |= Rd << 8;
8507 inst.instruction |= Rs << 16;
8508 encode_thumb32_shifted_operand (2);
8509 }
8510 }
8511 else
8512 {
8513 constraint (inst.instruction == T_MNEM_adds
8514 || inst.instruction == T_MNEM_subs,
8515 BAD_THUMB32);
8516
8517 if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
8518 {
8519 constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
8520 || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
8521 BAD_HIREG);
8522
8523 inst.instruction = (inst.instruction == T_MNEM_add
8524 ? 0x0000 : 0x8000);
8525 inst.instruction |= (Rd << 4) | Rs;
8526 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
8527 return;
8528 }
8529
8530 Rn = inst.operands[2].reg;
8531 constraint (inst.operands[2].shifted, _("unshifted register required"));
8532
8533 /* We now have Rd, Rs, and Rn set to registers. */
8534 if (Rd > 7 || Rs > 7 || Rn > 7)
8535 {
8536 /* Can't do this for SUB. */
8537 constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
8538 inst.instruction = T_OPCODE_ADD_HI;
8539 inst.instruction |= (Rd & 8) << 4;
8540 inst.instruction |= (Rd & 7);
8541 if (Rs == Rd)
8542 inst.instruction |= Rn << 3;
8543 else if (Rn == Rd)
8544 inst.instruction |= Rs << 3;
8545 else
8546 constraint (1, _("dest must overlap one source register"));
8547 }
8548 else
8549 {
8550 inst.instruction = (inst.instruction == T_MNEM_add
8551 ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
8552 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
8553 }
8554 }
8555 }
8556
8557 static void
8558 do_t_adr (void)
8559 {
8560 if (unified_syntax && inst.size_req == 0 && inst.operands[0].reg <= 7)
8561 {
8562 /* Defer to section relaxation. */
8563 inst.relax = inst.instruction;
8564 inst.instruction = THUMB_OP16 (inst.instruction);
8565 inst.instruction |= inst.operands[0].reg << 4;
8566 }
8567 else if (unified_syntax && inst.size_req != 2)
8568 {
8569 /* Generate a 32-bit opcode. */
8570 inst.instruction = THUMB_OP32 (inst.instruction);
8571 inst.instruction |= inst.operands[0].reg << 8;
8572 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
8573 inst.reloc.pc_rel = 1;
8574 }
8575 else
8576 {
8577 /* Generate a 16-bit opcode. */
8578 inst.instruction = THUMB_OP16 (inst.instruction);
8579 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
8580 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
8581 inst.reloc.pc_rel = 1;
8582
8583 inst.instruction |= inst.operands[0].reg << 4;
8584 }
8585 }
8586
8587 /* Arithmetic instructions for which there is just one 16-bit
8588 instruction encoding, and it allows only two low registers.
8589 For maximal compatibility with ARM syntax, we allow three register
8590 operands even when Thumb-32 instructions are not available, as long
8591 as the first two are identical. For instance, both "sbc r0,r1" and
8592 "sbc r0,r0,r1" are allowed. */
8593 static void
8594 do_t_arit3 (void)
8595 {
8596 int Rd, Rs, Rn;
8597
8598 Rd = inst.operands[0].reg;
8599 Rs = (inst.operands[1].present
8600 ? inst.operands[1].reg /* Rd, Rs, foo */
8601 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
8602 Rn = inst.operands[2].reg;
8603
8604 if (unified_syntax)
8605 {
8606 if (!inst.operands[2].isreg)
8607 {
8608 /* For an immediate, we always generate a 32-bit opcode;
8609 section relaxation will shrink it later if possible. */
8610 inst.instruction = THUMB_OP32 (inst.instruction);
8611 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8612 inst.instruction |= Rd << 8;
8613 inst.instruction |= Rs << 16;
8614 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8615 }
8616 else
8617 {
8618 bfd_boolean narrow;
8619
8620 /* See if we can do this with a 16-bit instruction. */
8621 if (THUMB_SETS_FLAGS (inst.instruction))
8622 narrow = current_it_mask == 0;
8623 else
8624 narrow = current_it_mask != 0;
8625
8626 if (Rd > 7 || Rn > 7 || Rs > 7)
8627 narrow = FALSE;
8628 if (inst.operands[2].shifted)
8629 narrow = FALSE;
8630 if (inst.size_req == 4)
8631 narrow = FALSE;
8632
8633 if (narrow
8634 && Rd == Rs)
8635 {
8636 inst.instruction = THUMB_OP16 (inst.instruction);
8637 inst.instruction |= Rd;
8638 inst.instruction |= Rn << 3;
8639 return;
8640 }
8641
8642 /* If we get here, it can't be done in 16 bits. */
8643 constraint (inst.operands[2].shifted
8644 && inst.operands[2].immisreg,
8645 _("shift must be constant"));
8646 inst.instruction = THUMB_OP32 (inst.instruction);
8647 inst.instruction |= Rd << 8;
8648 inst.instruction |= Rs << 16;
8649 encode_thumb32_shifted_operand (2);
8650 }
8651 }
8652 else
8653 {
8654 /* On its face this is a lie - the instruction does set the
8655 flags. However, the only supported mnemonic in this mode
8656 says it doesn't. */
8657 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
8658
8659 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
8660 _("unshifted register required"));
8661 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
8662 constraint (Rd != Rs,
8663 _("dest and source1 must be the same register"));
8664
8665 inst.instruction = THUMB_OP16 (inst.instruction);
8666 inst.instruction |= Rd;
8667 inst.instruction |= Rn << 3;
8668 }
8669 }
8670
8671 /* Similarly, but for instructions where the arithmetic operation is
8672 commutative, so we can allow either of them to be different from
8673 the destination operand in a 16-bit instruction. For instance, all
8674 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
8675 accepted. */
8676 static void
8677 do_t_arit3c (void)
8678 {
8679 int Rd, Rs, Rn;
8680
8681 Rd = inst.operands[0].reg;
8682 Rs = (inst.operands[1].present
8683 ? inst.operands[1].reg /* Rd, Rs, foo */
8684 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
8685 Rn = inst.operands[2].reg;
8686
8687 if (unified_syntax)
8688 {
8689 if (!inst.operands[2].isreg)
8690 {
8691 /* For an immediate, we always generate a 32-bit opcode;
8692 section relaxation will shrink it later if possible. */
8693 inst.instruction = THUMB_OP32 (inst.instruction);
8694 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8695 inst.instruction |= Rd << 8;
8696 inst.instruction |= Rs << 16;
8697 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8698 }
8699 else
8700 {
8701 bfd_boolean narrow;
8702
8703 /* See if we can do this with a 16-bit instruction. */
8704 if (THUMB_SETS_FLAGS (inst.instruction))
8705 narrow = current_it_mask == 0;
8706 else
8707 narrow = current_it_mask != 0;
8708
8709 if (Rd > 7 || Rn > 7 || Rs > 7)
8710 narrow = FALSE;
8711 if (inst.operands[2].shifted)
8712 narrow = FALSE;
8713 if (inst.size_req == 4)
8714 narrow = FALSE;
8715
8716 if (narrow)
8717 {
8718 if (Rd == Rs)
8719 {
8720 inst.instruction = THUMB_OP16 (inst.instruction);
8721 inst.instruction |= Rd;
8722 inst.instruction |= Rn << 3;
8723 return;
8724 }
8725 if (Rd == Rn)
8726 {
8727 inst.instruction = THUMB_OP16 (inst.instruction);
8728 inst.instruction |= Rd;
8729 inst.instruction |= Rs << 3;
8730 return;
8731 }
8732 }
8733
8734 /* If we get here, it can't be done in 16 bits. */
8735 constraint (inst.operands[2].shifted
8736 && inst.operands[2].immisreg,
8737 _("shift must be constant"));
8738 inst.instruction = THUMB_OP32 (inst.instruction);
8739 inst.instruction |= Rd << 8;
8740 inst.instruction |= Rs << 16;
8741 encode_thumb32_shifted_operand (2);
8742 }
8743 }
8744 else
8745 {
8746 /* On its face this is a lie - the instruction does set the
8747 flags. However, the only supported mnemonic in this mode
8748 says it doesn't. */
8749 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
8750
8751 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
8752 _("unshifted register required"));
8753 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
8754
8755 inst.instruction = THUMB_OP16 (inst.instruction);
8756 inst.instruction |= Rd;
8757
8758 if (Rd == Rs)
8759 inst.instruction |= Rn << 3;
8760 else if (Rd == Rn)
8761 inst.instruction |= Rs << 3;
8762 else
8763 constraint (1, _("dest must overlap one source register"));
8764 }
8765 }
8766
8767 static void
8768 do_t_barrier (void)
8769 {
8770 if (inst.operands[0].present)
8771 {
8772 constraint ((inst.instruction & 0xf0) != 0x40
8773 && inst.operands[0].imm != 0xf,
8774 "bad barrier type");
8775 inst.instruction |= inst.operands[0].imm;
8776 }
8777 else
8778 inst.instruction |= 0xf;
8779 }
8780
8781 static void
8782 do_t_bfc (void)
8783 {
8784 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8785 constraint (msb > 32, _("bit-field extends past end of register"));
8786 /* The instruction encoding stores the LSB and MSB,
8787 not the LSB and width. */
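  /* For example, "bfc r0, #4, #8" clears bits 4..11, so the MSB field
     holds 11.  */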
8788 inst.instruction |= inst.operands[0].reg << 8;
8789 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
8790 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
8791 inst.instruction |= msb - 1;
8792 }
8793
8794 static void
8795 do_t_bfi (void)
8796 {
8797 unsigned int msb;
8798
8799 /* #0 in second position is alternative syntax for bfc, which is
8800 the same instruction but with REG_PC in the Rm field. */
8801 if (!inst.operands[1].isreg)
8802 inst.operands[1].reg = REG_PC;
8803
8804 msb = inst.operands[2].imm + inst.operands[3].imm;
8805 constraint (msb > 32, _("bit-field extends past end of register"));
8806 /* The instruction encoding stores the LSB and MSB,
8807 not the LSB and width. */
8808 inst.instruction |= inst.operands[0].reg << 8;
8809 inst.instruction |= inst.operands[1].reg << 16;
8810 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
8811 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
8812 inst.instruction |= msb - 1;
8813 }
8814
8815 static void
8816 do_t_bfx (void)
8817 {
8818 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8819 _("bit-field extends past end of register"));
8820 inst.instruction |= inst.operands[0].reg << 8;
8821 inst.instruction |= inst.operands[1].reg << 16;
8822 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
8823 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
8824 inst.instruction |= inst.operands[3].imm - 1;
8825 }
8826
8827 /* ARM V5 Thumb BLX (argument parse)
8828 BLX <target_addr> which is BLX(1)
8829 BLX <Rm> which is BLX(2)
8830 Unfortunately, there are two different opcodes for this mnemonic.
8831 So, the insns[].value is not used, and the code here zaps values
8832 into inst.instruction.
8833
8834 ??? How to take advantage of the additional two bits of displacement
8835 available in Thumb32 mode? Need new relocation? */
8836
8837 static void
8838 do_t_blx (void)
8839 {
8840 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8841 if (inst.operands[0].isreg)
8842 /* We have a register, so this is BLX(2). */
8843 inst.instruction |= inst.operands[0].reg << 3;
8844 else
8845 {
8846 /* No register. This must be BLX(1). */
8847 inst.instruction = 0xf000e800;
8848 #ifdef OBJ_ELF
8849 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
8850 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
8851 else
8852 #endif
8853 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BLX;
8854 inst.reloc.pc_rel = 1;
8855 }
8856 }
8857
8858 static void
8859 do_t_branch (void)
8860 {
8861 int opcode;
8862 int cond;
8863
8864 if (current_it_mask)
8865 {
8866 /* Conditional branches inside IT blocks are encoded as unconditional
8867 branches. */
8868 cond = COND_ALWAYS;
8869 /* A branch must be the last instruction in an IT block. */
8870 constraint (current_it_mask != 0x10, BAD_BRANCH);
8871 }
8872 else
8873 cond = inst.cond;
8874
8875 if (cond != COND_ALWAYS)
8876 opcode = T_MNEM_bcond;
8877 else
8878 opcode = inst.instruction;
8879
8880 if (unified_syntax && inst.size_req == 4)
8881 {
8882 inst.instruction = THUMB_OP32(opcode);
8883 if (cond == COND_ALWAYS)
8884 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH25;
8885 else
8886 {
8887 assert (cond != 0xF);
8888 inst.instruction |= cond << 22;
8889 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH20;
8890 }
8891 }
8892 else
8893 {
8894 inst.instruction = THUMB_OP16(opcode);
8895 if (cond == COND_ALWAYS)
8896 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH12;
8897 else
8898 {
8899 inst.instruction |= cond << 8;
8900 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH9;
8901 }
8902 /* Allow section relaxation. */
8903 if (unified_syntax && inst.size_req != 2)
8904 inst.relax = opcode;
8905 }
8906
8907 inst.reloc.pc_rel = 1;
8908 }
8909
8910 static void
8911 do_t_bkpt (void)
8912 {
8913 constraint (inst.cond != COND_ALWAYS,
8914 _("instruction is always unconditional"));
8915 if (inst.operands[0].present)
8916 {
8917 constraint (inst.operands[0].imm > 255,
8918 _("immediate value out of range"));
8919 inst.instruction |= inst.operands[0].imm;
8920 }
8921 }
8922
8923 static void
8924 do_t_branch23 (void)
8925 {
8926 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8927 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
8928 inst.reloc.pc_rel = 1;
8929
8930 /* If the destination of the branch is a defined symbol which does not have
8931 the THUMB_FUNC attribute, then we must be calling a function which has
8932 the (interfacearm) attribute. We look for the Thumb entry point to that
8933 function and change the branch to refer to that function instead. */
8934 if ( inst.reloc.exp.X_op == O_symbol
8935 && inst.reloc.exp.X_add_symbol != NULL
8936 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
8937 && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
8938 inst.reloc.exp.X_add_symbol =
8939 find_real_start (inst.reloc.exp.X_add_symbol);
8940 }
8941
8942 static void
8943 do_t_bx (void)
8944 {
8945 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8946 inst.instruction |= inst.operands[0].reg << 3;
8947 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
8948 should cause the alignment to be checked once it is known. This is
8949 because BX PC only works if the instruction is word aligned. */
8950 }
8951
8952 static void
8953 do_t_bxj (void)
8954 {
8955 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8956 if (inst.operands[0].reg == REG_PC)
8957 as_tsktsk (_("use of r15 in bxj is not really useful"));
8958
8959 inst.instruction |= inst.operands[0].reg << 16;
8960 }
8961
8962 static void
8963 do_t_clz (void)
8964 {
8965 inst.instruction |= inst.operands[0].reg << 8;
8966 inst.instruction |= inst.operands[1].reg << 16;
8967 inst.instruction |= inst.operands[1].reg;
8968 }
8969
8970 static void
8971 do_t_cps (void)
8972 {
8973 constraint (current_it_mask, BAD_NOT_IT);
8974 inst.instruction |= inst.operands[0].imm;
8975 }
8976
8977 static void
8978 do_t_cpsi (void)
8979 {
8980 constraint (current_it_mask, BAD_NOT_IT);
8981 if (unified_syntax
8982 && (inst.operands[1].present || inst.size_req == 4)
8983 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
8984 {
8985 unsigned int imod = (inst.instruction & 0x0030) >> 4;
8986 inst.instruction = 0xf3af8000;
8987 inst.instruction |= imod << 9;
8988 inst.instruction |= inst.operands[0].imm << 5;
8989 if (inst.operands[1].present)
8990 inst.instruction |= 0x100 | inst.operands[1].imm;
8991 }
8992 else
8993 {
8994 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
8995 && (inst.operands[0].imm & 4),
8996 _("selected processor does not support 'A' form "
8997 "of this instruction"));
8998 constraint (inst.operands[1].present || inst.size_req == 4,
8999 _("Thumb does not support the 2-argument "
9000 "form of this instruction"));
9001 inst.instruction |= inst.operands[0].imm;
9002 }
9003 }
9004
9005 /* THUMB CPY instruction (argument parse). */
9006
9007 static void
9008 do_t_cpy (void)
9009 {
9010 if (inst.size_req == 4)
9011 {
9012 inst.instruction = THUMB_OP32 (T_MNEM_mov);
9013 inst.instruction |= inst.operands[0].reg << 8;
9014 inst.instruction |= inst.operands[1].reg;
9015 }
9016 else
9017 {
9018 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
9019 inst.instruction |= (inst.operands[0].reg & 0x7);
9020 inst.instruction |= inst.operands[1].reg << 3;
9021 }
9022 }
9023
9024 static void
9025 do_t_cbz (void)
9026 {
9027 constraint (current_it_mask, BAD_NOT_IT);
9028 constraint (inst.operands[0].reg > 7, BAD_HIREG);
9029 inst.instruction |= inst.operands[0].reg;
9030 inst.reloc.pc_rel = 1;
9031 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
9032 }
9033
9034 static void
9035 do_t_dbg (void)
9036 {
9037 inst.instruction |= inst.operands[0].imm;
9038 }
9039
9040 static void
9041 do_t_div (void)
9042 {
9043 if (!inst.operands[1].present)
9044 inst.operands[1].reg = inst.operands[0].reg;
9045 inst.instruction |= inst.operands[0].reg << 8;
9046 inst.instruction |= inst.operands[1].reg << 16;
9047 inst.instruction |= inst.operands[2].reg;
9048 }
9049
9050 static void
9051 do_t_hint (void)
9052 {
9053 if (unified_syntax && inst.size_req == 4)
9054 inst.instruction = THUMB_OP32 (inst.instruction);
9055 else
9056 inst.instruction = THUMB_OP16 (inst.instruction);
9057 }
9058
9059 static void
9060 do_t_it (void)
9061 {
9062 unsigned int cond = inst.operands[0].imm;
9063
9064 constraint (current_it_mask, BAD_NOT_IT);
9065 current_it_mask = (inst.instruction & 0xf) | 0x10;
9066 current_cc = cond;
9067
9068 /* If the condition is a negative condition, invert the mask. */
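  /* The mask was built as if the condition's least significant bit were
     set; for an even condition code the then/else bits above the
     terminating 1 must be flipped, e.g. xy10 becomes (~x)(~y)10.  */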
9069 if ((cond & 0x1) == 0x0)
9070 {
9071 unsigned int mask = inst.instruction & 0x000f;
9072
9073 if ((mask & 0x7) == 0)
9074 /* no conversion needed */;
9075 else if ((mask & 0x3) == 0)
9076 mask ^= 0x8;
9077 else if ((mask & 0x1) == 0)
9078 mask ^= 0xC;
9079 else
9080 mask ^= 0xE;
9081
9082 inst.instruction &= 0xfff0;
9083 inst.instruction |= mask;
9084 }
9085
9086 inst.instruction |= cond << 4;
9087 }
9088
9089 static void
9090 do_t_ldmstm (void)
9091 {
9092 /* Supporting a register list given by a complex expression really doesn't seem worth it.  */
9093 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
9094 _("expression too complex"));
9095 constraint (inst.operands[1].writeback,
9096 _("Thumb load/store multiple does not support {reglist}^"));
9097
9098 if (unified_syntax)
9099 {
9100 /* See if we can use a 16-bit instruction. */
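      /* (A 16-bit STMIA always writes back; a 16-bit LDMIA writes back
         only when the base register is not in the list, hence the
         writeback tests below.)  */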
9101 if (inst.instruction < 0xffff /* not ldmdb/stmdb */
9102 && inst.size_req != 4
9103 && inst.operands[0].reg <= 7
9104 && !(inst.operands[1].imm & ~0xff)
9105 && (inst.instruction == T_MNEM_stmia
9106 ? inst.operands[0].writeback
9107 : (inst.operands[0].writeback
9108 == !(inst.operands[1].imm & (1 << inst.operands[0].reg)))))
9109 {
9110 if (inst.instruction == T_MNEM_stmia
9111 && (inst.operands[1].imm & (1 << inst.operands[0].reg))
9112 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
9113 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9114 inst.operands[0].reg);
9115
9116 inst.instruction = THUMB_OP16 (inst.instruction);
9117 inst.instruction |= inst.operands[0].reg << 8;
9118 inst.instruction |= inst.operands[1].imm;
9119 }
9120 else
9121 {
9122 if (inst.operands[1].imm & (1 << 13))
9123 as_warn (_("SP should not be in register list"));
9124 if (inst.instruction == T_MNEM_stmia)
9125 {
9126 if (inst.operands[1].imm & (1 << 15))
9127 as_warn (_("PC should not be in register list"));
9128 if (inst.operands[1].imm & (1 << inst.operands[0].reg))
9129 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9130 inst.operands[0].reg);
9131 }
9132 else
9133 {
9134 if (inst.operands[1].imm & (1 << 14)
9135 && inst.operands[1].imm & (1 << 15))
9136 as_warn (_("LR and PC should not both be in register list"));
9137 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
9138 && inst.operands[0].writeback)
9139 as_warn (_("base register should not be in register list "
9140 "when written back"));
9141 }
9142 if (inst.instruction < 0xffff)
9143 inst.instruction = THUMB_OP32 (inst.instruction);
9144 inst.instruction |= inst.operands[0].reg << 16;
9145 inst.instruction |= inst.operands[1].imm;
9146 if (inst.operands[0].writeback)
9147 inst.instruction |= WRITE_BACK;
9148 }
9149 }
9150 else
9151 {
9152 constraint (inst.operands[0].reg > 7
9153 || (inst.operands[1].imm & ~0xff), BAD_HIREG);
9154 constraint (inst.instruction != T_MNEM_ldmia
9155 && inst.instruction != T_MNEM_stmia,
9156 _("Thumb-2 instruction only valid in unified syntax"));
9157 if (inst.instruction == T_MNEM_stmia)
9158 {
9159 if (!inst.operands[0].writeback)
9160 as_warn (_("this instruction will write back the base register"));
9161 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
9162 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
9163 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9164 inst.operands[0].reg);
9165 }
9166 else
9167 {
9168 if (!inst.operands[0].writeback
9169 && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
9170 as_warn (_("this instruction will write back the base register"));
9171 else if (inst.operands[0].writeback
9172 && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
9173 as_warn (_("this instruction will not write back the base register"));
9174 }
9175
9176 inst.instruction = THUMB_OP16 (inst.instruction);
9177 inst.instruction |= inst.operands[0].reg << 8;
9178 inst.instruction |= inst.operands[1].imm;
9179 }
9180 }
9181
9182 static void
9183 do_t_ldrex (void)
9184 {
9185 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
9186 || inst.operands[1].postind || inst.operands[1].writeback
9187 || inst.operands[1].immisreg || inst.operands[1].shifted
9188 || inst.operands[1].negative,
9189 BAD_ADDR_MODE);
9190
9191 inst.instruction |= inst.operands[0].reg << 12;
9192 inst.instruction |= inst.operands[1].reg << 16;
9193 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
9194 }
9195
9196 static void
9197 do_t_ldrexd (void)
9198 {
9199 if (!inst.operands[1].present)
9200 {
9201 constraint (inst.operands[0].reg == REG_LR,
9202 _("r14 not allowed as first register "
9203 "when second register is omitted"));
9204 inst.operands[1].reg = inst.operands[0].reg + 1;
9205 }
9206 constraint (inst.operands[0].reg == inst.operands[1].reg,
9207 BAD_OVERLAP);
9208
9209 inst.instruction |= inst.operands[0].reg << 12;
9210 inst.instruction |= inst.operands[1].reg << 8;
9211 inst.instruction |= inst.operands[2].reg << 16;
9212 }
9213
9214 static void
9215 do_t_ldst (void)
9216 {
9217 unsigned long opcode;
9218 int Rn;
9219
9220 opcode = inst.instruction;
9221 if (unified_syntax)
9222 {
9223 if (!inst.operands[1].isreg)
9224 {
9225 if (opcode <= 0xffff)
9226 inst.instruction = THUMB_OP32 (opcode);
9227 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
9228 return;
9229 }
9230 if (inst.operands[1].isreg
9231 && !inst.operands[1].writeback
9232 && !inst.operands[1].shifted && !inst.operands[1].postind
9233 && !inst.operands[1].negative && inst.operands[0].reg <= 7
9234 && opcode <= 0xffff
9235 && inst.size_req != 4)
9236 {
9237 /* Insn may have a 16-bit form. */
9238 Rn = inst.operands[1].reg;
9239 if (inst.operands[1].immisreg)
9240 {
9241 inst.instruction = THUMB_OP16 (opcode);
9242 /* [Rn, Ri] */
9243 if (Rn <= 7 && inst.operands[1].imm <= 7)
9244 goto op16;
9245 }
9246 else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
9247 && opcode != T_MNEM_ldrsb)
9248 || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
9249 || (Rn == REG_SP && opcode == T_MNEM_str))
9250 {
9251 /* [Rn, #const] */
9252 if (Rn > 7)
9253 {
9254 if (Rn == REG_PC)
9255 {
9256 if (inst.reloc.pc_rel)
9257 opcode = T_MNEM_ldr_pc2;
9258 else
9259 opcode = T_MNEM_ldr_pc;
9260 }
9261 else
9262 {
9263 if (opcode == T_MNEM_ldr)
9264 opcode = T_MNEM_ldr_sp;
9265 else
9266 opcode = T_MNEM_str_sp;
9267 }
9268 inst.instruction = inst.operands[0].reg << 8;
9269 }
9270 else
9271 {
9272 inst.instruction = inst.operands[0].reg;
9273 inst.instruction |= inst.operands[1].reg << 3;
9274 }
9275 inst.instruction |= THUMB_OP16 (opcode);
9276 if (inst.size_req == 2)
9277 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
9278 else
9279 inst.relax = opcode;
9280 return;
9281 }
9282 }
9283 /* Definitely a 32-bit variant. */
9284 inst.instruction = THUMB_OP32 (opcode);
9285 inst.instruction |= inst.operands[0].reg << 12;
9286 encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
9287 return;
9288 }
9289
9290 constraint (inst.operands[0].reg > 7, BAD_HIREG);
9291
9292 if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
9293 {
9294 /* Only [Rn,Rm] is acceptable. */
9295 constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
9296 constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
9297 || inst.operands[1].postind || inst.operands[1].shifted
9298 || inst.operands[1].negative,
9299 _("Thumb does not support this addressing mode"));
9300 inst.instruction = THUMB_OP16 (inst.instruction);
9301 goto op16;
9302 }
9303
9304 inst.instruction = THUMB_OP16 (inst.instruction);
9305 if (!inst.operands[1].isreg)
9306 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
9307 return;
9308
9309 constraint (!inst.operands[1].preind
9310 || inst.operands[1].shifted
9311 || inst.operands[1].writeback,
9312 _("Thumb does not support this addressing mode"));
9313 if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
9314 {
9315 constraint (inst.instruction & 0x0600,
9316 _("byte or halfword not valid for base register"));
9317 constraint (inst.operands[1].reg == REG_PC
9318 && !(inst.instruction & THUMB_LOAD_BIT),
9319 _("r15 based store not allowed"));
9320 constraint (inst.operands[1].immisreg,
9321 _("invalid base register for register offset"));
9322
9323 if (inst.operands[1].reg == REG_PC)
9324 inst.instruction = T_OPCODE_LDR_PC;
9325 else if (inst.instruction & THUMB_LOAD_BIT)
9326 inst.instruction = T_OPCODE_LDR_SP;
9327 else
9328 inst.instruction = T_OPCODE_STR_SP;
9329
9330 inst.instruction |= inst.operands[0].reg << 8;
9331 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
9332 return;
9333 }
9334
9335 constraint (inst.operands[1].reg > 7, BAD_HIREG);
9336 if (!inst.operands[1].immisreg)
9337 {
9338 /* Immediate offset. */
9339 inst.instruction |= inst.operands[0].reg;
9340 inst.instruction |= inst.operands[1].reg << 3;
9341 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
9342 return;
9343 }
9344
9345 /* Register offset. */
9346 constraint (inst.operands[1].imm > 7, BAD_HIREG);
9347 constraint (inst.operands[1].negative,
9348 _("Thumb does not support this addressing mode"));
9349
9350 op16:
9351 switch (inst.instruction)
9352 {
9353 case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
9354 case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
9355 case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
9356 case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
9357 case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
9358 case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
9359 case 0x5600 /* ldrsb */:
9360 case 0x5e00 /* ldrsh */: break;
9361 default: abort ();
9362 }
9363
9364 inst.instruction |= inst.operands[0].reg;
9365 inst.instruction |= inst.operands[1].reg << 3;
9366 inst.instruction |= inst.operands[1].imm << 6;
9367 }
9368
9369 static void
9370 do_t_ldstd (void)
9371 {
9372 if (!inst.operands[1].present)
9373 {
9374 inst.operands[1].reg = inst.operands[0].reg + 1;
9375 constraint (inst.operands[0].reg == REG_LR,
9376 _("r14 not allowed here"));
9377 }
9378 inst.instruction |= inst.operands[0].reg << 12;
9379 inst.instruction |= inst.operands[1].reg << 8;
9380 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
9381
9382 }
9383
9384 static void
9385 do_t_ldstt (void)
9386 {
9387 inst.instruction |= inst.operands[0].reg << 12;
9388 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
9389 }
9390
9391 static void
9392 do_t_mla (void)
9393 {
9394 inst.instruction |= inst.operands[0].reg << 8;
9395 inst.instruction |= inst.operands[1].reg << 16;
9396 inst.instruction |= inst.operands[2].reg;
9397 inst.instruction |= inst.operands[3].reg << 12;
9398 }
9399
9400 static void
9401 do_t_mlal (void)
9402 {
9403 inst.instruction |= inst.operands[0].reg << 12;
9404 inst.instruction |= inst.operands[1].reg << 8;
9405 inst.instruction |= inst.operands[2].reg << 16;
9406 inst.instruction |= inst.operands[3].reg;
9407 }
9408
9409 static void
9410 do_t_mov_cmp (void)
9411 {
9412 if (unified_syntax)
9413 {
9414 int r0off = (inst.instruction == T_MNEM_mov
9415 || inst.instruction == T_MNEM_movs) ? 8 : 16;
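      /* MOV/MOVS put the register operand in the Rd field (bits 8-11);
         CMP puts it in the Rn field (bits 16-19).  */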
9416 unsigned long opcode;
9417 bfd_boolean narrow;
9418 bfd_boolean low_regs;
9419
9420 low_regs = (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7);
9421 opcode = inst.instruction;
9422 if (current_it_mask)
9423 narrow = opcode != T_MNEM_movs;
9424 else
9425 narrow = opcode != T_MNEM_movs || low_regs;
9426 if (inst.size_req == 4
9427 || inst.operands[1].shifted)
9428 narrow = FALSE;
9429
9430 if (!inst.operands[1].isreg)
9431 {
9432 /* Immediate operand. */
9433 if (current_it_mask == 0 && opcode == T_MNEM_mov)
9434 narrow = 0;
9435 if (low_regs && narrow)
9436 {
9437 inst.instruction = THUMB_OP16 (opcode);
9438 inst.instruction |= inst.operands[0].reg << 8;
9439 if (inst.size_req == 2)
9440 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
9441 else
9442 inst.relax = opcode;
9443 }
9444 else
9445 {
9446 inst.instruction = THUMB_OP32 (inst.instruction);
9447 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9448 inst.instruction |= inst.operands[0].reg << r0off;
9449 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9450 }
9451 }
9452 else if (!narrow)
9453 {
9454 inst.instruction = THUMB_OP32 (inst.instruction);
9455 inst.instruction |= inst.operands[0].reg << r0off;
9456 encode_thumb32_shifted_operand (1);
9457 }
9458 else
9459 switch (inst.instruction)
9460 {
9461 case T_MNEM_mov:
9462 inst.instruction = T_OPCODE_MOV_HR;
9463 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
9464 inst.instruction |= (inst.operands[0].reg & 0x7);
9465 inst.instruction |= inst.operands[1].reg << 3;
9466 break;
9467
9468 case T_MNEM_movs:
9469 /* We know we have low registers at this point.
9470 Generate ADD Rd, Rs, #0. */
9471 inst.instruction = T_OPCODE_ADD_I3;
9472 inst.instruction |= inst.operands[0].reg;
9473 inst.instruction |= inst.operands[1].reg << 3;
9474 break;
9475
9476 case T_MNEM_cmp:
9477 if (low_regs)
9478 {
9479 inst.instruction = T_OPCODE_CMP_LR;
9480 inst.instruction |= inst.operands[0].reg;
9481 inst.instruction |= inst.operands[1].reg << 3;
9482 }
9483 else
9484 {
9485 inst.instruction = T_OPCODE_CMP_HR;
9486 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
9487 inst.instruction |= (inst.operands[0].reg & 0x7);
9488 inst.instruction |= inst.operands[1].reg << 3;
9489 }
9490 break;
9491 }
9492 return;
9493 }
9494
9495 inst.instruction = THUMB_OP16 (inst.instruction);
9496 if (inst.operands[1].isreg)
9497 {
9498 if (inst.operands[0].reg < 8 && inst.operands[1].reg < 8)
9499 {
9500 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
9501 since a MOV instruction produces unpredictable results. */
9502 if (inst.instruction == T_OPCODE_MOV_I8)
9503 inst.instruction = T_OPCODE_ADD_I3;
9504 else
9505 inst.instruction = T_OPCODE_CMP_LR;
9506
9507 inst.instruction |= inst.operands[0].reg;
9508 inst.instruction |= inst.operands[1].reg << 3;
9509 }
9510 else
9511 {
9512 if (inst.instruction == T_OPCODE_MOV_I8)
9513 inst.instruction = T_OPCODE_MOV_HR;
9514 else
9515 inst.instruction = T_OPCODE_CMP_HR;
9516 do_t_cpy ();
9517 }
9518 }
9519 else
9520 {
9521 constraint (inst.operands[0].reg > 7,
9522 _("only lo regs allowed with immediate"));
9523 inst.instruction |= inst.operands[0].reg << 8;
9524 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
9525 }
9526 }
9527
9528 static void
9529 do_t_mov16 (void)
9530 {
9531 bfd_vma imm;
9532 bfd_boolean top;
9533
9534 top = (inst.instruction & 0x00800000) != 0;
9535 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
9536 {
9537 constraint (top, _(":lower16: not allowed in this instruction"));
9538 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
9539 }
9540 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
9541 {
9542 constraint (!top, _(":upper16: not allowed in this instruction"));
9543 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
9544 }
9545
9546 inst.instruction |= inst.operands[0].reg << 8;
9547 if (inst.reloc.type == BFD_RELOC_UNUSED)
9548 {
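      /* Split the constant into the imm4:i:imm3:imm8 fields
         (insn bits 16-19, 26, 12-14 and 0-7 respectively).  */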
9549 imm = inst.reloc.exp.X_add_number;
9550 inst.instruction |= (imm & 0xf000) << 4;
9551 inst.instruction |= (imm & 0x0800) << 15;
9552 inst.instruction |= (imm & 0x0700) << 4;
9553 inst.instruction |= (imm & 0x00ff);
9554 }
9555 }
9556
9557 static void
9558 do_t_mvn_tst (void)
9559 {
9560 if (unified_syntax)
9561 {
9562 int r0off = (inst.instruction == T_MNEM_mvn
9563 || inst.instruction == T_MNEM_mvns) ? 8 : 16;
9564 bfd_boolean narrow;
9565
9566 if (inst.size_req == 4
9567 || inst.instruction > 0xffff
9568 || inst.operands[1].shifted
9569 || inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
9570 narrow = FALSE;
9571 else if (inst.instruction == T_MNEM_cmn)
9572 narrow = TRUE;
9573 else if (THUMB_SETS_FLAGS (inst.instruction))
9574 narrow = (current_it_mask == 0);
9575 else
9576 narrow = (current_it_mask != 0);
9577
9578 if (!inst.operands[1].isreg)
9579 {
9580 /* For an immediate, we always generate a 32-bit opcode;
9581 section relaxation will shrink it later if possible. */
9582 if (inst.instruction < 0xffff)
9583 inst.instruction = THUMB_OP32 (inst.instruction);
9584 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9585 inst.instruction |= inst.operands[0].reg << r0off;
9586 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9587 }
9588 else
9589 {
9590 /* See if we can do this with a 16-bit instruction. */
9591 if (narrow)
9592 {
9593 inst.instruction = THUMB_OP16 (inst.instruction);
9594 inst.instruction |= inst.operands[0].reg;
9595 inst.instruction |= inst.operands[1].reg << 3;
9596 }
9597 else
9598 {
9599 constraint (inst.operands[1].shifted
9600 && inst.operands[1].immisreg,
9601 _("shift must be constant"));
9602 if (inst.instruction < 0xffff)
9603 inst.instruction = THUMB_OP32 (inst.instruction);
9604 inst.instruction |= inst.operands[0].reg << r0off;
9605 encode_thumb32_shifted_operand (1);
9606 }
9607 }
9608 }
9609 else
9610 {
9611 constraint (inst.instruction > 0xffff
9612 || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
9613 constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
9614 _("unshifted register required"));
9615 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
9616 BAD_HIREG);
9617
9618 inst.instruction = THUMB_OP16 (inst.instruction);
9619 inst.instruction |= inst.operands[0].reg;
9620 inst.instruction |= inst.operands[1].reg << 3;
9621 }
9622 }
9623
9624 static void
9625 do_t_mrs (void)
9626 {
9627 int flags;
9628
9629 if (do_vfp_nsyn_mrs () == SUCCESS)
9630 return;
9631
9632 flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
9633 if (flags == 0)
9634 {
9635 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
9636 _("selected processor does not support "
9637 "requested special purpose register"));
9638 }
9639 else
9640 {
9641 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
9642 _("selected processor does not support "
9643 "requested special purpose register %x"));
9644 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
9645 constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
9646 _("'CPSR' or 'SPSR' expected"));
9647 }
9648
9649 inst.instruction |= inst.operands[0].reg << 8;
9650 inst.instruction |= (flags & SPSR_BIT) >> 2;
9651 inst.instruction |= inst.operands[1].imm & 0xff;
9652 }
9653
9654 static void
9655 do_t_msr (void)
9656 {
9657 int flags;
9658
9659 if (do_vfp_nsyn_msr () == SUCCESS)
9660 return;
9661
9662 constraint (!inst.operands[1].isreg,
9663 _("Thumb encoding does not support an immediate here"));
9664 flags = inst.operands[0].imm;
9665 if (flags & ~0xff)
9666 {
9667 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
9668 _("selected processor does not support "
9669 "requested special purpose register"));
9670 }
9671 else
9672 {
9673 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
9674 _("selected processor does not support "
9675 "requested special purpose register"));
9676 flags |= PSR_f;
9677 }
9678 inst.instruction |= (flags & SPSR_BIT) >> 2;
9679 inst.instruction |= (flags & ~SPSR_BIT) >> 8;
9680 inst.instruction |= (flags & 0xff);
9681 inst.instruction |= inst.operands[1].reg << 16;
9682 }
9683
9684 static void
9685 do_t_mul (void)
9686 {
9687 if (!inst.operands[2].present)
9688 inst.operands[2].reg = inst.operands[0].reg;
9689
9690 /* There is no 32-bit MULS and no 16-bit MUL. */
9691 if (unified_syntax && inst.instruction == T_MNEM_mul)
9692 {
9693 inst.instruction = THUMB_OP32 (inst.instruction);
9694 inst.instruction |= inst.operands[0].reg << 8;
9695 inst.instruction |= inst.operands[1].reg << 16;
9696 inst.instruction |= inst.operands[2].reg << 0;
9697 }
9698 else
9699 {
9700 constraint (!unified_syntax
9701 && inst.instruction == T_MNEM_muls, BAD_THUMB32);
9702 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
9703 BAD_HIREG);
9704
9705 inst.instruction = THUMB_OP16 (inst.instruction);
9706 inst.instruction |= inst.operands[0].reg;
9707
9708 if (inst.operands[0].reg == inst.operands[1].reg)
9709 inst.instruction |= inst.operands[2].reg << 3;
9710 else if (inst.operands[0].reg == inst.operands[2].reg)
9711 inst.instruction |= inst.operands[1].reg << 3;
9712 else
9713 constraint (1, _("dest must overlap one source register"));
9714 }
9715 }
9716
9717 static void
9718 do_t_mull (void)
9719 {
9720 inst.instruction |= inst.operands[0].reg << 12;
9721 inst.instruction |= inst.operands[1].reg << 8;
9722 inst.instruction |= inst.operands[2].reg << 16;
9723 inst.instruction |= inst.operands[3].reg;
9724
9725 if (inst.operands[0].reg == inst.operands[1].reg)
9726 as_tsktsk (_("rdhi and rdlo must be different"));
9727 }
9728
9729 static void
9730 do_t_nop (void)
9731 {
9732 if (unified_syntax)
9733 {
9734 if (inst.size_req == 4 || inst.operands[0].imm > 15)
9735 {
9736 inst.instruction = THUMB_OP32 (inst.instruction);
9737 inst.instruction |= inst.operands[0].imm;
9738 }
9739 else
9740 {
9741 inst.instruction = THUMB_OP16 (inst.instruction);
9742 inst.instruction |= inst.operands[0].imm << 4;
9743 }
9744 }
9745 else
9746 {
9747 constraint (inst.operands[0].present,
9748 _("Thumb does not support NOP with hints"));
9749 inst.instruction = 0x46c0;
9750 }
9751 }
9752
9753 static void
9754 do_t_neg (void)
9755 {
9756 if (unified_syntax)
9757 {
9758 bfd_boolean narrow;
9759
9760 if (THUMB_SETS_FLAGS (inst.instruction))
9761 narrow = (current_it_mask == 0);
9762 else
9763 narrow = (current_it_mask != 0);
9764 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
9765 narrow = FALSE;
9766 if (inst.size_req == 4)
9767 narrow = FALSE;
9768
9769 if (!narrow)
9770 {
9771 inst.instruction = THUMB_OP32 (inst.instruction);
9772 inst.instruction |= inst.operands[0].reg << 8;
9773 inst.instruction |= inst.operands[1].reg << 16;
9774 }
9775 else
9776 {
9777 inst.instruction = THUMB_OP16 (inst.instruction);
9778 inst.instruction |= inst.operands[0].reg;
9779 inst.instruction |= inst.operands[1].reg << 3;
9780 }
9781 }
9782 else
9783 {
9784 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
9785 BAD_HIREG);
9786 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9787
9788 inst.instruction = THUMB_OP16 (inst.instruction);
9789 inst.instruction |= inst.operands[0].reg;
9790 inst.instruction |= inst.operands[1].reg << 3;
9791 }
9792 }
9793
9794 static void
9795 do_t_pkhbt (void)
9796 {
9797 inst.instruction |= inst.operands[0].reg << 8;
9798 inst.instruction |= inst.operands[1].reg << 16;
9799 inst.instruction |= inst.operands[2].reg;
9800 if (inst.operands[3].present)
9801 {
9802 unsigned int val = inst.reloc.exp.X_add_number;
9803 constraint (inst.reloc.exp.X_op != O_constant,
9804 _("expression too complex"));
9805 inst.instruction |= (val & 0x1c) << 10;
9806 inst.instruction |= (val & 0x03) << 6;
9807 }
9808 }
9809
9810 static void
9811 do_t_pkhtb (void)
9812 {
9813 if (!inst.operands[3].present)
9814 inst.instruction &= ~0x00000020;
9815 do_t_pkhbt ();
9816 }
9817
9818 static void
9819 do_t_pld (void)
9820 {
9821 encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
9822 }
9823
9824 static void
9825 do_t_push_pop (void)
9826 {
9827 unsigned mask;
9828
9829 constraint (inst.operands[0].writeback,
9830 _("push/pop do not support {reglist}^"));
9831 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
9832 _("expression too complex"));
9833
9834 mask = inst.operands[0].imm;
9835 if ((mask & ~0xff) == 0)
9836 inst.instruction = THUMB_OP16 (inst.instruction);
9837 else if ((inst.instruction == T_MNEM_push
9838 && (mask & ~0xff) == 1 << REG_LR)
9839 || (inst.instruction == T_MNEM_pop
9840 && (mask & ~0xff) == 1 << REG_PC))
9841 {
9842 inst.instruction = THUMB_OP16 (inst.instruction);
9843 inst.instruction |= THUMB_PP_PC_LR;
9844 mask &= 0xff;
9845 }
9846 else if (unified_syntax)
9847 {
9848 if (mask & (1 << 13))
9849 inst.error = _("SP not allowed in register list");
9850 if (inst.instruction == T_MNEM_push)
9851 {
9852 if (mask & (1 << 15))
9853 inst.error = _("PC not allowed in register list");
9854 }
9855 else
9856 {
9857 if (mask & (1 << 14)
9858 && mask & (1 << 15))
9859 inst.error = _("LR and PC should not both be in register list");
9860 }
9861 if ((mask & (mask - 1)) == 0)
9862 {
9863 /* Single register push/pop implemented as str/ldr. */
9864 if (inst.instruction == T_MNEM_push)
9865 inst.instruction = 0xf84d0d04; /* str reg, [sp, #-4]! */
9866 else
9867 inst.instruction = 0xf85d0b04; /* ldr reg, [sp], #4 */
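	  /* ffs converts the single-bit mask into a register number,
	     which then goes in the Rt field (bits 12-15).  */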
9868 mask = ffs (mask) - 1;
9869 mask <<= 12;
9870 }
9871 else
9872 inst.instruction = THUMB_OP32 (inst.instruction);
9873 }
9874 else
9875 {
9876 inst.error = _("invalid register list to push/pop instruction");
9877 return;
9878 }
9879
9880 inst.instruction |= mask;
9881 }
9882
9883 static void
9884 do_t_rbit (void)
9885 {
9886 inst.instruction |= inst.operands[0].reg << 8;
9887 inst.instruction |= inst.operands[1].reg << 16;
9888 }
9889
9890 static void
9891 do_t_rev (void)
9892 {
9893 if (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
9894 && inst.size_req != 4)
9895 {
9896 inst.instruction = THUMB_OP16 (inst.instruction);
9897 inst.instruction |= inst.operands[0].reg;
9898 inst.instruction |= inst.operands[1].reg << 3;
9899 }
9900 else if (unified_syntax)
9901 {
9902 inst.instruction = THUMB_OP32 (inst.instruction);
9903 inst.instruction |= inst.operands[0].reg << 8;
9904 inst.instruction |= inst.operands[1].reg << 16;
9905 inst.instruction |= inst.operands[1].reg;
9906 }
9907 else
9908 inst.error = BAD_HIREG;
9909 }
9910
9911 static void
9912 do_t_rsb (void)
9913 {
9914 int Rd, Rs;
9915
9916 Rd = inst.operands[0].reg;
9917 Rs = (inst.operands[1].present
9918 ? inst.operands[1].reg /* Rd, Rs, foo */
9919 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
9920
9921 inst.instruction |= Rd << 8;
9922 inst.instruction |= Rs << 16;
9923 if (!inst.operands[2].isreg)
9924 {
9925 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9926 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9927 }
9928 else
9929 encode_thumb32_shifted_operand (2);
9930 }
9931
9932 static void
9933 do_t_setend (void)
9934 {
9935 constraint (current_it_mask, BAD_NOT_IT);
9936 if (inst.operands[0].imm)
9937 inst.instruction |= 0x8;
9938 }
9939
9940 static void
9941 do_t_shift (void)
9942 {
9943 if (!inst.operands[1].present)
9944 inst.operands[1].reg = inst.operands[0].reg;
9945
9946 if (unified_syntax)
9947 {
9948 bfd_boolean narrow;
9949 int shift_kind;
9950
9951 switch (inst.instruction)
9952 {
9953 case T_MNEM_asr:
9954 case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
9955 case T_MNEM_lsl:
9956 case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
9957 case T_MNEM_lsr:
9958 case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
9959 case T_MNEM_ror:
9960 case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
9961 default: abort ();
9962 }
9963
9964 if (THUMB_SETS_FLAGS (inst.instruction))
9965 narrow = (current_it_mask == 0);
9966 else
9967 narrow = (current_it_mask != 0);
9968 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
9969 narrow = FALSE;
9970 if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
9971 narrow = FALSE;
9972 if (inst.operands[2].isreg
9973 && (inst.operands[1].reg != inst.operands[0].reg
9974 || inst.operands[2].reg > 7))
9975 narrow = FALSE;
9976 if (inst.size_req == 4)
9977 narrow = FALSE;
9978
9979 if (!narrow)
9980 {
9981 if (inst.operands[2].isreg)
9982 {
9983 inst.instruction = THUMB_OP32 (inst.instruction);
9984 inst.instruction |= inst.operands[0].reg << 8;
9985 inst.instruction |= inst.operands[1].reg << 16;
9986 inst.instruction |= inst.operands[2].reg;
9987 }
9988 else
9989 {
9990 inst.operands[1].shifted = 1;
9991 inst.operands[1].shift_kind = shift_kind;
9992 inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
9993 ? T_MNEM_movs : T_MNEM_mov);
9994 inst.instruction |= inst.operands[0].reg << 8;
9995 encode_thumb32_shifted_operand (1);
9996 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
9997 inst.reloc.type = BFD_RELOC_UNUSED;
9998 }
9999 }
10000 else
10001 {
10002 if (inst.operands[2].isreg)
10003 {
10004 switch (shift_kind)
10005 {
10006 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
10007 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
10008 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
10009 case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
10010 default: abort ();
10011 }
10012
10013 inst.instruction |= inst.operands[0].reg;
10014 inst.instruction |= inst.operands[2].reg << 3;
10015 }
10016 else
10017 {
10018 switch (shift_kind)
10019 {
10020 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
10021 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
10022 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
10023 default: abort ();
10024 }
10025 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
10026 inst.instruction |= inst.operands[0].reg;
10027 inst.instruction |= inst.operands[1].reg << 3;
10028 }
10029 }
10030 }
10031 else
10032 {
10033 constraint (inst.operands[0].reg > 7
10034 || inst.operands[1].reg > 7, BAD_HIREG);
10035 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
10036
10037 if (inst.operands[2].isreg) /* Rd, {Rs,} Rn */
10038 {
10039 constraint (inst.operands[2].reg > 7, BAD_HIREG);
10040 constraint (inst.operands[0].reg != inst.operands[1].reg,
10041 _("source1 and dest must be same register"));
10042
10043 switch (inst.instruction)
10044 {
10045 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
10046 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
10047 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
10048 case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
10049 default: abort ();
10050 }
10051
10052 inst.instruction |= inst.operands[0].reg;
10053 inst.instruction |= inst.operands[2].reg << 3;
10054 }
10055 else
10056 {
10057 switch (inst.instruction)
10058 {
10059 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
10060 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
10061 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
10062 case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
10063 default: abort ();
10064 }
10065 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
10066 inst.instruction |= inst.operands[0].reg;
10067 inst.instruction |= inst.operands[1].reg << 3;
10068 }
10069 }
10070 }
10071
10072 static void
10073 do_t_simd (void)
10074 {
10075 inst.instruction |= inst.operands[0].reg << 8;
10076 inst.instruction |= inst.operands[1].reg << 16;
10077 inst.instruction |= inst.operands[2].reg;
10078 }
10079
10080 static void
10081 do_t_smc (void)
10082 {
10083 unsigned int value = inst.reloc.exp.X_add_number;
10084 constraint (inst.reloc.exp.X_op != O_constant,
10085 _("expression too complex"));
10086 inst.reloc.type = BFD_RELOC_UNUSED;
10087 inst.instruction |= (value & 0xf000) >> 12;
10088 inst.instruction |= (value & 0x0ff0);
10089 inst.instruction |= (value & 0x000f) << 16;
10090 }
10091
10092 static void
10093 do_t_ssat (void)
10094 {
10095 inst.instruction |= inst.operands[0].reg << 8;
10096 inst.instruction |= inst.operands[1].imm - 1;
10097 inst.instruction |= inst.operands[2].reg << 16;
10098
10099 if (inst.operands[3].present)
10100 {
10101 constraint (inst.reloc.exp.X_op != O_constant,
10102 _("expression too complex"));
10103
10104 if (inst.reloc.exp.X_add_number != 0)
10105 {
10106 if (inst.operands[3].shift_kind == SHIFT_ASR)
10107 inst.instruction |= 0x00200000; /* sh bit */
10108 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
10109 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
10110 }
10111 inst.reloc.type = BFD_RELOC_UNUSED;
10112 }
10113 }
10114
10115 static void
10116 do_t_ssat16 (void)
10117 {
10118 inst.instruction |= inst.operands[0].reg << 8;
10119 inst.instruction |= inst.operands[1].imm - 1;
10120 inst.instruction |= inst.operands[2].reg << 16;
10121 }
10122
10123 static void
10124 do_t_strex (void)
10125 {
10126 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
10127 || inst.operands[2].postind || inst.operands[2].writeback
10128 || inst.operands[2].immisreg || inst.operands[2].shifted
10129 || inst.operands[2].negative,
10130 BAD_ADDR_MODE);
10131
10132 inst.instruction |= inst.operands[0].reg << 8;
10133 inst.instruction |= inst.operands[1].reg << 12;
10134 inst.instruction |= inst.operands[2].reg << 16;
10135 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
10136 }
10137
10138 static void
10139 do_t_strexd (void)
10140 {
10141 if (!inst.operands[2].present)
10142 inst.operands[2].reg = inst.operands[1].reg + 1;
10143
10144 constraint (inst.operands[0].reg == inst.operands[1].reg
10145 || inst.operands[0].reg == inst.operands[2].reg
10146 || inst.operands[0].reg == inst.operands[3].reg
10147 || inst.operands[1].reg == inst.operands[2].reg,
10148 BAD_OVERLAP);
10149
10150 inst.instruction |= inst.operands[0].reg;
10151 inst.instruction |= inst.operands[1].reg << 12;
10152 inst.instruction |= inst.operands[2].reg << 8;
10153 inst.instruction |= inst.operands[3].reg << 16;
10154 }
10155
10156 static void
10157 do_t_sxtah (void)
10158 {
10159 inst.instruction |= inst.operands[0].reg << 8;
10160 inst.instruction |= inst.operands[1].reg << 16;
10161 inst.instruction |= inst.operands[2].reg;
10162 inst.instruction |= inst.operands[3].imm << 4;
10163 }
10164
10165 static void
10166 do_t_sxth (void)
10167 {
10168 if (inst.instruction <= 0xffff && inst.size_req != 4
10169 && inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
10170 && (!inst.operands[2].present || inst.operands[2].imm == 0))
10171 {
10172 inst.instruction = THUMB_OP16 (inst.instruction);
10173 inst.instruction |= inst.operands[0].reg;
10174 inst.instruction |= inst.operands[1].reg << 3;
10175 }
10176 else if (unified_syntax)
10177 {
10178 if (inst.instruction <= 0xffff)
10179 inst.instruction = THUMB_OP32 (inst.instruction);
10180 inst.instruction |= inst.operands[0].reg << 8;
10181 inst.instruction |= inst.operands[1].reg;
10182 inst.instruction |= inst.operands[2].imm << 4;
10183 }
10184 else
10185 {
10186 constraint (inst.operands[2].present && inst.operands[2].imm != 0,
10187 _("Thumb encoding does not support rotation"));
10188 constraint (1, BAD_HIREG);
10189 }
10190 }
10191
10192 static void
10193 do_t_swi (void)
10194 {
10195 inst.reloc.type = BFD_RELOC_ARM_SWI;
10196 }
10197
10198 static void
10199 do_t_tb (void)
10200 {
10201 int half;
10202
10203 half = (inst.instruction & 0x10) != 0;
10204 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
10205 constraint (inst.operands[0].immisreg,
10206 _("instruction requires register index"));
10207 constraint (inst.operands[0].imm == 15,
10208 _("PC is not a valid index register"));
10209 constraint (!half && inst.operands[0].shifted,
10210 _("instruction does not allow shifted index"));
10211 inst.instruction |= (inst.operands[0].reg << 16) | inst.operands[0].imm;
10212 }
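
/* For reference, the forms accepted here look like (illustrative only):
     tbb [Rn, Rm]
     tbh [Rn, Rm, LSL #1]
   where a shifted index is only permitted for the halfword (tbh) form,
   matching the constraints checked above.  */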
10213
10214 static void
10215 do_t_usat (void)
10216 {
10217 inst.instruction |= inst.operands[0].reg << 8;
10218 inst.instruction |= inst.operands[1].imm;
10219 inst.instruction |= inst.operands[2].reg << 16;
10220
10221 if (inst.operands[3].present)
10222 {
10223 constraint (inst.reloc.exp.X_op != O_constant,
10224 _("expression too complex"));
10225 if (inst.reloc.exp.X_add_number != 0)
10226 {
10227 if (inst.operands[3].shift_kind == SHIFT_ASR)
10228 inst.instruction |= 0x00200000; /* sh bit */
10229
10230 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
10231 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
10232 }
10233 inst.reloc.type = BFD_RELOC_UNUSED;
10234 }
10235 }
10236
10237 static void
10238 do_t_usat16 (void)
10239 {
10240 inst.instruction |= inst.operands[0].reg << 8;
10241 inst.instruction |= inst.operands[1].imm;
10242 inst.instruction |= inst.operands[2].reg << 16;
10243 }
10244
10245 /* Neon instruction encoder helpers. */
10246
10247 /* Encodings for the different types for various Neon opcodes. */
10248
10249 /* An "invalid" code for the following tables. */
10250 #define N_INV -1u
10251
10252 struct neon_tab_entry
10253 {
10254 unsigned integer;
10255 unsigned float_or_poly;
10256 unsigned scalar_or_imm;
10257 };
10258
10259 /* Map overloaded Neon opcodes to their respective encodings. */
10260 #define NEON_ENC_TAB \
10261 X(vabd, 0x0000700, 0x1200d00, N_INV), \
10262 X(vmax, 0x0000600, 0x0000f00, N_INV), \
10263 X(vmin, 0x0000610, 0x0200f00, N_INV), \
10264 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
10265 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
10266 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
10267 X(vadd, 0x0000800, 0x0000d00, N_INV), \
10268 X(vsub, 0x1000800, 0x0200d00, N_INV), \
10269 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
10270 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
10271 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
10272 /* Register variants of the following two instructions are encoded as
10273 vcge / vcgt with the operands reversed. */ \
10274 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
10275 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
10276 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
10277 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
10278 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
10279 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
10280 X(vmlal, 0x0800800, N_INV, 0x0800240), \
10281 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
10282 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
10283 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
10284 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
10285 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
10286 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
10287 X(vshl, 0x0000400, N_INV, 0x0800510), \
10288 X(vqshl, 0x0000410, N_INV, 0x0800710), \
10289 X(vand, 0x0000110, N_INV, 0x0800030), \
10290 X(vbic, 0x0100110, N_INV, 0x0800030), \
10291 X(veor, 0x1000110, N_INV, N_INV), \
10292 X(vorn, 0x0300110, N_INV, 0x0800010), \
10293 X(vorr, 0x0200110, N_INV, 0x0800010), \
10294 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
10295 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
10296 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
10297 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
10298 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
10299 X(vst1, 0x0000000, 0x0800000, N_INV), \
10300 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
10301 X(vst2, 0x0000100, 0x0800100, N_INV), \
10302 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
10303 X(vst3, 0x0000200, 0x0800200, N_INV), \
10304 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
10305 X(vst4, 0x0000300, 0x0800300, N_INV), \
10306 X(vmovn, 0x1b20200, N_INV, N_INV), \
10307 X(vtrn, 0x1b20080, N_INV, N_INV), \
10308 X(vqmovn, 0x1b20200, N_INV, N_INV), \
10309 X(vqmovun, 0x1b20240, N_INV, N_INV), \
10310 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
10311 X(vnmla, 0xe000a40, 0xe000b40, N_INV), \
10312 X(vnmls, 0xe100a40, 0xe100b40, N_INV), \
10313 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
10314 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
10315 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
10316 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV)
10317
10318 enum neon_opc
10319 {
10320 #define X(OPC,I,F,S) N_MNEM_##OPC
10321 NEON_ENC_TAB
10322 #undef X
10323 };
10324
10325 static const struct neon_tab_entry neon_enc_tab[] =
10326 {
10327 #define X(OPC,I,F,S) { (I), (F), (S) }
10328 NEON_ENC_TAB
10329 #undef X
10330 };
10331
10332 #define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
10333 #define NEON_ENC_ARMREG(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
10334 #define NEON_ENC_POLY(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
10335 #define NEON_ENC_FLOAT(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
10336 #define NEON_ENC_SCALAR(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
10337 #define NEON_ENC_IMMED(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
10338 #define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
10339 #define NEON_ENC_LANE(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
10340 #define NEON_ENC_DUP(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
10341 #define NEON_ENC_SINGLE(X) \
10342 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
10343 #define NEON_ENC_DOUBLE(X) \
10344 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
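
/* A rough sketch of how the table and macros above fit together (using the
   "vadd" row for illustration): the X entry

     X(vadd, 0x0000800, 0x0000d00, N_INV)

   produces the enum value N_MNEM_vadd and the table entry
   { 0x0000800, 0x0000d00, N_INV }, so NEON_ENC_INTEGER (N_MNEM_vadd) yields
   0x0000800 (the integer form) and NEON_ENC_FLOAT (N_MNEM_vadd) yields
   0x0000d00 (the float form).  The "& 0x0fffffff" masks off the top nibble
   of inst.instruction, which NEON_ENC_SINGLE / NEON_ENC_DOUBLE put back.  */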
10345
10346 /* Define shapes for instruction operands. The following mnemonic characters
10347 are used in this table:
10348
10349 F - VFP S<n> register
10350 D - Neon D<n> register
10351 Q - Neon Q<n> register
10352 I - Immediate
10353 S - Scalar
10354 R - ARM register
10355 L - D<n> register list
10356
10357 This table is used to generate various data:
10358 - enumerations of the form NS_DDR to be used as arguments to
10359 neon_select_shape.
10360 - a table classifying shapes into single, double, quad, mixed.
10361 - a table used to drive neon_select_shape.
10362 */
10363
10364 #define NEON_SHAPE_DEF \
10365 X(3, (D, D, D), DOUBLE), \
10366 X(3, (Q, Q, Q), QUAD), \
10367 X(3, (D, D, I), DOUBLE), \
10368 X(3, (Q, Q, I), QUAD), \
10369 X(3, (D, D, S), DOUBLE), \
10370 X(3, (Q, Q, S), QUAD), \
10371 X(2, (D, D), DOUBLE), \
10372 X(2, (Q, Q), QUAD), \
10373 X(2, (D, S), DOUBLE), \
10374 X(2, (Q, S), QUAD), \
10375 X(2, (D, R), DOUBLE), \
10376 X(2, (Q, R), QUAD), \
10377 X(2, (D, I), DOUBLE), \
10378 X(2, (Q, I), QUAD), \
10379 X(3, (D, L, D), DOUBLE), \
10380 X(2, (D, Q), MIXED), \
10381 X(2, (Q, D), MIXED), \
10382 X(3, (D, Q, I), MIXED), \
10383 X(3, (Q, D, I), MIXED), \
10384 X(3, (Q, D, D), MIXED), \
10385 X(3, (D, Q, Q), MIXED), \
10386 X(3, (Q, Q, D), MIXED), \
10387 X(3, (Q, D, S), MIXED), \
10388 X(3, (D, Q, S), MIXED), \
10389 X(4, (D, D, D, I), DOUBLE), \
10390 X(4, (Q, Q, Q, I), QUAD), \
10391 X(2, (F, F), SINGLE), \
10392 X(3, (F, F, F), SINGLE), \
10393 X(2, (F, I), SINGLE), \
10394 X(2, (F, D), MIXED), \
10395 X(2, (D, F), MIXED), \
10396 X(3, (F, F, I), MIXED), \
10397 X(4, (R, R, F, F), SINGLE), \
10398 X(4, (F, F, R, R), SINGLE), \
10399 X(3, (D, R, R), DOUBLE), \
10400 X(3, (R, R, D), DOUBLE), \
10401 X(2, (S, R), SINGLE), \
10402 X(2, (R, S), SINGLE), \
10403 X(2, (F, R), SINGLE), \
10404 X(2, (R, F), SINGLE)
10405
10406 #define S2(A,B) NS_##A##B
10407 #define S3(A,B,C) NS_##A##B##C
10408 #define S4(A,B,C,D) NS_##A##B##C##D
10409
10410 #define X(N, L, C) S##N L
10411
10412 enum neon_shape
10413 {
10414 NEON_SHAPE_DEF,
10415 NS_NULL
10416 };
10417
10418 #undef X
10419 #undef S2
10420 #undef S3
10421 #undef S4
10422
10423 enum neon_shape_class
10424 {
10425 SC_SINGLE,
10426 SC_DOUBLE,
10427 SC_QUAD,
10428 SC_MIXED
10429 };
10430
10431 #define X(N, L, C) SC_##C
10432
10433 static enum neon_shape_class neon_shape_class[] =
10434 {
10435 NEON_SHAPE_DEF
10436 };
10437
10438 #undef X
10439
10440 enum neon_shape_el
10441 {
10442 SE_F,
10443 SE_D,
10444 SE_Q,
10445 SE_I,
10446 SE_S,
10447 SE_R,
10448 SE_L
10449 };
10450
10451 /* Register widths of above. */
10452 static unsigned neon_shape_el_size[] =
10453 {
10454 32,
10455 64,
10456 128,
10457 0,
10458 32,
10459 32,
10460 0
10461 };
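
/* (The widths above correspond, in order, to SE_F (32), SE_D (64), SE_Q (128),
   SE_I (0), SE_S (32), SE_R (32) and SE_L (0).)  */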
10462
10463 struct neon_shape_info
10464 {
10465 unsigned els;
10466 enum neon_shape_el el[NEON_MAX_TYPE_ELS];
10467 };
10468
10469 #define S2(A,B) { SE_##A, SE_##B }
10470 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
10471 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
10472
10473 #define X(N, L, C) { N, S##N L }
10474
10475 static struct neon_shape_info neon_shape_tab[] =
10476 {
10477 NEON_SHAPE_DEF
10478 };
10479
10480 #undef X
10481 #undef S2
10482 #undef S3
10483 #undef S4
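
/* Illustrative expansion (a sketch, not generated code): each NEON_SHAPE_DEF
   row is expanded three times above, once per definition of X.  For example

     X(3, (D, D, D), DOUBLE)

   becomes, respectively:

     NS_DDD                         (enum neon_shape)
     SC_DOUBLE                      (neon_shape_class[NS_DDD])
     { 3, { SE_D, SE_D, SE_D } }    (neon_shape_tab[NS_DDD])  */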
10484
10485 /* Bit masks used in type checking given instructions.
10486 'N_EQK' means the type must be the same as (or based on in some way) the key
10487 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
10488 set, various other bits can be set as well in order to modify the meaning of
10489 the type constraint. */
10490
10491 enum neon_type_mask
10492 {
10493 N_S8 = 0x000001,
10494 N_S16 = 0x000002,
10495 N_S32 = 0x000004,
10496 N_S64 = 0x000008,
10497 N_U8 = 0x000010,
10498 N_U16 = 0x000020,
10499 N_U32 = 0x000040,
10500 N_U64 = 0x000080,
10501 N_I8 = 0x000100,
10502 N_I16 = 0x000200,
10503 N_I32 = 0x000400,
10504 N_I64 = 0x000800,
10505 N_8 = 0x001000,
10506 N_16 = 0x002000,
10507 N_32 = 0x004000,
10508 N_64 = 0x008000,
10509 N_P8 = 0x010000,
10510 N_P16 = 0x020000,
10511 N_F32 = 0x040000,
10512 N_F64 = 0x080000,
10513 N_KEY = 0x100000, /* key element (main type specifier). */
10514 N_EQK = 0x200000, /* given operand has the same type & size as the key. */
10515 N_VFP = 0x400000, /* VFP mode: operand size must match register width. */
10516 N_DBL = 0x000001, /* if N_EQK, this operand is twice the size. */
10517 N_HLF = 0x000002, /* if N_EQK, this operand is half the size. */
10518 N_SGN = 0x000004, /* if N_EQK, this operand is forced to be signed. */
10519 N_UNS = 0x000008, /* if N_EQK, this operand is forced to be unsigned. */
10520 N_INT = 0x000010, /* if N_EQK, this operand is forced to be integer. */
10521 N_FLT = 0x000020, /* if N_EQK, this operand is forced to be float. */
10522 N_SIZ = 0x000040, /* if N_EQK, this operand is forced to be size-only. */
10523 N_UTYP = 0,
10524 N_MAX_NONSPECIAL = N_F64
10525 };
10526
10527 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
10528
10529 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
10530 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
10531 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
10532 #define N_SUF_32 (N_SU_32 | N_F32)
10533 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
10534 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
10535
10536 /* Pass this as the first type argument to neon_check_type to ignore types
10537 altogether. */
10538 #define N_IGNORE_TYPE (N_KEY | N_EQK)
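
/* As a sketch of how these masks are used (see neon_check_type below), a call
   such as

     neon_check_type (3, rs, N_EQK, N_EQK, N_SU_32 | N_KEY);

   marks operand 2 as the key, requires its type to be one of S8/S16/S32/U8/
   U16/U32, and constrains operands 0 and 1 to have the same type and size as
   that key.  */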
10539
10540 /* Select a "shape" for the current instruction (describing register types or
10541 sizes) from a list of alternatives. Return NS_NULL if the current instruction
10542 doesn't fit. For non-polymorphic shapes, checking is usually done as a
10543 function of operand parsing, so this function doesn't need to be called.
10544 Shapes should be listed in order of decreasing length. */
10545
10546 static enum neon_shape
10547 neon_select_shape (enum neon_shape shape, ...)
10548 {
10549 va_list ap;
10550 enum neon_shape first_shape = shape;
10551
10552 /* Fix missing optional operands. FIXME: we don't know at this point how
10553 many arguments we should have, so this makes the assumption that we have
10554 > 1. This is true of all current Neon opcodes, I think, but may not be
10555 true in the future. */
10556 if (!inst.operands[1].present)
10557 inst.operands[1] = inst.operands[0];
10558
10559 va_start (ap, shape);
10560
10561 for (; shape != NS_NULL; shape = va_arg (ap, int))
10562 {
10563 unsigned j;
10564 int matches = 1;
10565
10566 for (j = 0; j < neon_shape_tab[shape].els; j++)
10567 {
10568 if (!inst.operands[j].present)
10569 {
10570 matches = 0;
10571 break;
10572 }
10573
10574 switch (neon_shape_tab[shape].el[j])
10575 {
10576 case SE_F:
10577 if (!(inst.operands[j].isreg
10578 && inst.operands[j].isvec
10579 && inst.operands[j].issingle
10580 && !inst.operands[j].isquad))
10581 matches = 0;
10582 break;
10583
10584 case SE_D:
10585 if (!(inst.operands[j].isreg
10586 && inst.operands[j].isvec
10587 && !inst.operands[j].isquad
10588 && !inst.operands[j].issingle))
10589 matches = 0;
10590 break;
10591
10592 case SE_R:
10593 if (!(inst.operands[j].isreg
10594 && !inst.operands[j].isvec))
10595 matches = 0;
10596 break;
10597
10598 case SE_Q:
10599 if (!(inst.operands[j].isreg
10600 && inst.operands[j].isvec
10601 && inst.operands[j].isquad
10602 && !inst.operands[j].issingle))
10603 matches = 0;
10604 break;
10605
10606 case SE_I:
10607 if (!(!inst.operands[j].isreg
10608 && !inst.operands[j].isscalar))
10609 matches = 0;
10610 break;
10611
10612 case SE_S:
10613 if (!(!inst.operands[j].isreg
10614 && inst.operands[j].isscalar))
10615 matches = 0;
10616 break;
10617
10618 case SE_L:
10619 break;
10620 }
10621 }
10622 if (matches)
10623 break;
10624 }
10625
10626 va_end (ap);
10627
10628 if (shape == NS_NULL && first_shape != NS_NULL)
10629 first_error (_("invalid instruction shape"));
10630
10631 return shape;
10632 }
10633
10634 /* True if SHAPE is predominantly a quadword operation (most of the time, this
10635 means the Q bit should be set). */
10636
10637 static int
10638 neon_quad (enum neon_shape shape)
10639 {
10640 return neon_shape_class[shape] == SC_QUAD;
10641 }
10642
10643 static void
10644 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
10645 unsigned *g_size)
10646 {
10647 /* Allow modifications to be made to types which are constrained to be
10648 based on the key element, according to bits set alongside N_EQK. */
10649 if ((typebits & N_EQK) != 0)
10650 {
10651 if ((typebits & N_HLF) != 0)
10652 *g_size /= 2;
10653 else if ((typebits & N_DBL) != 0)
10654 *g_size *= 2;
10655 if ((typebits & N_SGN) != 0)
10656 *g_type = NT_signed;
10657 else if ((typebits & N_UNS) != 0)
10658 *g_type = NT_unsigned;
10659 else if ((typebits & N_INT) != 0)
10660 *g_type = NT_integer;
10661 else if ((typebits & N_FLT) != 0)
10662 *g_type = NT_float;
10663 else if ((typebits & N_SIZ) != 0)
10664 *g_type = NT_untyped;
10665 }
10666 }
10667
10668 /* Return a copy of the KEY operand type promoted according to the bits set
10669 in THISARG. KEY should be the "key" operand type, i.e. the single type
10670 specified in a Neon instruction when it is the only one given. */
10671
10672 static struct neon_type_el
10673 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
10674 {
10675 struct neon_type_el dest = *key;
10676
10677 assert ((thisarg & N_EQK) != 0);
10678
10679 neon_modify_type_size (thisarg, &dest.type, &dest.size);
10680
10681 return dest;
10682 }
10683
10684 /* Convert Neon type and size into compact bitmask representation. */
10685
10686 static enum neon_type_mask
10687 type_chk_of_el_type (enum neon_el_type type, unsigned size)
10688 {
10689 switch (type)
10690 {
10691 case NT_untyped:
10692 switch (size)
10693 {
10694 case 8: return N_8;
10695 case 16: return N_16;
10696 case 32: return N_32;
10697 case 64: return N_64;
10698 default: ;
10699 }
10700 break;
10701
10702 case NT_integer:
10703 switch (size)
10704 {
10705 case 8: return N_I8;
10706 case 16: return N_I16;
10707 case 32: return N_I32;
10708 case 64: return N_I64;
10709 default: ;
10710 }
10711 break;
10712
10713 case NT_float:
10714 switch (size)
10715 {
10716 case 32: return N_F32;
10717 case 64: return N_F64;
10718 default: ;
10719 }
10720 break;
10721
10722 case NT_poly:
10723 switch (size)
10724 {
10725 case 8: return N_P8;
10726 case 16: return N_P16;
10727 default: ;
10728 }
10729 break;
10730
10731 case NT_signed:
10732 switch (size)
10733 {
10734 case 8: return N_S8;
10735 case 16: return N_S16;
10736 case 32: return N_S32;
10737 case 64: return N_S64;
10738 default: ;
10739 }
10740 break;
10741
10742 case NT_unsigned:
10743 switch (size)
10744 {
10745 case 8: return N_U8;
10746 case 16: return N_U16;
10747 case 32: return N_U32;
10748 case 64: return N_U64;
10749 default: ;
10750 }
10751 break;
10752
10753 default: ;
10754 }
10755
10756 return N_UTYP;
10757 }
10758
10759 /* Convert compact Neon bitmask type representation to a type and size. Only
10760 handles the case where a single bit is set in the mask. */
10761
10762 static int
10763 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
10764 enum neon_type_mask mask)
10765 {
10766 if ((mask & N_EQK) != 0)
10767 return FAIL;
10768
10769 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
10770 *size = 8;
10771 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0)
10772 *size = 16;
10773 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
10774 *size = 32;
10775 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64)) != 0)
10776 *size = 64;
10777 else
10778 return FAIL;
10779
10780 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
10781 *type = NT_signed;
10782 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
10783 *type = NT_unsigned;
10784 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
10785 *type = NT_integer;
10786 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
10787 *type = NT_untyped;
10788 else if ((mask & (N_P8 | N_P16)) != 0)
10789 *type = NT_poly;
10790 else if ((mask & (N_F32 | N_F64)) != 0)
10791 *type = NT_float;
10792 else
10793 return FAIL;
10794
10795 return SUCCESS;
10796 }
10797
10798 /* Modify a bitmask of allowed types. This is only needed for type
10799 relaxation. */
10800
10801 static unsigned
10802 modify_types_allowed (unsigned allowed, unsigned mods)
10803 {
10804 unsigned size;
10805 enum neon_el_type type;
10806 unsigned destmask;
10807 int i;
10808
10809 destmask = 0;
10810
10811 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
10812 {
10813 if (el_type_of_type_chk (&type, &size, allowed & i) == SUCCESS)
10814 {
10815 neon_modify_type_size (mods, &type, &size);
10816 destmask |= type_chk_of_el_type (type, size);
10817 }
10818 }
10819
10820 return destmask;
10821 }
10822
10823 /* Check type and return type classification.
10824 The manual states (paraphrase): If one datatype is given, it indicates the
10825 type given in:
10826 - the second operand, if there is one
10827 - the operand, if there is no second operand
10828 - the result, if there are no operands.
10829 This isn't quite good enough though, so we use a concept of a "key" datatype
10830 which is set on a per-instruction basis, which is the one which matters when
10831 only one data type is written.
10832 Note: this function has side-effects (e.g. filling in missing operands). All
10833 Neon instructions should call it before performing bit encoding. */
10834
10835 static struct neon_type_el
10836 neon_check_type (unsigned els, enum neon_shape ns, ...)
10837 {
10838 va_list ap;
10839 unsigned i, pass, key_el = 0;
10840 unsigned types[NEON_MAX_TYPE_ELS];
10841 enum neon_el_type k_type = NT_invtype;
10842 unsigned k_size = -1u;
10843 struct neon_type_el badtype = {NT_invtype, -1};
10844 unsigned key_allowed = 0;
10845
10846 /* The optional register in a Neon instruction is always operand 1; if it
10847 was omitted, fill in the missing operand here by copying operand 0. */
10848 if (els > 1 && !inst.operands[1].present)
10849 inst.operands[1] = inst.operands[0];
10850
10851 /* Suck up all the varargs. */
10852 va_start (ap, ns);
10853 for (i = 0; i < els; i++)
10854 {
10855 unsigned thisarg = va_arg (ap, unsigned);
10856 if (thisarg == N_IGNORE_TYPE)
10857 {
10858 va_end (ap);
10859 return badtype;
10860 }
10861 types[i] = thisarg;
10862 if ((thisarg & N_KEY) != 0)
10863 key_el = i;
10864 }
10865 va_end (ap);
10866
10867 if (inst.vectype.elems > 0)
10868 for (i = 0; i < els; i++)
10869 if (inst.operands[i].vectype.type != NT_invtype)
10870 {
10871 first_error (_("types specified in both the mnemonic and operands"));
10872 return badtype;
10873 }
10874
10875 /* Duplicate inst.vectype elements here as necessary.
10876 FIXME: No idea if this is exactly the same as the ARM assembler,
10877 particularly when an insn takes one register and one non-register
10878 operand. */
10879 if (inst.vectype.elems == 1 && els > 1)
10880 {
10881 unsigned j;
10882 inst.vectype.elems = els;
10883 inst.vectype.el[key_el] = inst.vectype.el[0];
10884 for (j = 0; j < els; j++)
10885 if (j != key_el)
10886 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
10887 types[j]);
10888 }
10889 else if (inst.vectype.elems == 0 && els > 0)
10890 {
10891 unsigned j;
10892 /* No types were given after the mnemonic, so look for types specified
10893 after each operand. We allow some flexibility here; as long as the
10894 "key" operand has a type, we can infer the others. */
10895 for (j = 0; j < els; j++)
10896 if (inst.operands[j].vectype.type != NT_invtype)
10897 inst.vectype.el[j] = inst.operands[j].vectype;
10898
10899 if (inst.operands[key_el].vectype.type != NT_invtype)
10900 {
10901 for (j = 0; j < els; j++)
10902 if (inst.operands[j].vectype.type == NT_invtype)
10903 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
10904 types[j]);
10905 }
10906 else
10907 {
10908 first_error (_("operand types can't be inferred"));
10909 return badtype;
10910 }
10911 }
10912 else if (inst.vectype.elems != els)
10913 {
10914 first_error (_("type specifier has the wrong number of parts"));
10915 return badtype;
10916 }
10917
10918 for (pass = 0; pass < 2; pass++)
10919 {
10920 for (i = 0; i < els; i++)
10921 {
10922 unsigned thisarg = types[i];
10923 unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
10924 ? modify_types_allowed (key_allowed, thisarg) : thisarg;
10925 enum neon_el_type g_type = inst.vectype.el[i].type;
10926 unsigned g_size = inst.vectype.el[i].size;
10927
10928 /* Decay more-specific signed & unsigned types to sign-insensitive
10929 integer types if sign-specific variants are unavailable. */
10930 if ((g_type == NT_signed || g_type == NT_unsigned)
10931 && (types_allowed & N_SU_ALL) == 0)
10932 g_type = NT_integer;
10933
10934 /* If only untyped args are allowed, decay any more specific types to
10935 them. Some instructions only care about signs for some element
10936 sizes, so handle that properly. */
10937 if ((g_size == 8 && (types_allowed & N_8) != 0)
10938 || (g_size == 16 && (types_allowed & N_16) != 0)
10939 || (g_size == 32 && (types_allowed & N_32) != 0)
10940 || (g_size == 64 && (types_allowed & N_64) != 0))
10941 g_type = NT_untyped;
10942
10943 if (pass == 0)
10944 {
10945 if ((thisarg & N_KEY) != 0)
10946 {
10947 k_type = g_type;
10948 k_size = g_size;
10949 key_allowed = thisarg & ~N_KEY;
10950 }
10951 }
10952 else
10953 {
10954 if ((thisarg & N_VFP) != 0)
10955 {
10956 enum neon_shape_el regshape = neon_shape_tab[ns].el[i];
10957 unsigned regwidth = neon_shape_el_size[regshape], match;
10958
10959 /* In VFP mode, operands must match register widths. If we
10960 have a key operand, use its width, else use the width of
10961 the current operand. */
10962 if (k_size != -1u)
10963 match = k_size;
10964 else
10965 match = g_size;
10966
10967 if (regwidth != match)
10968 {
10969 first_error (_("operand size must match register width"));
10970 return badtype;
10971 }
10972 }
10973
10974 if ((thisarg & N_EQK) == 0)
10975 {
10976 unsigned given_type = type_chk_of_el_type (g_type, g_size);
10977
10978 if ((given_type & types_allowed) == 0)
10979 {
10980 first_error (_("bad type in Neon instruction"));
10981 return badtype;
10982 }
10983 }
10984 else
10985 {
10986 enum neon_el_type mod_k_type = k_type;
10987 unsigned mod_k_size = k_size;
10988 neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
10989 if (g_type != mod_k_type || g_size != mod_k_size)
10990 {
10991 first_error (_("inconsistent types in Neon instruction"));
10992 return badtype;
10993 }
10994 }
10995 }
10996 }
10997 }
10998
10999 return inst.vectype.el[key_el];
11000 }
11001
11002 /* Neon-style VFP instruction forwarding. */
11003
11004 /* Thumb VFP instructions have 0xE in the condition field. */
11005
11006 static void
11007 do_vfp_cond_or_thumb (void)
11008 {
11009 if (thumb_mode)
11010 inst.instruction |= 0xe0000000;
11011 else
11012 inst.instruction |= inst.cond << 28;
11013 }
11014
11015 /* Look up and encode a simple mnemonic, for use as a helper function for the
11016 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
11017 etc. It is assumed that operand parsing has already been done, and that the
11018 operands are in the form expected by the given opcode (this isn't necessarily
11019 the same as the form in which they were parsed, hence some massaging must
11020 take place before this function is called).
11021 Checks current arch version against that in the looked-up opcode. */
11022
11023 static void
11024 do_vfp_nsyn_opcode (const char *opname)
11025 {
11026 const struct asm_opcode *opcode;
11027
11028 opcode = hash_find (arm_ops_hsh, opname);
11029
11030 if (!opcode)
11031 abort ();
11032
11033 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
11034 thumb_mode ? *opcode->tvariant : *opcode->avariant),
11035 _(BAD_FPU));
11036
11037 if (thumb_mode)
11038 {
11039 inst.instruction = opcode->tvalue;
11040 opcode->tencode ();
11041 }
11042 else
11043 {
11044 inst.instruction = (inst.cond << 28) | opcode->avalue;
11045 opcode->aencode ();
11046 }
11047 }
11048
11049 static void
11050 do_vfp_nsyn_add_sub (enum neon_shape rs)
11051 {
11052 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
11053
11054 if (rs == NS_FFF)
11055 {
11056 if (is_add)
11057 do_vfp_nsyn_opcode ("fadds");
11058 else
11059 do_vfp_nsyn_opcode ("fsubs");
11060 }
11061 else
11062 {
11063 if (is_add)
11064 do_vfp_nsyn_opcode ("faddd");
11065 else
11066 do_vfp_nsyn_opcode ("fsubd");
11067 }
11068 }
11069
11070 /* Check operand types to see if this is a VFP instruction, and if so call
11071 PFN (). */
11072
11073 static int
11074 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
11075 {
11076 enum neon_shape rs;
11077 struct neon_type_el et;
11078
11079 switch (args)
11080 {
11081 case 2:
11082 rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
11083 et = neon_check_type (2, rs,
11084 N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11085 break;
11086
11087 case 3:
11088 rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
11089 et = neon_check_type (3, rs,
11090 N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11091 break;
11092
11093 default:
11094 abort ();
11095 }
11096
11097 if (et.type != NT_invtype)
11098 {
11099 pfn (rs);
11100 return SUCCESS;
11101 }
11102 else
11103 inst.error = NULL;
11104
11105 return FAIL;
11106 }
11107
11108 static void
11109 do_vfp_nsyn_mla_mls (enum neon_shape rs)
11110 {
11111 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
11112
11113 if (rs == NS_FFF)
11114 {
11115 if (is_mla)
11116 do_vfp_nsyn_opcode ("fmacs");
11117 else
11118 do_vfp_nsyn_opcode ("fmscs");
11119 }
11120 else
11121 {
11122 if (is_mla)
11123 do_vfp_nsyn_opcode ("fmacd");
11124 else
11125 do_vfp_nsyn_opcode ("fmscd");
11126 }
11127 }
11128
11129 static void
11130 do_vfp_nsyn_mul (enum neon_shape rs)
11131 {
11132 if (rs == NS_FFF)
11133 do_vfp_nsyn_opcode ("fmuls");
11134 else
11135 do_vfp_nsyn_opcode ("fmuld");
11136 }
11137
11138 static void
11139 do_vfp_nsyn_abs_neg (enum neon_shape rs)
11140 {
11141 int is_neg = (inst.instruction & 0x80) != 0;
11142 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
11143
11144 if (rs == NS_FF)
11145 {
11146 if (is_neg)
11147 do_vfp_nsyn_opcode ("fnegs");
11148 else
11149 do_vfp_nsyn_opcode ("fabss");
11150 }
11151 else
11152 {
11153 if (is_neg)
11154 do_vfp_nsyn_opcode ("fnegd");
11155 else
11156 do_vfp_nsyn_opcode ("fabsd");
11157 }
11158 }
11159
11160 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
11161 insns belong to Neon, and are handled elsewhere. */
11162
11163 static void
11164 do_vfp_nsyn_ldm_stm (int is_dbmode)
11165 {
11166 int is_ldm = (inst.instruction & (1 << 20)) != 0;
11167 if (is_ldm)
11168 {
11169 if (is_dbmode)
11170 do_vfp_nsyn_opcode ("fldmdbs");
11171 else
11172 do_vfp_nsyn_opcode ("fldmias");
11173 }
11174 else
11175 {
11176 if (is_dbmode)
11177 do_vfp_nsyn_opcode ("fstmdbs");
11178 else
11179 do_vfp_nsyn_opcode ("fstmias");
11180 }
11181 }
11182
11183 static void
11184 do_vfp_nsyn_sqrt (void)
11185 {
11186 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
11187 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11188
11189 if (rs == NS_FF)
11190 do_vfp_nsyn_opcode ("fsqrts");
11191 else
11192 do_vfp_nsyn_opcode ("fsqrtd");
11193 }
11194
11195 static void
11196 do_vfp_nsyn_div (void)
11197 {
11198 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
11199 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
11200 N_F32 | N_F64 | N_KEY | N_VFP);
11201
11202 if (rs == NS_FFF)
11203 do_vfp_nsyn_opcode ("fdivs");
11204 else
11205 do_vfp_nsyn_opcode ("fdivd");
11206 }
11207
11208 static void
11209 do_vfp_nsyn_nmul (void)
11210 {
11211 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
11212 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
11213 N_F32 | N_F64 | N_KEY | N_VFP);
11214
11215 if (rs == NS_FFF)
11216 {
11217 inst.instruction = NEON_ENC_SINGLE (inst.instruction);
11218 do_vfp_sp_dyadic ();
11219 }
11220 else
11221 {
11222 inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
11223 do_vfp_dp_rd_rn_rm ();
11224 }
11225 do_vfp_cond_or_thumb ();
11226 }
11227
11228 static void
11229 do_vfp_nsyn_cmp (void)
11230 {
11231 if (inst.operands[1].isreg)
11232 {
11233 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
11234 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11235
11236 if (rs == NS_FF)
11237 {
11238 inst.instruction = NEON_ENC_SINGLE (inst.instruction);
11239 do_vfp_sp_monadic ();
11240 }
11241 else
11242 {
11243 inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
11244 do_vfp_dp_rd_rm ();
11245 }
11246 }
11247 else
11248 {
11249 enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
11250 neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);
11251
11252 switch (inst.instruction & 0x0fffffff)
11253 {
11254 case N_MNEM_vcmp:
11255 inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
11256 break;
11257 case N_MNEM_vcmpe:
11258 inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
11259 break;
11260 default:
11261 abort ();
11262 }
11263
11264 if (rs == NS_FI)
11265 {
11266 inst.instruction = NEON_ENC_SINGLE (inst.instruction);
11267 do_vfp_sp_compare_z ();
11268 }
11269 else
11270 {
11271 inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
11272 do_vfp_dp_rd ();
11273 }
11274 }
11275 do_vfp_cond_or_thumb ();
11276 }
11277
11278 static void
11279 nsyn_insert_sp (void)
11280 {
11281 inst.operands[1] = inst.operands[0];
11282 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
11283 inst.operands[0].reg = 13;
11284 inst.operands[0].isreg = 1;
11285 inst.operands[0].writeback = 1;
11286 inst.operands[0].present = 1;
11287 }
11288
11289 static void
11290 do_vfp_nsyn_push (void)
11291 {
11292 nsyn_insert_sp ();
11293 if (inst.operands[1].issingle)
11294 do_vfp_nsyn_opcode ("fstmdbs");
11295 else
11296 do_vfp_nsyn_opcode ("fstmdbd");
11297 }
11298
11299 static void
11300 do_vfp_nsyn_pop (void)
11301 {
11302 nsyn_insert_sp ();
11303 if (inst.operands[1].issingle)
11304 do_vfp_nsyn_opcode ("fldmias");
11305 else
11306 do_vfp_nsyn_opcode ("fldmiad");
11307 }
11308
11309 /* Fix up Neon data-processing instructions, ORing in the correct bits for
11310 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
11311
11312 static unsigned
11313 neon_dp_fixup (unsigned i)
11314 {
11315 if (thumb_mode)
11316 {
11317 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
11318 if (i & (1 << 24))
11319 i |= 1 << 28;
11320
11321 i &= ~(1 << 24);
11322
11323 i |= 0xef000000;
11324 }
11325 else
11326 i |= 0xf2000000;
11327
11328 return i;
11329 }
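
/* A rough worked example of the fixup above: the integer "vsub" encoding
   from the table (0x1000800, which has bit 24 set) becomes
   0x1000800 | 0xf2000000 == 0xf3000800 in ARM mode, while in Thumb mode
   bit 24 is moved up to bit 28 and the 0xef000000 base is used, giving
   0xff000800.  */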
11330
11331 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
11332 (0, 1, 2, 3). */
11333
11334 static unsigned
11335 neon_logbits (unsigned x)
11336 {
11337 return ffs (x) - 4;
11338 }
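
/* For example: neon_logbits (8) == 0, neon_logbits (16) == 1,
   neon_logbits (32) == 2 and neon_logbits (64) == 3 (ffs is 1-based, hence
   the "- 4").  */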
11339
11340 #define LOW4(R) ((R) & 0xf)
11341 #define HI1(R) (((R) >> 4) & 1)
11342
11343 /* Encode insns with bit pattern:
11344
11345 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
11346 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
11347
11348 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
11349 different meaning for some instruction. */
11350
11351 static void
11352 neon_three_same (int isquad, int ubit, int size)
11353 {
11354 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11355 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11356 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11357 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11358 inst.instruction |= LOW4 (inst.operands[2].reg);
11359 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
11360 inst.instruction |= (isquad != 0) << 6;
11361 inst.instruction |= (ubit != 0) << 24;
11362 if (size != -1)
11363 inst.instruction |= neon_logbits (size) << 20;
11364
11365 inst.instruction = neon_dp_fixup (inst.instruction);
11366 }
11367
11368 /* Encode instructions of the form:
11369
11370 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
11371 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
11372
11373 Don't write size if SIZE == -1. */
11374
11375 static void
11376 neon_two_same (int qbit, int ubit, int size)
11377 {
11378 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11379 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11380 inst.instruction |= LOW4 (inst.operands[1].reg);
11381 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11382 inst.instruction |= (qbit != 0) << 6;
11383 inst.instruction |= (ubit != 0) << 24;
11384
11385 if (size != -1)
11386 inst.instruction |= neon_logbits (size) << 18;
11387
11388 inst.instruction = neon_dp_fixup (inst.instruction);
11389 }
11390
11391 /* Neon instruction encoders, in approximate order of appearance. */
11392
11393 static void
11394 do_neon_dyadic_i_su (void)
11395 {
11396 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11397 struct neon_type_el et = neon_check_type (3, rs,
11398 N_EQK, N_EQK, N_SU_32 | N_KEY);
11399 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11400 }
11401
11402 static void
11403 do_neon_dyadic_i64_su (void)
11404 {
11405 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11406 struct neon_type_el et = neon_check_type (3, rs,
11407 N_EQK, N_EQK, N_SU_ALL | N_KEY);
11408 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11409 }
11410
11411 static void
11412 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
11413 unsigned immbits)
11414 {
11415 unsigned size = et.size >> 3;
11416 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11417 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11418 inst.instruction |= LOW4 (inst.operands[1].reg);
11419 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11420 inst.instruction |= (isquad != 0) << 6;
11421 inst.instruction |= immbits << 16;
11422 inst.instruction |= (size >> 3) << 7;
11423 inst.instruction |= (size & 0x7) << 19;
11424 if (write_ubit)
11425 inst.instruction |= (uval != 0) << 24;
11426
11427 inst.instruction = neon_dp_fixup (inst.instruction);
11428 }
11429
11430 static void
11431 do_neon_shl_imm (void)
11432 {
11433 if (!inst.operands[2].isreg)
11434 {
11435 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
11436 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
11437 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11438 neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
11439 }
11440 else
11441 {
11442 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11443 struct neon_type_el et = neon_check_type (3, rs,
11444 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
11445 unsigned int tmp;
11446
11447 /* VSHL/VQSHL 3-register variants have syntax such as:
11448 vshl.xx Dd, Dm, Dn
11449 whereas other 3-register operations encoded by neon_three_same have
11450 syntax like:
11451 vadd.xx Dd, Dn, Dm
11452 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
11453 here. */
11454 tmp = inst.operands[2].reg;
11455 inst.operands[2].reg = inst.operands[1].reg;
11456 inst.operands[1].reg = tmp;
11457 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11458 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11459 }
11460 }
11461
11462 static void
11463 do_neon_qshl_imm (void)
11464 {
11465 if (!inst.operands[2].isreg)
11466 {
11467 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
11468 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
11469
11470 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11471 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
11472 inst.operands[2].imm);
11473 }
11474 else
11475 {
11476 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11477 struct neon_type_el et = neon_check_type (3, rs,
11478 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
11479 unsigned int tmp;
11480
11481 /* See note in do_neon_shl_imm. */
11482 tmp = inst.operands[2].reg;
11483 inst.operands[2].reg = inst.operands[1].reg;
11484 inst.operands[1].reg = tmp;
11485 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11486 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11487 }
11488 }
11489
11490 static void
11491 do_neon_rshl (void)
11492 {
11493 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11494 struct neon_type_el et = neon_check_type (3, rs,
11495 N_EQK, N_EQK, N_SU_ALL | N_KEY);
11496 unsigned int tmp;
11497
11498 tmp = inst.operands[2].reg;
11499 inst.operands[2].reg = inst.operands[1].reg;
11500 inst.operands[1].reg = tmp;
11501 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11502 }
11503
11504 static int
11505 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
11506 {
11507 /* Handle .I8 pseudo-instructions. */
11508 if (size == 8)
11509 {
11510 /* Unfortunately, this will make everything apart from zero out-of-range.
11511 FIXME: is this the intended semantics? There doesn't seem much point in
11512 accepting .I8 if so. */
11513 immediate |= immediate << 8;
11514 size = 16;
11515 }
11516
11517 if (size >= 32)
11518 {
11519 if (immediate == (immediate & 0x000000ff))
11520 {
11521 *immbits = immediate;
11522 return 0x1;
11523 }
11524 else if (immediate == (immediate & 0x0000ff00))
11525 {
11526 *immbits = immediate >> 8;
11527 return 0x3;
11528 }
11529 else if (immediate == (immediate & 0x00ff0000))
11530 {
11531 *immbits = immediate >> 16;
11532 return 0x5;
11533 }
11534 else if (immediate == (immediate & 0xff000000))
11535 {
11536 *immbits = immediate >> 24;
11537 return 0x7;
11538 }
11539 if ((immediate & 0xffff) != (immediate >> 16))
11540 goto bad_immediate;
11541 immediate &= 0xffff;
11542 }
11543
11544 if (immediate == (immediate & 0x000000ff))
11545 {
11546 *immbits = immediate;
11547 return 0x9;
11548 }
11549 else if (immediate == (immediate & 0x0000ff00))
11550 {
11551 *immbits = immediate >> 8;
11552 return 0xb;
11553 }
11554
11555 bad_immediate:
11556 first_error (_("immediate value out of range"));
11557 return FAIL;
11558 }
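
/* A couple of illustrative results from the function above, for size 32:
   immediate 0x000000ab yields cmode 0x1 with *immbits == 0xab, and immediate
   0x0000ab00 yields cmode 0x3 with *immbits == 0xab.  Values which do not
   reduce to a single byte in one of the supported positions are rejected
   with "immediate value out of range".  */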
11559
11560 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
11561 A, B, C, D. */
11562
11563 static int
11564 neon_bits_same_in_bytes (unsigned imm)
11565 {
11566 return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
11567 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
11568 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
11569 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
11570 }
11571
11572 /* For immediate of above form, return 0bABCD. */
11573
11574 static unsigned
11575 neon_squash_bits (unsigned imm)
11576 {
11577 return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
11578 | ((imm & 0x01000000) >> 21);
11579 }
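
/* Worked example: 0x00ff00ff satisfies neon_bits_same_in_bytes, and
   neon_squash_bits (0x00ff00ff) == 0x5, i.e. 0bABCD == 0b0101 with the 0xff
   bytes in the B and D positions.  */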
11580
11581 /* Compress quarter-float representation to 0b...000 abcdefgh. */
11582
11583 static unsigned
11584 neon_qfloat_bits (unsigned imm)
11585 {
11586 return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
11587 }
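
/* For instance, the IEEE single-precision pattern for 1.0 (0x3f800000)
   compresses to 0x70 here; whether a given bit pattern is representable in
   this form at all is decided separately by is_quarter_float.  */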
11588
11589 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
11590 the instruction. *OP is passed as the initial value of the op field, and
11591 may be set to a different value depending on the constant (e.g.
11592 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
11593 MVN). If the immediate looks like a repeated pattern then also
11594 try smaller element sizes. */
11595
11596 static int
11597 neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
11598 unsigned *immbits, int *op, int size,
11599 enum neon_el_type type)
11600 {
11601 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
11602 float. */
11603 if (type == NT_float && !float_p)
11604 return FAIL;
11605
11606 if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
11607 {
11608 if (size != 32 || *op == 1)
11609 return FAIL;
11610 *immbits = neon_qfloat_bits (immlo);
11611 return 0xf;
11612 }
11613
11614 if (size == 64)
11615 {
11616 if (neon_bits_same_in_bytes (immhi)
11617 && neon_bits_same_in_bytes (immlo))
11618 {
11619 if (*op == 1)
11620 return FAIL;
11621 *immbits = (neon_squash_bits (immhi) << 4)
11622 | neon_squash_bits (immlo);
11623 *op = 1;
11624 return 0xe;
11625 }
11626
11627 if (immhi != immlo)
11628 return FAIL;
11629 }
11630
11631 if (size >= 32)
11632 {
11633 if (immlo == (immlo & 0x000000ff))
11634 {
11635 *immbits = immlo;
11636 return 0x0;
11637 }
11638 else if (immlo == (immlo & 0x0000ff00))
11639 {
11640 *immbits = immlo >> 8;
11641 return 0x2;
11642 }
11643 else if (immlo == (immlo & 0x00ff0000))
11644 {
11645 *immbits = immlo >> 16;
11646 return 0x4;
11647 }
11648 else if (immlo == (immlo & 0xff000000))
11649 {
11650 *immbits = immlo >> 24;
11651 return 0x6;
11652 }
11653 else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
11654 {
11655 *immbits = (immlo >> 8) & 0xff;
11656 return 0xc;
11657 }
11658 else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
11659 {
11660 *immbits = (immlo >> 16) & 0xff;
11661 return 0xd;
11662 }
11663
11664 if ((immlo & 0xffff) != (immlo >> 16))
11665 return FAIL;
11666 immlo &= 0xffff;
11667 }
11668
11669 if (size >= 16)
11670 {
11671 if (immlo == (immlo & 0x000000ff))
11672 {
11673 *immbits = immlo;
11674 return 0x8;
11675 }
11676 else if (immlo == (immlo & 0x0000ff00))
11677 {
11678 *immbits = immlo >> 8;
11679 return 0xa;
11680 }
11681
11682 if ((immlo & 0xff) != (immlo >> 8))
11683 return FAIL;
11684 immlo &= 0xff;
11685 }
11686
11687 if (immlo == (immlo & 0x000000ff))
11688 {
11689 /* Don't allow MVN with 8-bit immediate. */
11690 if (*op == 1)
11691 return FAIL;
11692 *immbits = immlo;
11693 return 0xe;
11694 }
11695
11696 return FAIL;
11697 }
11698
11699 /* Write immediate bits [7:0] to the following locations:
11700
11701 |28/24|23 19|18 16|15 4|3 0|
11702 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
11703
11704 This function is used by VMOV/VMVN/VORR/VBIC. */
11705
11706 static void
11707 neon_write_immbits (unsigned immbits)
11708 {
11709 inst.instruction |= immbits & 0xf;
11710 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
11711 inst.instruction |= ((immbits >> 7) & 0x1) << 24;
11712 }
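
/* For example, immbits == 0xa5 (0b10100101) is scattered as: a == 1 into
   bit 24 (moved to bit 28 by the Thumb fixup), b c d == 0b010 into bits
   [18:16], and e f g h == 0b0101 into bits [3:0].  */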
11713
11714 /* Invert low-order SIZE bits of XHI:XLO. */
11715
11716 static void
11717 neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
11718 {
11719 unsigned immlo = xlo ? *xlo : 0;
11720 unsigned immhi = xhi ? *xhi : 0;
11721
11722 switch (size)
11723 {
11724 case 8:
11725 immlo = (~immlo) & 0xff;
11726 break;
11727
11728 case 16:
11729 immlo = (~immlo) & 0xffff;
11730 break;
11731
11732 case 64:
11733 immhi = (~immhi) & 0xffffffff;
11734 /* fall through. */
11735
11736 case 32:
11737 immlo = (~immlo) & 0xffffffff;
11738 break;
11739
11740 default:
11741 abort ();
11742 }
11743
11744 if (xlo)
11745 *xlo = immlo;
11746
11747 if (xhi)
11748 *xhi = immhi;
11749 }
11750
11751 static void
11752 do_neon_logic (void)
11753 {
11754 if (inst.operands[2].present && inst.operands[2].isreg)
11755 {
11756 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11757 neon_check_type (3, rs, N_IGNORE_TYPE);
11758 /* U bit and size field were set as part of the bitmask. */
11759 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11760 neon_three_same (neon_quad (rs), 0, -1);
11761 }
11762 else
11763 {
11764 enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
11765 struct neon_type_el et = neon_check_type (2, rs,
11766 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
11767 enum neon_opc opcode = inst.instruction & 0x0fffffff;
11768 unsigned immbits;
11769 int cmode;
11770
11771 if (et.type == NT_invtype)
11772 return;
11773
11774 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11775
11776 immbits = inst.operands[1].imm;
11777 if (et.size == 64)
11778 {
11779 /* .i64 is a pseudo-op, so the immediate must be a repeating
11780 pattern. */
11781 if (immbits != (inst.operands[1].regisimm ?
11782 inst.operands[1].reg : 0))
11783 {
11784 /* Set immbits to an invalid constant. */
11785 immbits = 0xdeadbeef;
11786 }
11787 }
11788
11789 switch (opcode)
11790 {
11791 case N_MNEM_vbic:
11792 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
11793 break;
11794
11795 case N_MNEM_vorr:
11796 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
11797 break;
11798
11799 case N_MNEM_vand:
11800 /* Pseudo-instruction for VBIC. */
11801 neon_invert_size (&immbits, 0, et.size);
11802 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
11803 break;
11804
11805 case N_MNEM_vorn:
11806 /* Pseudo-instruction for VORR. */
11807 neon_invert_size (&immbits, 0, et.size);
11808 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
11809 break;
11810
11811 default:
11812 abort ();
11813 }
11814
11815 if (cmode == FAIL)
11816 return;
11817
11818 inst.instruction |= neon_quad (rs) << 6;
11819 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11820 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11821 inst.instruction |= cmode << 8;
11822 neon_write_immbits (immbits);
11823
11824 inst.instruction = neon_dp_fixup (inst.instruction);
11825 }
11826 }
11827
11828 static void
11829 do_neon_bitfield (void)
11830 {
11831 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11832 neon_check_type (3, rs, N_IGNORE_TYPE);
11833 neon_three_same (neon_quad (rs), 0, -1);
11834 }
11835
11836 static void
11837 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
11838 unsigned destbits)
11839 {
11840 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11841 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
11842 types | N_KEY);
11843 if (et.type == NT_float)
11844 {
11845 inst.instruction = NEON_ENC_FLOAT (inst.instruction);
11846 neon_three_same (neon_quad (rs), 0, -1);
11847 }
11848 else
11849 {
11850 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11851 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
11852 }
11853 }
11854
11855 static void
11856 do_neon_dyadic_if_su (void)
11857 {
11858 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
11859 }
11860
11861 static void
11862 do_neon_dyadic_if_su_d (void)
11863 {
11864 /* This version only allows D registers, but that constraint is enforced during
11865 operand parsing, so we don't need to do anything extra here. */
11866 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
11867 }
11868
11869 static void
11870 do_neon_dyadic_if_i_d (void)
11871 {
11872 /* The "untyped" case can't happen. Do this to stop the "U" bit being
11873 affected if we specify unsigned args. */
11874 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
11875 }
11876
11877 enum vfp_or_neon_is_neon_bits
11878 {
11879 NEON_CHECK_CC = 1,
11880 NEON_CHECK_ARCH = 2
11881 };
11882
11883 /* Call this function if an instruction which may have belonged to the VFP or
11884 Neon instruction sets, but turned out to be a Neon instruction (due to the
11885 operand types involved, etc.). We have to check and/or fix-up a couple of
11886 things:
11887
11888 - Make sure the user hasn't attempted to make a Neon instruction
11889 conditional.
11890 - Alter the value in the condition code field if necessary.
11891 - Make sure that the arch supports Neon instructions.
11892
11893 Which of these operations take place depends on bits from enum
11894 vfp_or_neon_is_neon_bits.
11895
11896 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
11897 current instruction's condition is COND_ALWAYS, the condition field is
11898 changed to inst.uncond_value. This is necessary because instructions shared
11899 between VFP and Neon may be conditional for the VFP variants only, and the
11900 unconditional Neon version must have, e.g., 0xF in the condition field. */
11901
11902 static int
11903 vfp_or_neon_is_neon (unsigned check)
11904 {
11905 /* Conditions are always legal in Thumb mode (IT blocks). */
11906 if (!thumb_mode && (check & NEON_CHECK_CC))
11907 {
11908 if (inst.cond != COND_ALWAYS)
11909 {
11910 first_error (_(BAD_COND));
11911 return FAIL;
11912 }
11913 if (inst.uncond_value != -1)
11914 inst.instruction |= inst.uncond_value << 28;
11915 }
11916
11917 if ((check & NEON_CHECK_ARCH)
11918 && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
11919 {
11920 first_error (_(BAD_FPU));
11921 return FAIL;
11922 }
11923
11924 return SUCCESS;
11925 }
11926
11927 static void
11928 do_neon_addsub_if_i (void)
11929 {
11930 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
11931 return;
11932
11933 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
11934 return;
11935
11936 /* The "untyped" case can't happen. Do this to stop the "U" bit being
11937 affected if we specify unsigned args. */
11938 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
11939 }
11940
11941 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
11942 result to be:
11943 V<op> A,B (A is operand 0, B is operand 2)
11944 to mean:
11945 V<op> A,B,A
11946 not:
11947 V<op> A,B,B
11948 so handle that case specially. */
11949
11950 static void
11951 neon_exchange_operands (void)
11952 {
11953 void *scratch = alloca (sizeof (inst.operands[0]));
11954 if (inst.operands[1].present)
11955 {
11956 /* Swap operands[1] and operands[2]. */
11957 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
11958 inst.operands[1] = inst.operands[2];
11959 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
11960 }
11961 else
11962 {
11963 inst.operands[1] = inst.operands[2];
11964 inst.operands[2] = inst.operands[0];
11965 }
11966 }
11967
11968 static void
11969 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
11970 {
11971 if (inst.operands[2].isreg)
11972 {
11973 if (invert)
11974 neon_exchange_operands ();
11975 neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
11976 }
11977 else
11978 {
11979 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
11980 struct neon_type_el et = neon_check_type (2, rs,
11981 N_EQK | N_SIZ, immtypes | N_KEY);
11982
11983 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11984 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11985 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11986 inst.instruction |= LOW4 (inst.operands[1].reg);
11987 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11988 inst.instruction |= neon_quad (rs) << 6;
11989 inst.instruction |= (et.type == NT_float) << 10;
11990 inst.instruction |= neon_logbits (et.size) << 18;
11991
11992 inst.instruction = neon_dp_fixup (inst.instruction);
11993 }
11994 }
11995
11996 static void
11997 do_neon_cmp (void)
11998 {
11999 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
12000 }
12001
12002 static void
12003 do_neon_cmp_inv (void)
12004 {
12005 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
12006 }
12007
12008 static void
12009 do_neon_ceq (void)
12010 {
12011 neon_compare (N_IF_32, N_IF_32, FALSE);
12012 }
12013
12014 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
12015 scalars, which are encoded in 5 bits, M : Rm.
12016 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
12017 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
12018 index in M. */
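/* Worked example (illustrative): for a 16-bit scalar D5[2], regno = 5 and
   elno = 2, giving 5 | (2 << 3) = 0x15; for a 32-bit scalar D9[1],
   regno = 9 and elno = 1, giving 9 | (1 << 4) = 0x19.  */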
12019
12020 static unsigned
12021 neon_scalar_for_mul (unsigned scalar, unsigned elsize)
12022 {
12023 unsigned regno = NEON_SCALAR_REG (scalar);
12024 unsigned elno = NEON_SCALAR_INDEX (scalar);
12025
12026 switch (elsize)
12027 {
12028 case 16:
12029 if (regno > 7 || elno > 3)
12030 goto bad_scalar;
12031 return regno | (elno << 3);
12032
12033 case 32:
12034 if (regno > 15 || elno > 1)
12035 goto bad_scalar;
12036 return regno | (elno << 4);
12037
12038 default:
12039 bad_scalar:
12040 first_error (_("scalar out of range for multiply instruction"));
12041 }
12042
12043 return 0;
12044 }
12045
12046 /* Encode multiply / multiply-accumulate scalar instructions. */
12047
12048 static void
12049 neon_mul_mac (struct neon_type_el et, int ubit)
12050 {
12051 unsigned scalar;
12052
12053 /* Give a more helpful error message if we have an invalid type. */
12054 if (et.type == NT_invtype)
12055 return;
12056
12057 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
12058 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12059 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12060 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
12061 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
12062 inst.instruction |= LOW4 (scalar);
12063 inst.instruction |= HI1 (scalar) << 5;
12064 inst.instruction |= (et.type == NT_float) << 8;
12065 inst.instruction |= neon_logbits (et.size) << 20;
12066 inst.instruction |= (ubit != 0) << 24;
12067
12068 inst.instruction = neon_dp_fixup (inst.instruction);
12069 }
12070
12071 static void
12072 do_neon_mac_maybe_scalar (void)
12073 {
12074 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
12075 return;
12076
12077 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12078 return;
12079
12080 if (inst.operands[2].isscalar)
12081 {
12082 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
12083 struct neon_type_el et = neon_check_type (3, rs,
12084 N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
12085 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
12086 neon_mul_mac (et, neon_quad (rs));
12087 }
12088 else
12089 {
12090 /* The "untyped" case can't happen. Do this to stop the "U" bit being
12091 affected if we specify unsigned args. */
12092 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
12093 }
12094 }
12095
12096 static void
12097 do_neon_tst (void)
12098 {
12099 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12100 struct neon_type_el et = neon_check_type (3, rs,
12101 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
12102 neon_three_same (neon_quad (rs), 0, et.size);
12103 }
12104
12105 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
12106 same types as the MAC equivalents. The polynomial type for this instruction
12107 is encoded the same as the integer type. */
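/* For example (illustrative): "vmul.p8 d0, d1, d2" is accepted by the
   three-register form below, while a scalar operand such as
   "vmul.f32 d0, d1, d2[0]" is routed through do_neon_mac_maybe_scalar,
   which only allows the I16/I32/F32 types.  */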
12108
12109 static void
12110 do_neon_mul (void)
12111 {
12112 if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
12113 return;
12114
12115 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12116 return;
12117
12118 if (inst.operands[2].isscalar)
12119 do_neon_mac_maybe_scalar ();
12120 else
12121 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
12122 }
12123
12124 static void
12125 do_neon_qdmulh (void)
12126 {
12127 if (inst.operands[2].isscalar)
12128 {
12129 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
12130 struct neon_type_el et = neon_check_type (3, rs,
12131 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
12132 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
12133 neon_mul_mac (et, neon_quad (rs));
12134 }
12135 else
12136 {
12137 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12138 struct neon_type_el et = neon_check_type (3, rs,
12139 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
12140 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12141 /* The U bit (rounding) comes from bit mask. */
12142 neon_three_same (neon_quad (rs), 0, et.size);
12143 }
12144 }
12145
12146 static void
12147 do_neon_fcmp_absolute (void)
12148 {
12149 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12150 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
12151 /* Size field comes from bit mask. */
12152 neon_three_same (neon_quad (rs), 1, -1);
12153 }
12154
12155 static void
12156 do_neon_fcmp_absolute_inv (void)
12157 {
12158 neon_exchange_operands ();
12159 do_neon_fcmp_absolute ();
12160 }
12161
12162 static void
12163 do_neon_step (void)
12164 {
12165 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12166 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
12167 neon_three_same (neon_quad (rs), 0, -1);
12168 }
12169
12170 static void
12171 do_neon_abs_neg (void)
12172 {
12173 enum neon_shape rs;
12174 struct neon_type_el et;
12175
12176 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
12177 return;
12178
12179 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12180 return;
12181
12182 rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12183 et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);
12184
12185 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12186 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12187 inst.instruction |= LOW4 (inst.operands[1].reg);
12188 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12189 inst.instruction |= neon_quad (rs) << 6;
12190 inst.instruction |= (et.type == NT_float) << 10;
12191 inst.instruction |= neon_logbits (et.size) << 18;
12192
12193 inst.instruction = neon_dp_fixup (inst.instruction);
12194 }
12195
12196 static void
12197 do_neon_sli (void)
12198 {
12199 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12200 struct neon_type_el et = neon_check_type (2, rs,
12201 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
12202 int imm = inst.operands[2].imm;
12203 constraint (imm < 0 || (unsigned)imm >= et.size,
12204 _("immediate out of range for insert"));
12205 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
12206 }
12207
12208 static void
12209 do_neon_sri (void)
12210 {
12211 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12212 struct neon_type_el et = neon_check_type (2, rs,
12213 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
12214 int imm = inst.operands[2].imm;
12215 constraint (imm < 1 || (unsigned)imm > et.size,
12216 _("immediate out of range for insert"));
12217 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
12218 }
12219
12220 static void
12221 do_neon_qshlu_imm (void)
12222 {
12223 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12224 struct neon_type_el et = neon_check_type (2, rs,
12225 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
12226 int imm = inst.operands[2].imm;
12227 constraint (imm < 0 || (unsigned)imm >= et.size,
12228 _("immediate out of range for shift"));
12229 /* Only encodes the 'U present' variant of the instruction.
12230 In this case, signed types have OP (bit 8) set to 0.
12231 Unsigned types have OP set to 1. */
12232 inst.instruction |= (et.type == NT_unsigned) << 8;
12233 /* The rest of the bits are the same as other immediate shifts. */
12234 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
12235 }
12236
12237 static void
12238 do_neon_qmovn (void)
12239 {
12240 struct neon_type_el et = neon_check_type (2, NS_DQ,
12241 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
12242 /* Saturating move where operands can be signed or unsigned, and the
12243 destination has the same signedness. */
12244 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12245 if (et.type == NT_unsigned)
12246 inst.instruction |= 0xc0;
12247 else
12248 inst.instruction |= 0x80;
12249 neon_two_same (0, 1, et.size / 2);
12250 }
12251
12252 static void
12253 do_neon_qmovun (void)
12254 {
12255 struct neon_type_el et = neon_check_type (2, NS_DQ,
12256 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
12257 /* Saturating move with unsigned results. Operands must be signed. */
12258 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12259 neon_two_same (0, 1, et.size / 2);
12260 }
12261
12262 static void
12263 do_neon_rshift_sat_narrow (void)
12264 {
12265 /* FIXME: Types for narrowing. If operands are signed, results can be signed
12266 or unsigned. If operands are unsigned, results must also be unsigned. */
12267 struct neon_type_el et = neon_check_type (2, NS_DQI,
12268 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
12269 int imm = inst.operands[2].imm;
12270 /* This gets the bounds check, size encoding and immediate bits calculation
12271 right. */
12272 et.size /= 2;
12273
12274 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
12275 VQMOVN.I<size> <Dd>, <Qm>. */
12276 if (imm == 0)
12277 {
12278 inst.operands[2].present = 0;
12279 inst.instruction = N_MNEM_vqmovn;
12280 do_neon_qmovn ();
12281 return;
12282 }
12283
12284 constraint (imm < 1 || (unsigned)imm > et.size,
12285 _("immediate out of range"));
12286 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
12287 }
12288
12289 static void
12290 do_neon_rshift_sat_narrow_u (void)
12291 {
12292 /* FIXME: Types for narrowing. If operands are signed, results can be signed
12293 or unsigned. If operands are unsigned, results must also be unsigned. */
12294 struct neon_type_el et = neon_check_type (2, NS_DQI,
12295 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
12296 int imm = inst.operands[2].imm;
12297 /* This gets the bounds check, size encoding and immediate bits calculation
12298 right. */
12299 et.size /= 2;
12300
12301 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
12302 VQMOVUN.I<size> <Dd>, <Qm>. */
12303 if (imm == 0)
12304 {
12305 inst.operands[2].present = 0;
12306 inst.instruction = N_MNEM_vqmovun;
12307 do_neon_qmovun ();
12308 return;
12309 }
12310
12311 constraint (imm < 1 || (unsigned)imm > et.size,
12312 _("immediate out of range"));
12313 /* FIXME: The manual is kind of unclear about what value U should have in
12314 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
12315 must be 1. */
12316 neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
12317 }
12318
12319 static void
12320 do_neon_movn (void)
12321 {
12322 struct neon_type_el et = neon_check_type (2, NS_DQ,
12323 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
12324 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12325 neon_two_same (0, 1, et.size / 2);
12326 }
12327
12328 static void
12329 do_neon_rshift_narrow (void)
12330 {
12331 struct neon_type_el et = neon_check_type (2, NS_DQI,
12332 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
12333 int imm = inst.operands[2].imm;
12334 /* This gets the bounds check, size encoding and immediate bits calculation
12335 right. */
12336 et.size /= 2;
12337
12338 /* If the immediate is zero then this is a pseudo-instruction for
12339 VMOVN.I<size> <Dd>, <Qm>.  */

12340 if (imm == 0)
12341 {
12342 inst.operands[2].present = 0;
12343 inst.instruction = N_MNEM_vmovn;
12344 do_neon_movn ();
12345 return;
12346 }
12347
12348 constraint (imm < 1 || (unsigned)imm > et.size,
12349 _("immediate out of range for narrowing operation"));
12350 neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
12351 }
12352
12353 static void
12354 do_neon_shll (void)
12355 {
12356 /* FIXME: Type checking when lengthening. */
12357 struct neon_type_el et = neon_check_type (2, NS_QDI,
12358 N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
12359 unsigned imm = inst.operands[2].imm;
12360
12361 if (imm == et.size)
12362 {
12363 /* Maximum shift variant. */
12364 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12365 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12366 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12367 inst.instruction |= LOW4 (inst.operands[1].reg);
12368 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12369 inst.instruction |= neon_logbits (et.size) << 18;
12370
12371 inst.instruction = neon_dp_fixup (inst.instruction);
12372 }
12373 else
12374 {
12375 /* A more-specific type check for non-max versions. */
12376 et = neon_check_type (2, NS_QDI,
12377 N_EQK | N_DBL, N_SU_32 | N_KEY);
12378 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12379 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
12380 }
12381 }
12382
12383 /* Check the various types for the VCVT instruction, and return which version
12384 the current instruction is. */
12385
12386 static int
12387 neon_cvt_flavour (enum neon_shape rs)
12388 {
12389 #define CVT_VAR(C,X,Y) \
12390 et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y)); \
12391 if (et.type != NT_invtype) \
12392 { \
12393 inst.error = NULL; \
12394 return (C); \
12395 }
12396 struct neon_type_el et;
12397 unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
12398 || rs == NS_FF) ? N_VFP : 0;
12399 /* The instruction versions which take an immediate take one register
12400 argument, which is extended to the width of the full register. Thus the
12401 "source" and "destination" registers must have the same width. Hack that
12402 here by making the size equal to the key (wider, in this case) operand. */
12403 unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;
12404
12405 CVT_VAR (0, N_S32, N_F32);
12406 CVT_VAR (1, N_U32, N_F32);
12407 CVT_VAR (2, N_F32, N_S32);
12408 CVT_VAR (3, N_F32, N_U32);
12409
12410 whole_reg = N_VFP;
12411
12412 /* VFP instructions. */
12413 CVT_VAR (4, N_F32, N_F64);
12414 CVT_VAR (5, N_F64, N_F32);
12415 CVT_VAR (6, N_S32, N_F64 | key);
12416 CVT_VAR (7, N_U32, N_F64 | key);
12417 CVT_VAR (8, N_F64 | key, N_S32);
12418 CVT_VAR (9, N_F64 | key, N_U32);
12419 /* VFP instructions with bitshift. */
12420 CVT_VAR (10, N_F32 | key, N_S16);
12421 CVT_VAR (11, N_F32 | key, N_U16);
12422 CVT_VAR (12, N_F64 | key, N_S16);
12423 CVT_VAR (13, N_F64 | key, N_U16);
12424 CVT_VAR (14, N_S16, N_F32 | key);
12425 CVT_VAR (15, N_U16, N_F32 | key);
12426 CVT_VAR (16, N_S16, N_F64 | key);
12427 CVT_VAR (17, N_U16, N_F64 | key);
12428
12429 return -1;
12430 #undef CVT_VAR
12431 }
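/* As an illustration of the numbering above (not from the original
   sources): flavour 0 corresponds to a float-to-signed conversion such as
   "vcvt.s32.f32", flavour 2 to the signed-to-float "vcvt.f32.s32", and
   flavours 4/5 to the VFP double/single conversions encoded below as
   "fcvtsd"/"fcvtds".  */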
12432
12433 /* Neon-syntax VFP conversions. */
12434
12435 static void
12436 do_vfp_nsyn_cvt (enum neon_shape rs, int flavour)
12437 {
12438 const char *opname = 0;
12439
12440 if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
12441 {
12442 /* Conversions with immediate bitshift. */
12443 const char *enc[] =
12444 {
12445 "ftosls",
12446 "ftouls",
12447 "fsltos",
12448 "fultos",
12449 NULL,
12450 NULL,
12451 "ftosld",
12452 "ftould",
12453 "fsltod",
12454 "fultod",
12455 "fshtos",
12456 "fuhtos",
12457 "fshtod",
12458 "fuhtod",
12459 "ftoshs",
12460 "ftouhs",
12461 "ftoshd",
12462 "ftouhd"
12463 };
12464
12465 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
12466 {
12467 opname = enc[flavour];
12468 constraint (inst.operands[0].reg != inst.operands[1].reg,
12469 _("operands 0 and 1 must be the same register"));
12470 inst.operands[1] = inst.operands[2];
12471 memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
12472 }
12473 }
12474 else
12475 {
12476 /* Conversions without bitshift. */
12477 const char *enc[] =
12478 {
12479 "ftosis",
12480 "ftouis",
12481 "fsitos",
12482 "fuitos",
12483 "fcvtsd",
12484 "fcvtds",
12485 "ftosid",
12486 "ftouid",
12487 "fsitod",
12488 "fuitod"
12489 };
12490
12491 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
12492 opname = enc[flavour];
12493 }
12494
12495 if (opname)
12496 do_vfp_nsyn_opcode (opname);
12497 }
12498
12499 static void
12500 do_vfp_nsyn_cvtz (void)
12501 {
12502 enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
12503 int flavour = neon_cvt_flavour (rs);
12504 const char *enc[] =
12505 {
12506 "ftosizs",
12507 "ftouizs",
12508 NULL,
12509 NULL,
12510 NULL,
12511 NULL,
12512 "ftosizd",
12513 "ftouizd"
12514 };
12515
12516 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
12517 do_vfp_nsyn_opcode (enc[flavour]);
12518 }
12519
12520 static void
12521 do_neon_cvt (void)
12522 {
12523 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
12524 NS_FD, NS_DF, NS_FF, NS_NULL);
12525 int flavour = neon_cvt_flavour (rs);
12526
12527 /* VFP rather than Neon conversions. */
12528 if (flavour >= 4)
12529 {
12530 do_vfp_nsyn_cvt (rs, flavour);
12531 return;
12532 }
12533
12534 switch (rs)
12535 {
12536 case NS_DDI:
12537 case NS_QQI:
12538 {
12539 unsigned immbits;
12540 unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
12541 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12542 return;
12543 /* Fixed-point conversion with #0 immediate is encoded as an
12544 integer conversion.  */
12545 if (inst.operands[2].present && inst.operands[2].imm == 0)
12546 goto int_encode;
12547 immbits = 32 - inst.operands[2].imm;
12548 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12549 if (flavour != -1)
12550 inst.instruction |= enctab[flavour];
12551 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12552 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12553 inst.instruction |= LOW4 (inst.operands[1].reg);
12554 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12555 inst.instruction |= neon_quad (rs) << 6;
12556 inst.instruction |= 1 << 21;
12557 inst.instruction |= immbits << 16;
12558
12559 inst.instruction = neon_dp_fixup (inst.instruction);
12560 }
12561 break;
12562
12563 case NS_DD:
12564 case NS_QQ:
12565 int_encode:
12566 {
12567 unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
12568
12569 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12570
12571 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12572 return;
12573
12574 if (flavour != -1)
12575 inst.instruction |= enctab[flavour];
12576
12577 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12578 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12579 inst.instruction |= LOW4 (inst.operands[1].reg);
12580 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12581 inst.instruction |= neon_quad (rs) << 6;
12582 inst.instruction |= 2 << 18;
12583
12584 inst.instruction = neon_dp_fixup (inst.instruction);
12585 }
12586 break;
12587
12588 default:
12589 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
12590 do_vfp_nsyn_cvt (rs, flavour);
12591 }
12592 }
12593
12594 static void
12595 neon_move_immediate (void)
12596 {
12597 enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
12598 struct neon_type_el et = neon_check_type (2, rs,
12599 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
12600 unsigned immlo, immhi = 0, immbits;
12601 int op, cmode, float_p;
12602
12603 constraint (et.type == NT_invtype,
12604 _("operand size must be specified for immediate VMOV"));
12605
12606 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
12607 op = (inst.instruction & (1 << 5)) != 0;
12608
12609 immlo = inst.operands[1].imm;
12610 if (inst.operands[1].regisimm)
12611 immhi = inst.operands[1].reg;
12612
12613 constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
12614 _("immediate has bits set outside the operand size"));
12615
12616 float_p = inst.operands[1].immisfloat;
12617
12618 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
12619 et.size, et.type)) == FAIL)
12620 {
12621 /* Invert relevant bits only. */
12622 neon_invert_size (&immlo, &immhi, et.size);
12623 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
12624 with one or the other; those cases are caught by
12625 neon_cmode_for_move_imm. */
12626 op = !op;
12627 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
12628 &op, et.size, et.type)) == FAIL)
12629 {
12630 first_error (_("immediate out of range"));
12631 return;
12632 }
12633 }
12634
12635 inst.instruction &= ~(1 << 5);
12636 inst.instruction |= op << 5;
12637
12638 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12639 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12640 inst.instruction |= neon_quad (rs) << 6;
12641 inst.instruction |= cmode << 8;
12642
12643 neon_write_immbits (immbits);
12644 }
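/* For instance (illustrative): "vmov.i32 d0, #0xffffff00" has no direct
   cmode encoding, so the code above inverts the value to 0x000000ff and
   flips VMOV to VMVN, which neon_cmode_for_move_imm can then encode.  */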
12645
12646 static void
12647 do_neon_mvn (void)
12648 {
12649 if (inst.operands[1].isreg)
12650 {
12651 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12652
12653 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12654 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12655 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12656 inst.instruction |= LOW4 (inst.operands[1].reg);
12657 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12658 inst.instruction |= neon_quad (rs) << 6;
12659 }
12660 else
12661 {
12662 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12663 neon_move_immediate ();
12664 }
12665
12666 inst.instruction = neon_dp_fixup (inst.instruction);
12667 }
12668
12669 /* Encode instructions of form:
12670
12671 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
12672 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm |
12673
12674 */
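/* This helper is shared by the long/wide/narrow operations below
   (VADDL, VADDW, VMULL, VABAL, ...); e.g. "vaddl.s16 q0, d1, d2"
   (illustrative) widens two D-register sources into a Q-register
   destination.  */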
12675
12676 static void
12677 neon_mixed_length (struct neon_type_el et, unsigned size)
12678 {
12679 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12680 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12681 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
12682 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
12683 inst.instruction |= LOW4 (inst.operands[2].reg);
12684 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
12685 inst.instruction |= (et.type == NT_unsigned) << 24;
12686 inst.instruction |= neon_logbits (size) << 20;
12687
12688 inst.instruction = neon_dp_fixup (inst.instruction);
12689 }
12690
12691 static void
12692 do_neon_dyadic_long (void)
12693 {
12694 /* FIXME: Type checking for lengthening op. */
12695 struct neon_type_el et = neon_check_type (3, NS_QDD,
12696 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
12697 neon_mixed_length (et, et.size);
12698 }
12699
12700 static void
12701 do_neon_abal (void)
12702 {
12703 struct neon_type_el et = neon_check_type (3, NS_QDD,
12704 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
12705 neon_mixed_length (et, et.size);
12706 }
12707
12708 static void
12709 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
12710 {
12711 if (inst.operands[2].isscalar)
12712 {
12713 struct neon_type_el et = neon_check_type (3, NS_QDS,
12714 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
12715 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
12716 neon_mul_mac (et, et.type == NT_unsigned);
12717 }
12718 else
12719 {
12720 struct neon_type_el et = neon_check_type (3, NS_QDD,
12721 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
12722 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12723 neon_mixed_length (et, et.size);
12724 }
12725 }
12726
12727 static void
12728 do_neon_mac_maybe_scalar_long (void)
12729 {
12730 neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
12731 }
12732
12733 static void
12734 do_neon_dyadic_wide (void)
12735 {
12736 struct neon_type_el et = neon_check_type (3, NS_QQD,
12737 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
12738 neon_mixed_length (et, et.size);
12739 }
12740
12741 static void
12742 do_neon_dyadic_narrow (void)
12743 {
12744 struct neon_type_el et = neon_check_type (3, NS_QDD,
12745 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
12746 /* Operand sign is unimportant, and the U bit is part of the opcode,
12747 so force the operand type to integer. */
12748 et.type = NT_integer;
12749 neon_mixed_length (et, et.size / 2);
12750 }
12751
12752 static void
12753 do_neon_mul_sat_scalar_long (void)
12754 {
12755 neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
12756 }
12757
12758 static void
12759 do_neon_vmull (void)
12760 {
12761 if (inst.operands[2].isscalar)
12762 do_neon_mac_maybe_scalar_long ();
12763 else
12764 {
12765 struct neon_type_el et = neon_check_type (3, NS_QDD,
12766 N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
12767 if (et.type == NT_poly)
12768 inst.instruction = NEON_ENC_POLY (inst.instruction);
12769 else
12770 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12771 /* For polynomial encoding, size field must be 0b00 and the U bit must be
12772 zero. Should be OK as-is. */
12773 neon_mixed_length (et, et.size);
12774 }
12775 }
12776
12777 static void
12778 do_neon_ext (void)
12779 {
12780 enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
12781 struct neon_type_el et = neon_check_type (3, rs,
12782 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
12783 unsigned imm = (inst.operands[3].imm * et.size) / 8;
12784 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12785 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12786 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
12787 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
12788 inst.instruction |= LOW4 (inst.operands[2].reg);
12789 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
12790 inst.instruction |= neon_quad (rs) << 6;
12791 inst.instruction |= imm << 8;
12792
12793 inst.instruction = neon_dp_fixup (inst.instruction);
12794 }
12795
12796 static void
12797 do_neon_rev (void)
12798 {
12799 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12800 struct neon_type_el et = neon_check_type (2, rs,
12801 N_EQK, N_8 | N_16 | N_32 | N_KEY);
12802 unsigned op = (inst.instruction >> 7) & 3;
12803 /* N (width of reversed regions) is encoded as part of the bitmask. We
12804 extract it here to check the elements to be reversed are smaller.
12805 Otherwise we'd get a reserved instruction. */
12806 unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
12807 assert (elsize != 0);
12808 constraint (et.size >= elsize,
12809 _("elements must be smaller than reversal region"));
12810 neon_two_same (neon_quad (rs), 1, et.size);
12811 }
12812
12813 static void
12814 do_neon_dup (void)
12815 {
12816 if (inst.operands[1].isscalar)
12817 {
12818 enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
12819 struct neon_type_el et = neon_check_type (2, rs,
12820 N_EQK, N_8 | N_16 | N_32 | N_KEY);
12821 unsigned sizebits = et.size >> 3;
12822 unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
12823 int logsize = neon_logbits (et.size);
12824 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
12825
12826 if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
12827 return;
12828
12829 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
12830 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12831 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12832 inst.instruction |= LOW4 (dm);
12833 inst.instruction |= HI1 (dm) << 5;
12834 inst.instruction |= neon_quad (rs) << 6;
12835 inst.instruction |= x << 17;
12836 inst.instruction |= sizebits << 16;
12837
12838 inst.instruction = neon_dp_fixup (inst.instruction);
12839 }
12840 else
12841 {
12842 enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
12843 struct neon_type_el et = neon_check_type (2, rs,
12844 N_8 | N_16 | N_32 | N_KEY, N_EQK);
12845 /* Duplicate ARM register to lanes of vector. */
12846 inst.instruction = NEON_ENC_ARMREG (inst.instruction);
12847 switch (et.size)
12848 {
12849 case 8: inst.instruction |= 0x400000; break;
12850 case 16: inst.instruction |= 0x000020; break;
12851 case 32: inst.instruction |= 0x000000; break;
12852 default: break;
12853 }
12854 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
12855 inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
12856 inst.instruction |= HI1 (inst.operands[0].reg) << 7;
12857 inst.instruction |= neon_quad (rs) << 21;
12858 /* The encoding for this instruction is identical for the ARM and Thumb
12859 variants, except for the condition field. */
12860 do_vfp_cond_or_thumb ();
12861 }
12862 }
12863
12864 /* VMOV has particularly many variations. It can be one of:
12865 0. VMOV<c><q> <Qd>, <Qm>
12866 1. VMOV<c><q> <Dd>, <Dm>
12867 (Register operations, which are VORR with Rm = Rn.)
12868 2. VMOV<c><q>.<dt> <Qd>, #<imm>
12869 3. VMOV<c><q>.<dt> <Dd>, #<imm>
12870 (Immediate loads.)
12871 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
12872 (ARM register to scalar.)
12873 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
12874 (Two ARM registers to vector.)
12875 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
12876 (Scalar to ARM register.)
12877 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
12878 (Vector to two ARM registers.)
12879 8. VMOV.F32 <Sd>, <Sm>
12880 9. VMOV.F64 <Dd>, <Dm>
12881 (VFP register moves.)
12882 10. VMOV.F32 <Sd>, #imm
12883 11. VMOV.F64 <Dd>, #imm
12884 (VFP float immediate load.)
12885 12. VMOV <Rd>, <Sm>
12886 (VFP single to ARM reg.)
12887 13. VMOV <Sd>, <Rm>
12888 (ARM reg to VFP single.)
12889 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
12890 (Two ARM regs to two VFP singles.)
12891 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
12892 (Two VFP singles to two ARM regs.)
12893
12894 These cases can be disambiguated using neon_select_shape, except cases 1/9
12895 and 3/11 which depend on the operand type too.
12896
12897 All the encoded bits are hardcoded by this function.
12898
12899 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
12900 Cases 5, 7 may be used with VFPv2 and above.
12901
12902 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
12903 can specify a type where it doesn't make sense to, and it is ignored).
12904 */
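/* A few concrete spellings of the cases above (illustrative only):
     case 2:  vmov.i32 q0, #1
     case 4:  vmov.8 d0[3], r1
     case 9:  vmov.f64 d0, d1
     case 12: vmov r0, s1
     case 14: vmov r0, r1, s2, s3  */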
12905
12906 static void
12907 do_neon_mov (void)
12908 {
12909 enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
12910 NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
12911 NS_NULL);
12912 struct neon_type_el et;
12913 const char *ldconst = 0;
12914
12915 switch (rs)
12916 {
12917 case NS_DD: /* case 1/9. */
12918 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
12919 /* It is not an error here if no type is given. */
12920 inst.error = NULL;
12921 if (et.type == NT_float && et.size == 64)
12922 {
12923 do_vfp_nsyn_opcode ("fcpyd");
12924 break;
12925 }
12926 /* fall through. */
12927
12928 case NS_QQ: /* case 0/1. */
12929 {
12930 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12931 return;
12932 /* The architecture manual I have doesn't explicitly state which
12933 value the U bit should have for register->register moves, but
12934 the equivalent VORR instruction has U = 0, so do that. */
12935 inst.instruction = 0x0200110;
12936 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12937 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12938 inst.instruction |= LOW4 (inst.operands[1].reg);
12939 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12940 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
12941 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
12942 inst.instruction |= neon_quad (rs) << 6;
12943
12944 inst.instruction = neon_dp_fixup (inst.instruction);
12945 }
12946 break;
12947
12948 case NS_DI: /* case 3/11. */
12949 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
12950 inst.error = NULL;
12951 if (et.type == NT_float && et.size == 64)
12952 {
12953 /* case 11 (fconstd). */
12954 ldconst = "fconstd";
12955 goto encode_fconstd;
12956 }
12957 /* fall through. */
12958
12959 case NS_QI: /* case 2/3. */
12960 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12961 return;
12962 inst.instruction = 0x0800010;
12963 neon_move_immediate ();
12964 inst.instruction = neon_dp_fixup (inst.instruction);
12965 break;
12966
12967 case NS_SR: /* case 4. */
12968 {
12969 unsigned bcdebits = 0;
12970 struct neon_type_el et = neon_check_type (2, NS_NULL,
12971 N_8 | N_16 | N_32 | N_KEY, N_EQK);
12972 int logsize = neon_logbits (et.size);
12973 unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
12974 unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
12975
12976 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
12977 _(BAD_FPU));
12978 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
12979 && et.size != 32, _(BAD_FPU));
12980 constraint (et.type == NT_invtype, _("bad type for scalar"));
12981 constraint (x >= 64 / et.size, _("scalar index out of range"));
12982
12983 switch (et.size)
12984 {
12985 case 8: bcdebits = 0x8; break;
12986 case 16: bcdebits = 0x1; break;
12987 case 32: bcdebits = 0x0; break;
12988 default: ;
12989 }
12990
12991 bcdebits |= x << logsize;
12992
12993 inst.instruction = 0xe000b10;
12994 do_vfp_cond_or_thumb ();
12995 inst.instruction |= LOW4 (dn) << 16;
12996 inst.instruction |= HI1 (dn) << 7;
12997 inst.instruction |= inst.operands[1].reg << 12;
12998 inst.instruction |= (bcdebits & 3) << 5;
12999 inst.instruction |= (bcdebits >> 2) << 21;
13000 }
13001 break;
13002
13003 case NS_DRR: /* case 5 (fmdrr). */
13004 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
13005 _(BAD_FPU));
13006
13007 inst.instruction = 0xc400b10;
13008 do_vfp_cond_or_thumb ();
13009 inst.instruction |= LOW4 (inst.operands[0].reg);
13010 inst.instruction |= HI1 (inst.operands[0].reg) << 5;
13011 inst.instruction |= inst.operands[1].reg << 12;
13012 inst.instruction |= inst.operands[2].reg << 16;
13013 break;
13014
13015 case NS_RS: /* case 6. */
13016 {
13017 struct neon_type_el et = neon_check_type (2, NS_NULL,
13018 N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
13019 unsigned logsize = neon_logbits (et.size);
13020 unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
13021 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
13022 unsigned abcdebits = 0;
13023
13024 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
13025 _(BAD_FPU));
13026 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
13027 && et.size != 32, _(BAD_FPU));
13028 constraint (et.type == NT_invtype, _("bad type for scalar"));
13029 constraint (x >= 64 / et.size, _("scalar index out of range"));
13030
13031 switch (et.size)
13032 {
13033 case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
13034 case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
13035 case 32: abcdebits = 0x00; break;
13036 default: ;
13037 }
13038
13039 abcdebits |= x << logsize;
13040 inst.instruction = 0xe100b10;
13041 do_vfp_cond_or_thumb ();
13042 inst.instruction |= LOW4 (dn) << 16;
13043 inst.instruction |= HI1 (dn) << 7;
13044 inst.instruction |= inst.operands[0].reg << 12;
13045 inst.instruction |= (abcdebits & 3) << 5;
13046 inst.instruction |= (abcdebits >> 2) << 21;
13047 }
13048 break;
13049
13050 case NS_RRD: /* case 7 (fmrrd). */
13051 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
13052 _(BAD_FPU));
13053
13054 inst.instruction = 0xc500b10;
13055 do_vfp_cond_or_thumb ();
13056 inst.instruction |= inst.operands[0].reg << 12;
13057 inst.instruction |= inst.operands[1].reg << 16;
13058 inst.instruction |= LOW4 (inst.operands[2].reg);
13059 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13060 break;
13061
13062 case NS_FF: /* case 8 (fcpys). */
13063 do_vfp_nsyn_opcode ("fcpys");
13064 break;
13065
13066 case NS_FI: /* case 10 (fconsts). */
13067 ldconst = "fconsts";
13068 encode_fconstd:
13069 if (is_quarter_float (inst.operands[1].imm))
13070 {
13071 inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
13072 do_vfp_nsyn_opcode (ldconst);
13073 }
13074 else
13075 first_error (_("immediate out of range"));
13076 break;
13077
13078 case NS_RF: /* case 12 (fmrs). */
13079 do_vfp_nsyn_opcode ("fmrs");
13080 break;
13081
13082 case NS_FR: /* case 13 (fmsr). */
13083 do_vfp_nsyn_opcode ("fmsr");
13084 break;
13085
13086 /* The encoders for the fmrrs and fmsrr instructions expect three operands
13087 (one of which is a list), but we have parsed four. Do some fiddling to
13088 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
13089 expect. */
13090 case NS_RRFF: /* case 14 (fmrrs). */
13091 constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
13092 _("VFP registers must be adjacent"));
13093 inst.operands[2].imm = 2;
13094 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
13095 do_vfp_nsyn_opcode ("fmrrs");
13096 break;
13097
13098 case NS_FFRR: /* case 15 (fmsrr). */
13099 constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
13100 _("VFP registers must be adjacent"));
13101 inst.operands[1] = inst.operands[2];
13102 inst.operands[2] = inst.operands[3];
13103 inst.operands[0].imm = 2;
13104 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
13105 do_vfp_nsyn_opcode ("fmsrr");
13106 break;
13107
13108 default:
13109 abort ();
13110 }
13111 }
13112
13113 static void
13114 do_neon_rshift_round_imm (void)
13115 {
13116 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13117 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
13118 int imm = inst.operands[2].imm;
13119
13120 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
13121 if (imm == 0)
13122 {
13123 inst.operands[2].present = 0;
13124 do_neon_mov ();
13125 return;
13126 }
13127
13128 constraint (imm < 1 || (unsigned)imm > et.size,
13129 _("immediate out of range for shift"));
13130 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
13131 et.size - imm);
13132 }
13133
13134 static void
13135 do_neon_movl (void)
13136 {
13137 struct neon_type_el et = neon_check_type (2, NS_QD,
13138 N_EQK | N_DBL, N_SU_32 | N_KEY);
13139 unsigned sizebits = et.size >> 3;
13140 inst.instruction |= sizebits << 19;
13141 neon_two_same (0, et.type == NT_unsigned, -1);
13142 }
13143
13144 static void
13145 do_neon_trn (void)
13146 {
13147 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13148 struct neon_type_el et = neon_check_type (2, rs,
13149 N_EQK, N_8 | N_16 | N_32 | N_KEY);
13150 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
13151 neon_two_same (neon_quad (rs), 1, et.size);
13152 }
13153
13154 static void
13155 do_neon_zip_uzp (void)
13156 {
13157 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13158 struct neon_type_el et = neon_check_type (2, rs,
13159 N_EQK, N_8 | N_16 | N_32 | N_KEY);
13160 if (rs == NS_DD && et.size == 32)
13161 {
13162 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
13163 inst.instruction = N_MNEM_vtrn;
13164 do_neon_trn ();
13165 return;
13166 }
13167 neon_two_same (neon_quad (rs), 1, et.size);
13168 }
13169
13170 static void
13171 do_neon_sat_abs_neg (void)
13172 {
13173 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13174 struct neon_type_el et = neon_check_type (2, rs,
13175 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
13176 neon_two_same (neon_quad (rs), 1, et.size);
13177 }
13178
13179 static void
13180 do_neon_pair_long (void)
13181 {
13182 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13183 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
13184 /* Unsigned is encoded in the OP field (bit 7) for these instructions.  */
13185 inst.instruction |= (et.type == NT_unsigned) << 7;
13186 neon_two_same (neon_quad (rs), 1, et.size);
13187 }
13188
13189 static void
13190 do_neon_recip_est (void)
13191 {
13192 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13193 struct neon_type_el et = neon_check_type (2, rs,
13194 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
13195 inst.instruction |= (et.type == NT_float) << 8;
13196 neon_two_same (neon_quad (rs), 1, et.size);
13197 }
13198
13199 static void
13200 do_neon_cls (void)
13201 {
13202 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13203 struct neon_type_el et = neon_check_type (2, rs,
13204 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
13205 neon_two_same (neon_quad (rs), 1, et.size);
13206 }
13207
13208 static void
13209 do_neon_clz (void)
13210 {
13211 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13212 struct neon_type_el et = neon_check_type (2, rs,
13213 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
13214 neon_two_same (neon_quad (rs), 1, et.size);
13215 }
13216
13217 static void
13218 do_neon_cnt (void)
13219 {
13220 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13221 struct neon_type_el et = neon_check_type (2, rs,
13222 N_EQK | N_INT, N_8 | N_KEY);
13223 neon_two_same (neon_quad (rs), 1, et.size);
13224 }
13225
13226 static void
13227 do_neon_swp (void)
13228 {
13229 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13230 neon_two_same (neon_quad (rs), 1, -1);
13231 }
13232
13233 static void
13234 do_neon_tbl_tbx (void)
13235 {
13236 unsigned listlenbits;
13237 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
13238
13239 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
13240 {
13241 first_error (_("bad list length for table lookup"));
13242 return;
13243 }
13244
13245 listlenbits = inst.operands[1].imm - 1;
13246 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13247 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13248 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13249 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13250 inst.instruction |= LOW4 (inst.operands[2].reg);
13251 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13252 inst.instruction |= listlenbits << 8;
13253
13254 inst.instruction = neon_dp_fixup (inst.instruction);
13255 }
13256
13257 static void
13258 do_neon_ldm_stm (void)
13259 {
13260 /* P, U and L bits are part of bitmask. */
13261 int is_dbmode = (inst.instruction & (1 << 24)) != 0;
13262 unsigned offsetbits = inst.operands[1].imm * 2;
13263
13264 if (inst.operands[1].issingle)
13265 {
13266 do_vfp_nsyn_ldm_stm (is_dbmode);
13267 return;
13268 }
13269
13270 constraint (is_dbmode && !inst.operands[0].writeback,
13271 _("writeback (!) must be used for VLDMDB and VSTMDB"));
13272
13273 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
13274 _("register list must contain at least 1 and at most 16 "
13275 "registers"));
13276
13277 inst.instruction |= inst.operands[0].reg << 16;
13278 inst.instruction |= inst.operands[0].writeback << 21;
13279 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
13280 inst.instruction |= HI1 (inst.operands[1].reg) << 22;
13281
13282 inst.instruction |= offsetbits;
13283
13284 do_vfp_cond_or_thumb ();
13285 }
13286
13287 static void
13288 do_neon_ldr_str (void)
13289 {
13290 int is_ldr = (inst.instruction & (1 << 20)) != 0;
13291
13292 if (inst.operands[0].issingle)
13293 {
13294 if (is_ldr)
13295 do_vfp_nsyn_opcode ("flds");
13296 else
13297 do_vfp_nsyn_opcode ("fsts");
13298 }
13299 else
13300 {
13301 if (is_ldr)
13302 do_vfp_nsyn_opcode ("fldd");
13303 else
13304 do_vfp_nsyn_opcode ("fstd");
13305 }
13306 }
13307
13308 /* "interleave" version also handles non-interleaving register VLD1/VST1
13309 instructions. */
13310
13311 static void
13312 do_neon_ld_st_interleave (void)
13313 {
13314 struct neon_type_el et = neon_check_type (1, NS_NULL,
13315 N_8 | N_16 | N_32 | N_64);
13316 unsigned alignbits = 0;
13317 unsigned idx;
13318 /* The bits in this table go:
13319 0: register stride of one (0) or two (1)
13320 1,2: register list length, minus one (1, 2, 3, 4).
13321 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
13322 We use -1 for invalid entries. */
13323 const int typetable[] =
13324 {
13325 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
13326 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
13327 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
13328 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
13329 };
13330 int typebits;
13331
13332 if (et.type == NT_invtype)
13333 return;
13334
13335 if (inst.operands[1].immisalign)
13336 switch (inst.operands[1].imm >> 8)
13337 {
13338 case 64: alignbits = 1; break;
13339 case 128:
13340 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
13341 goto bad_alignment;
13342 alignbits = 2;
13343 break;
13344 case 256:
13345 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
13346 goto bad_alignment;
13347 alignbits = 3;
13348 break;
13349 default:
13350 bad_alignment:
13351 first_error (_("bad alignment"));
13352 return;
13353 }
13354
13355 inst.instruction |= alignbits << 4;
13356 inst.instruction |= neon_logbits (et.size) << 6;
13357
13358 /* Bits [4:6] of the immediate in a list specifier encode register stride
13359 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
13360 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
13361 up the right value for "type" in a table based on this value and the given
13362 list style, then stick it back. */
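/* Worked example (illustrative): "vld2.16 {d0, d1}, [r0]" has a
   two-register, stride-one list, so bits [6:4] of the list specifier are
   0b010 and bits [9:8] of the bitmask are 0b01, giving idx = 10 and
   typebits = 0x8.  */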
13363 idx = ((inst.operands[0].imm >> 4) & 7)
13364 | (((inst.instruction >> 8) & 3) << 3);
13365
13366 typebits = typetable[idx];
13367
13368 constraint (typebits == -1, _("bad list type for instruction"));
13369
13370 inst.instruction &= ~0xf00;
13371 inst.instruction |= typebits << 8;
13372 }
13373
13374 /* Check that the alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
13375 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
13376 otherwise. The variable arguments are a list of pairs of legal (size, align)
13377 values, terminated with -1. */
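/* For example (illustrative), the VLD1/VST1 call below,
   neon_alignment_bit (et.size, align, &do_align, 16, 16, 32, 32, -1),
   accepts an alignment of 16 with 16-bit elements or 32 with 32-bit
   elements, and reports "unsupported alignment" for any other
   combination when an alignment was given.  */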
13378
13379 static int
13380 neon_alignment_bit (int size, int align, int *do_align, ...)
13381 {
13382 va_list ap;
13383 int result = FAIL, thissize, thisalign;
13384
13385 if (!inst.operands[1].immisalign)
13386 {
13387 *do_align = 0;
13388 return SUCCESS;
13389 }
13390
13391 va_start (ap, do_align);
13392
13393 do
13394 {
13395 thissize = va_arg (ap, int);
13396 if (thissize == -1)
13397 break;
13398 thisalign = va_arg (ap, int);
13399
13400 if (size == thissize && align == thisalign)
13401 result = SUCCESS;
13402 }
13403 while (result != SUCCESS);
13404
13405 va_end (ap);
13406
13407 if (result == SUCCESS)
13408 *do_align = 1;
13409 else
13410 first_error (_("unsupported alignment for instruction"));
13411
13412 return result;
13413 }
13414
13415 static void
13416 do_neon_ld_st_lane (void)
13417 {
13418 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
13419 int align_good, do_align = 0;
13420 int logsize = neon_logbits (et.size);
13421 int align = inst.operands[1].imm >> 8;
13422 int n = (inst.instruction >> 8) & 3;
13423 int max_el = 64 / et.size;
13424
13425 if (et.type == NT_invtype)
13426 return;
13427
13428 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
13429 _("bad list length"));
13430 constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
13431 _("scalar index out of range"));
13432 constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
13433 && et.size == 8,
13434 _("stride of 2 unavailable when element size is 8"));
13435
13436 switch (n)
13437 {
13438 case 0: /* VLD1 / VST1. */
13439 align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
13440 32, 32, -1);
13441 if (align_good == FAIL)
13442 return;
13443 if (do_align)
13444 {
13445 unsigned alignbits = 0;
13446 switch (et.size)
13447 {
13448 case 16: alignbits = 0x1; break;
13449 case 32: alignbits = 0x3; break;
13450 default: ;
13451 }
13452 inst.instruction |= alignbits << 4;
13453 }
13454 break;
13455
13456 case 1: /* VLD2 / VST2. */
13457 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
13458 32, 64, -1);
13459 if (align_good == FAIL)
13460 return;
13461 if (do_align)
13462 inst.instruction |= 1 << 4;
13463 break;
13464
13465 case 2: /* VLD3 / VST3. */
13466 constraint (inst.operands[1].immisalign,
13467 _("can't use alignment with this instruction"));
13468 break;
13469
13470 case 3: /* VLD4 / VST4. */
13471 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
13472 16, 64, 32, 64, 32, 128, -1);
13473 if (align_good == FAIL)
13474 return;
13475 if (do_align)
13476 {
13477 unsigned alignbits = 0;
13478 switch (et.size)
13479 {
13480 case 8: alignbits = 0x1; break;
13481 case 16: alignbits = 0x1; break;
13482 case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
13483 default: ;
13484 }
13485 inst.instruction |= alignbits << 4;
13486 }
13487 break;
13488
13489 default: ;
13490 }
13491
13492 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
13493 if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
13494 inst.instruction |= 1 << (4 + logsize);
13495
13496 inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
13497 inst.instruction |= logsize << 10;
13498 }
13499
13500 /* Encode the "single n-element structure to all lanes" VLD<n> instructions.  */
13501
13502 static void
13503 do_neon_ld_dup (void)
13504 {
13505 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
13506 int align_good, do_align = 0;
13507
13508 if (et.type == NT_invtype)
13509 return;
13510
13511 switch ((inst.instruction >> 8) & 3)
13512 {
13513 case 0: /* VLD1. */
13514 assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
13515 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
13516 &do_align, 16, 16, 32, 32, -1);
13517 if (align_good == FAIL)
13518 return;
13519 switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
13520 {
13521 case 1: break;
13522 case 2: inst.instruction |= 1 << 5; break;
13523 default: first_error (_("bad list length")); return;
13524 }
13525 inst.instruction |= neon_logbits (et.size) << 6;
13526 break;
13527
13528 case 1: /* VLD2. */
13529 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
13530 &do_align, 8, 16, 16, 32, 32, 64, -1);
13531 if (align_good == FAIL)
13532 return;
13533 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
13534 _("bad list length"));
13535 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
13536 inst.instruction |= 1 << 5;
13537 inst.instruction |= neon_logbits (et.size) << 6;
13538 break;
13539
13540 case 2: /* VLD3. */
13541 constraint (inst.operands[1].immisalign,
13542 _("can't use alignment with this instruction"));
13543 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
13544 _("bad list length"));
13545 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
13546 inst.instruction |= 1 << 5;
13547 inst.instruction |= neon_logbits (et.size) << 6;
13548 break;
13549
13550 case 3: /* VLD4. */
13551 {
13552 int align = inst.operands[1].imm >> 8;
13553 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
13554 16, 64, 32, 64, 32, 128, -1);
13555 if (align_good == FAIL)
13556 return;
13557 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
13558 _("bad list length"));
13559 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
13560 inst.instruction |= 1 << 5;
13561 if (et.size == 32 && align == 128)
13562 inst.instruction |= 0x3 << 6;
13563 else
13564 inst.instruction |= neon_logbits (et.size) << 6;
13565 }
13566 break;
13567
13568 default: ;
13569 }
13570
13571 inst.instruction |= do_align << 4;
13572 }
13573
13574 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
13575 apart from bits [11:4]).  */
13576
13577 static void
13578 do_neon_ldx_stx (void)
13579 {
13580 switch (NEON_LANE (inst.operands[0].imm))
13581 {
13582 case NEON_INTERLEAVE_LANES:
13583 inst.instruction = NEON_ENC_INTERLV (inst.instruction);
13584 do_neon_ld_st_interleave ();
13585 break;
13586
13587 case NEON_ALL_LANES:
13588 inst.instruction = NEON_ENC_DUP (inst.instruction);
13589 do_neon_ld_dup ();
13590 break;
13591
13592 default:
13593 inst.instruction = NEON_ENC_LANE (inst.instruction);
13594 do_neon_ld_st_lane ();
13595 }
13596
13597 /* L bit comes from bit mask. */
13598 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13599 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13600 inst.instruction |= inst.operands[1].reg << 16;
13601
13602 if (inst.operands[1].postind)
13603 {
13604 int postreg = inst.operands[1].imm & 0xf;
13605 constraint (!inst.operands[1].immisreg,
13606 _("post-index must be a register"));
13607 constraint (postreg == 0xd || postreg == 0xf,
13608 _("bad register for post-index"));
13609 inst.instruction |= postreg;
13610 }
13611 else if (inst.operands[1].writeback)
13612 {
13613 inst.instruction |= 0xd;
13614 }
13615 else
13616 inst.instruction |= 0xf;
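/* Illustrative addressing forms handled above: "vld1.8 {d0}, [r0]" leaves
   Rm = 0xf (no writeback), "vld1.8 {d0}, [r0]!" sets writeback and uses
   Rm = 0xd, and "vld1.8 {d0}, [r0], r2" post-indexes by register r2.  */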
13617
13618 if (thumb_mode)
13619 inst.instruction |= 0xf9000000;
13620 else
13621 inst.instruction |= 0xf4000000;
13622 }
13623
13624 \f
13625 /* Overall per-instruction processing. */
13626
13627 /* We need to be able to fix up arbitrary expressions in some statements.
13628 This is so that we can handle symbols that are an arbitrary distance from
13629 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
13630 which returns part of an address in a form which will be valid for
13631 a data instruction. We do this by pushing the expression into a symbol
13632 in the expr_section, and creating a fix for that. */
13633
13634 static void
13635 fix_new_arm (fragS * frag,
13636 int where,
13637 short int size,
13638 expressionS * exp,
13639 int pc_rel,
13640 int reloc)
13641 {
13642 fixS * new_fix;
13643
13644 switch (exp->X_op)
13645 {
13646 case O_constant:
13647 case O_symbol:
13648 case O_add:
13649 case O_subtract:
13650 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
13651 break;
13652
13653 default:
13654 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
13655 pc_rel, reloc);
13656 break;
13657 }
13658
13659 /* Mark whether the fix is to a THUMB instruction, or an ARM
13660 instruction. */
13661 new_fix->tc_fix_data = thumb_mode;
13662 }
13663
13664 /* Create a frag for an instruction requiring relaxation.  */
13665 static void
13666 output_relax_insn (void)
13667 {
13668 char * to;
13669 symbolS *sym;
13670 int offset;
13671
13672 /* The size of the instruction is unknown, so tie the debug info to the
13673 start of the instruction. */
13674 dwarf2_emit_insn (0);
13675
13676 switch (inst.reloc.exp.X_op)
13677 {
13678 case O_symbol:
13679 sym = inst.reloc.exp.X_add_symbol;
13680 offset = inst.reloc.exp.X_add_number;
13681 break;
13682 case O_constant:
13683 sym = NULL;
13684 offset = inst.reloc.exp.X_add_number;
13685 break;
13686 default:
13687 sym = make_expr_symbol (&inst.reloc.exp);
13688 offset = 0;
13689 break;
13690 }
13691 to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
13692 inst.relax, sym, offset, NULL/*offset, opcode*/);
13693 md_number_to_chars (to, inst.instruction, THUMB_SIZE);
13694 }
13695
13696 /* Write a 32-bit thumb instruction to buf. */
13697 static void
13698 put_thumb32_insn (char * buf, unsigned long insn)
13699 {
13700 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
13701 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
13702 }
13703
13704 static void
13705 output_inst (const char * str)
13706 {
13707 char * to = NULL;
13708
13709 if (inst.error)
13710 {
13711 as_bad ("%s -- `%s'", inst.error, str);
13712 return;
13713 }
13714 if (inst.relax) {
13715 output_relax_insn();
13716 return;
13717 }
13718 if (inst.size == 0)
13719 return;
13720
13721 to = frag_more (inst.size);
13722
13723 if (thumb_mode && (inst.size > THUMB_SIZE))
13724 {
13725 assert (inst.size == (2 * THUMB_SIZE));
13726 put_thumb32_insn (to, inst.instruction);
13727 }
13728 else if (inst.size > INSN_SIZE)
13729 {
13730 assert (inst.size == (2 * INSN_SIZE));
13731 md_number_to_chars (to, inst.instruction, INSN_SIZE);
13732 md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
13733 }
13734 else
13735 md_number_to_chars (to, inst.instruction, inst.size);
13736
13737 if (inst.reloc.type != BFD_RELOC_UNUSED)
13738 fix_new_arm (frag_now, to - frag_now->fr_literal,
13739 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
13740 inst.reloc.type);
13741
13742 dwarf2_emit_insn (inst.size);
13743 }
13744
13745 /* Tag values used in struct asm_opcode's tag field. */
13746 enum opcode_tag
13747 {
13748 OT_unconditional, /* Instruction cannot be conditionalized.
13749 The ARM condition field is still 0xE. */
13750 OT_unconditionalF, /* Instruction cannot be conditionalized
13751 and carries 0xF in its ARM condition field. */
13752 OT_csuffix, /* Instruction takes a conditional suffix. */
13753 OT_csuffixF, /* Some forms of the instruction take a conditional
13754 suffix, others place 0xF where the condition field
13755 would be. */
13756 OT_cinfix3, /* Instruction takes a conditional infix,
13757 beginning at character index 3. (In
13758 unified mode, it becomes a suffix.) */
13759 OT_cinfix3_deprecated, /* The same as OT_cinfix3. This is used for
13760 tsts, cmps, cmns, and teqs. */
13761 OT_cinfix3_legacy, /* Legacy instruction takes a conditional infix at
13762 character index 3, even in unified mode. Used for
13763 legacy instructions where suffix and infix forms
13764 may be ambiguous. */
13765 OT_csuf_or_in3, /* Instruction takes either a conditional
13766 suffix or an infix at character index 3. */
13767 OT_odd_infix_unc, /* This is the unconditional variant of an
13768 instruction that takes a conditional infix
13769 at an unusual position. In unified mode,
13770 this variant will accept a suffix. */
13771 OT_odd_infix_0 /* Values greater than or equal to OT_odd_infix_0
13772 are the conditional variants of instructions that
13773 take conditional infixes in unusual positions.
13774 The infix appears at character index
13775 (tag - OT_odd_infix_0). These are not accepted
13776 in unified mode. */
13777 };
13778
13779 /* Subroutine of md_assemble, responsible for looking up the primary
13780 opcode from the mnemonic the user wrote. STR points to the
13781 beginning of the mnemonic.
13782
13783 This is not simply a hash table lookup, because of conditional
13784 variants. Most instructions have conditional variants, which are
13785 expressed with a _conditional affix_ to the mnemonic. If we were
13786 to encode each conditional variant as a literal string in the opcode
13787 table, it would have approximately 20,000 entries.
13788
13789 Most mnemonics take this affix as a suffix, and in unified syntax,
13790 'most' is upgraded to 'all'. However, in the divided syntax, some
13791 instructions take the affix as an infix, notably the s-variants of
13792 the arithmetic instructions. Of those instructions, all but six
13793 have the infix appear after the third character of the mnemonic.
13794
13795 Accordingly, the algorithm for looking up primary opcodes given
13796 an identifier is:
13797
13798 1. Look up the identifier in the opcode table.
13799 If we find a match, go to step U.
13800
13801 2. Look up the last two characters of the identifier in the
13802 conditions table. If we find a match, look up the first N-2
13803 characters of the identifier in the opcode table. If we
13804 find a match, go to step CE.
13805
13806 3. Look up the fourth and fifth characters of the identifier in
13807 the conditions table. If we find a match, extract those
13808 characters from the identifier, and look up the remaining
13809 characters in the opcode table. If we find a match, go
13810 to step CM.
13811
13812 4. Fail.
13813
13814 U. Examine the tag field of the opcode structure, in case this is
13815 one of the six instructions with its conditional infix in an
13816 unusual place. If it is, the tag tells us where to find the
13817 infix; look it up in the conditions table and set inst.cond
13818 accordingly. Otherwise, this is an unconditional instruction.
13819 Again set inst.cond accordingly. Return the opcode structure.
13820
13821 CE. Examine the tag field to make sure this is an instruction that
13822 should receive a conditional suffix. If it is not, fail.
13823 Otherwise, set inst.cond from the suffix we already looked up,
13824 and return the opcode structure.
13825
13826 CM. Examine the tag field to make sure this is an instruction that
13827 should receive a conditional infix after the third character.
13828 If it is not, fail. Otherwise, undo the edits to the current
13829 line of input and proceed as for case CE. */
13830
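/* Two worked examples of the algorithm above (illustrative only, using
   mnemonics that appear in the opcode table further down):

     "addeq"   - step 1 fails; step 2 finds "eq" in the conditions table and
                 "add" in the opcode table, so step CE sets inst.cond to the
                 value for EQ (0x0) and returns the "add" entry (OT_csuffix).

     "ldmeqia" - steps 1 and 2 fail; step 3 finds "eq" at character index 3,
                 strips it, finds "ldmia" in the opcode table, and step CM
                 accepts it because that entry is tagged OT_cinfix3.  */
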
13831 static const struct asm_opcode *
13832 opcode_lookup (char **str)
13833 {
13834 char *end, *base;
13835 char *affix;
13836 const struct asm_opcode *opcode;
13837 const struct asm_cond *cond;
13838 char save[2];
13839 bfd_boolean neon_supported;
13840
13841 neon_supported = ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1);
13842
13843 /* Scan up to the end of the mnemonic, which must end in white space,
13844 '.' (in unified mode, or for Neon instructions), or end of string. */
13845 for (base = end = *str; *end != '\0'; end++)
13846 if (*end == ' ' || ((unified_syntax || neon_supported) && *end == '.'))
13847 break;
13848
13849 if (end == base)
13850 return 0;
13851
13852 /* Handle a possible width suffix and/or Neon type suffix. */
13853 if (end[0] == '.')
13854 {
13855 int offset = 2;
13856
13857 /* The .w and .n suffixes are only valid if the unified syntax is in
13858 use. */
13859 if (unified_syntax && end[1] == 'w')
13860 inst.size_req = 4;
13861 else if (unified_syntax && end[1] == 'n')
13862 inst.size_req = 2;
13863 else
13864 offset = 0;
13865
13866 inst.vectype.elems = 0;
13867
13868 *str = end + offset;
13869
13870 if (end[offset] == '.')
13871 {
13872 /* See if we have a Neon type suffix (possible in either unified or
13873 non-unified ARM syntax mode). */
13874 if (parse_neon_type (&inst.vectype, str) == FAIL)
13875 return 0;
13876 }
13877 else if (end[offset] != '\0' && end[offset] != ' ')
13878 return 0;
13879 }
13880 else
13881 *str = end;
13882
13883 /* Look for unaffixed or special-case affixed mnemonic. */
13884 opcode = hash_find_n (arm_ops_hsh, base, end - base);
13885 if (opcode)
13886 {
13887 /* step U */
13888 if (opcode->tag < OT_odd_infix_0)
13889 {
13890 inst.cond = COND_ALWAYS;
13891 return opcode;
13892 }
13893
13894 if (unified_syntax)
13895 as_warn (_("conditional infixes are deprecated in unified syntax"));
13896 affix = base + (opcode->tag - OT_odd_infix_0);
13897 cond = hash_find_n (arm_cond_hsh, affix, 2);
13898 assert (cond);
13899
13900 inst.cond = cond->value;
13901 return opcode;
13902 }
13903
13904   /* Cannot have a conditional suffix on a mnemonic of less than three
13905      characters: one for the base mnemonic plus two for the suffix.  */
13906 if (end - base < 3)
13907 return 0;
13908
13909 /* Look for suffixed mnemonic. */
13910 affix = end - 2;
13911 cond = hash_find_n (arm_cond_hsh, affix, 2);
13912 opcode = hash_find_n (arm_ops_hsh, base, affix - base);
13913 if (opcode && cond)
13914 {
13915 /* step CE */
13916 switch (opcode->tag)
13917 {
13918 case OT_cinfix3_legacy:
13919 	  /* Ignore conditional suffixes matched on infix-only mnemonics.  */
13920 break;
13921
13922 case OT_cinfix3:
13923 case OT_cinfix3_deprecated:
13924 case OT_odd_infix_unc:
13925 if (!unified_syntax)
13926 return 0;
13927 /* else fall through */
13928
13929 case OT_csuffix:
13930 case OT_csuffixF:
13931 case OT_csuf_or_in3:
13932 inst.cond = cond->value;
13933 return opcode;
13934
13935 case OT_unconditional:
13936 case OT_unconditionalF:
13937 if (thumb_mode)
13938 {
13939 inst.cond = cond->value;
13940 }
13941 else
13942 {
13943 /* delayed diagnostic */
13944 inst.error = BAD_COND;
13945 inst.cond = COND_ALWAYS;
13946 }
13947 return opcode;
13948
13949 default:
13950 return 0;
13951 }
13952 }
13953
13954 /* Cannot have a usual-position infix on a mnemonic of less than
13955 six characters (five would be a suffix). */
13956 if (end - base < 6)
13957 return 0;
13958
13959 /* Look for infixed mnemonic in the usual position. */
13960 affix = base + 3;
13961 cond = hash_find_n (arm_cond_hsh, affix, 2);
13962 if (!cond)
13963 return 0;
13964
13965 memcpy (save, affix, 2);
13966 memmove (affix, affix + 2, (end - affix) - 2);
13967 opcode = hash_find_n (arm_ops_hsh, base, (end - base) - 2);
13968 memmove (affix + 2, affix, (end - affix) - 2);
13969 memcpy (affix, save, 2);
13970
13971 if (opcode
13972 && (opcode->tag == OT_cinfix3
13973 || opcode->tag == OT_cinfix3_deprecated
13974 || opcode->tag == OT_csuf_or_in3
13975 || opcode->tag == OT_cinfix3_legacy))
13976 {
13977 /* step CM */
13978 if (unified_syntax
13979 && (opcode->tag == OT_cinfix3
13980 || opcode->tag == OT_cinfix3_deprecated))
13981 as_warn (_("conditional infixes are deprecated in unified syntax"));
13982
13983 inst.cond = cond->value;
13984 return opcode;
13985 }
13986
13987 return 0;
13988 }
13989
13990 void
13991 md_assemble (char *str)
13992 {
13993 char *p = str;
13994 const struct asm_opcode * opcode;
13995
13996 /* Align the previous label if needed. */
13997 if (last_label_seen != NULL)
13998 {
13999 symbol_set_frag (last_label_seen, frag_now);
14000 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
14001 S_SET_SEGMENT (last_label_seen, now_seg);
14002 }
14003
14004 memset (&inst, '\0', sizeof (inst));
14005 inst.reloc.type = BFD_RELOC_UNUSED;
14006
14007 opcode = opcode_lookup (&p);
14008 if (!opcode)
14009 {
14010 /* It wasn't an instruction, but it might be a register alias of
14011 the form alias .req reg, or a Neon .dn/.qn directive. */
14012 if (!create_register_alias (str, p)
14013 && !create_neon_reg_alias (str, p))
14014 as_bad (_("bad instruction `%s'"), str);
14015
14016 return;
14017 }
14018
14019 if (opcode->tag == OT_cinfix3_deprecated)
14020 as_warn (_("s suffix on comparison instruction is deprecated"));
14021
14022 /* The value which unconditional instructions should have in place of the
14023 condition field. */
14024 inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;
14025
14026 if (thumb_mode)
14027 {
14028 arm_feature_set variant;
14029
14030 variant = cpu_variant;
14031 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
14032 if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
14033 ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
14034 /* Check that this instruction is supported for this CPU. */
14035 if (!opcode->tvariant
14036 || (thumb_mode == 1
14037 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
14038 {
14039 as_bad (_("selected processor does not support `%s'"), str);
14040 return;
14041 }
14042 if (inst.cond != COND_ALWAYS && !unified_syntax
14043 && opcode->tencode != do_t_branch)
14044 {
14045 as_bad (_("Thumb does not support conditional execution"));
14046 return;
14047 }
14048
14049 /* Check conditional suffixes. */
14050 if (current_it_mask)
14051 {
14052 int cond;
14053 cond = current_cc ^ ((current_it_mask >> 4) & 1) ^ 1;
14054 current_it_mask <<= 1;
14055 current_it_mask &= 0x1f;
14056 /* The BKPT instruction is unconditional even in an IT block. */
14057 if (!inst.error
14058 && cond != inst.cond && opcode->tencode != do_t_bkpt)
14059 {
14060 as_bad (_("incorrect condition in IT block"));
14061 return;
14062 }
14063 }
14064 else if (inst.cond != COND_ALWAYS && opcode->tencode != do_t_branch)
14065 {
14066 	  as_bad (_("Thumb conditional instruction not in IT block"));
14067 return;
14068 }
14069
14070 mapping_state (MAP_THUMB);
14071 inst.instruction = opcode->tvalue;
14072
14073 if (!parse_operands (p, opcode->operands))
14074 opcode->tencode ();
14075
14076 /* Clear current_it_mask at the end of an IT block. */
14077 if (current_it_mask == 0x10)
14078 current_it_mask = 0;
14079
14080 if (!(inst.error || inst.relax))
14081 {
14082 assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
14083 inst.size = (inst.instruction > 0xffff ? 4 : 2);
14084 if (inst.size_req && inst.size_req != inst.size)
14085 {
14086 as_bad (_("cannot honor width suffix -- `%s'"), str);
14087 return;
14088 }
14089 }
14090 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
14091 *opcode->tvariant);
14092       /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
14093 	 set those bits when a Thumb-2 32-bit instruction is seen, i.e.
14094 	 anything other than bl/blx.
14095 	 This is overly pessimistic for relaxable instructions.  */
14096 if ((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
14097 || inst.relax)
14098 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
14099 arm_ext_v6t2);
14100 }
14101 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
14102 {
14103 /* Check that this instruction is supported for this CPU. */
14104 if (!opcode->avariant ||
14105 !ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
14106 {
14107 as_bad (_("selected processor does not support `%s'"), str);
14108 return;
14109 }
14110 if (inst.size_req)
14111 {
14112 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
14113 return;
14114 }
14115
14116 mapping_state (MAP_ARM);
14117 inst.instruction = opcode->avalue;
14118 if (opcode->tag == OT_unconditionalF)
14119 inst.instruction |= 0xF << 28;
14120 else
14121 inst.instruction |= inst.cond << 28;
14122 inst.size = INSN_SIZE;
14123 if (!parse_operands (p, opcode->operands))
14124 opcode->aencode ();
14125 /* Arm mode bx is marked as both v4T and v5 because it's still required
14126 on a hypothetical non-thumb v5 core. */
14127 if (ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v4t)
14128 || ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v5))
14129 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
14130 else
14131 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
14132 *opcode->avariant);
14133 }
14134 else
14135 {
14136 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
14137 "-- `%s'"), str);
14138 return;
14139 }
14140 output_inst (str);
14141 }
14142
14143 /* Various frobbings of labels and their addresses. */
14144
14145 void
14146 arm_start_line_hook (void)
14147 {
14148 last_label_seen = NULL;
14149 }
14150
14151 void
14152 arm_frob_label (symbolS * sym)
14153 {
14154 last_label_seen = sym;
14155
14156 ARM_SET_THUMB (sym, thumb_mode);
14157
14158 #if defined OBJ_COFF || defined OBJ_ELF
14159 ARM_SET_INTERWORK (sym, support_interwork);
14160 #endif
14161
14162 /* Note - do not allow local symbols (.Lxxx) to be labeled
14163 as Thumb functions. This is because these labels, whilst
14164 they exist inside Thumb code, are not the entry points for
14165 possible ARM->Thumb calls. Also, these labels can be used
14166      as part of a computed goto or switch statement, e.g. gcc
14167 can generate code that looks like this:
14168
14169 ldr r2, [pc, .Laaa]
14170 lsl r3, r3, #2
14171 ldr r2, [r3, r2]
14172 mov pc, r2
14173
14174 .Lbbb: .word .Lxxx
14175 .Lccc: .word .Lyyy
14176 ..etc...
14177 	     .Laaa:   .word .Lbbb
14178
14179 The first instruction loads the address of the jump table.
14180 The second instruction converts a table index into a byte offset.
14181 The third instruction gets the jump address out of the table.
14182 The fourth instruction performs the jump.
14183
14184 If the address stored at .Laaa is that of a symbol which has the
14185 Thumb_Func bit set, then the linker will arrange for this address
14186 to have the bottom bit set, which in turn would mean that the
14187 address computation performed by the third instruction would end
14188 up with the bottom bit set. Since the ARM is capable of unaligned
14189 word loads, the instruction would then load the incorrect address
14190 out of the jump table, and chaos would ensue. */
14191 if (label_is_thumb_function_name
14192 && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
14193 && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
14194 {
14195 /* When the address of a Thumb function is taken the bottom
14196 bit of that address should be set. This will allow
14197 interworking between Arm and Thumb functions to work
14198 correctly. */
14199
14200 THUMB_SET_FUNC (sym, 1);
14201
14202 label_is_thumb_function_name = FALSE;
14203 }
14204
14205 dwarf2_emit_label (sym);
14206 }
14207
14208 int
14209 arm_data_in_code (void)
14210 {
14211 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
14212 {
14213 *input_line_pointer = '/';
14214 input_line_pointer += 5;
14215 *input_line_pointer = 0;
14216 return 1;
14217 }
14218
14219 return 0;
14220 }
14221
14222 char *
14223 arm_canonicalize_symbol_name (char * name)
14224 {
14225 int len;
14226
14227 if (thumb_mode && (len = strlen (name)) > 5
14228 && streq (name + len - 5, "/data"))
14229 *(name + len - 5) = 0;
14230
14231 return name;
14232 }
14233 \f
14234 /* Table of all register names defined by default. The user can
14235 define additional names with .req. Note that all register names
14236 should appear in both upper and lowercase variants. Some registers
14237 also have mixed-case names. */
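
/* A user-defined alias would typically look like

       heap .req r4

   which create_register_alias handles, making "heap" usable wherever an ARM
   core register is accepted.  (Hypothetical alias name; standard GAS .req
   syntax assumed.)  */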
14238
14239 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
14240 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
14241 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
14242 #define REGSET(p,t) \
14243 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
14244 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
14245 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
14246 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
14247 #define REGSETH(p,t) \
14248 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
14249 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
14250 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
14251 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
14252 #define REGSET2(p,t) \
14253 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
14254 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
14255 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
14256 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
14257
14258 static const struct reg_entry reg_names[] =
14259 {
14260 /* ARM integer registers. */
14261 REGSET(r, RN), REGSET(R, RN),
14262
14263 /* ATPCS synonyms. */
14264 REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
14265 REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
14266 REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
14267
14268 REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
14269 REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
14270 REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
14271
14272 /* Well-known aliases. */
14273 REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
14274 REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
14275
14276 REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
14277 REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
14278
14279 /* Coprocessor numbers. */
14280 REGSET(p, CP), REGSET(P, CP),
14281
14282 /* Coprocessor register numbers. The "cr" variants are for backward
14283 compatibility. */
14284 REGSET(c, CN), REGSET(C, CN),
14285 REGSET(cr, CN), REGSET(CR, CN),
14286
14287 /* FPA registers. */
14288 REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
14289 REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
14290
14291 REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
14292 REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
14293
14294 /* VFP SP registers. */
14295 REGSET(s,VFS), REGSET(S,VFS),
14296 REGSETH(s,VFS), REGSETH(S,VFS),
14297
14298 /* VFP DP Registers. */
14299 REGSET(d,VFD), REGSET(D,VFD),
14300 /* Extra Neon DP registers. */
14301 REGSETH(d,VFD), REGSETH(D,VFD),
14302
14303 /* Neon QP registers. */
14304 REGSET2(q,NQ), REGSET2(Q,NQ),
14305
14306 /* VFP control registers. */
14307 REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
14308 REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
14309
14310 /* Maverick DSP coprocessor registers. */
14311 REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
14312 REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),
14313
14314 REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
14315 REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
14316 REGDEF(dspsc,0,DSPSC),
14317
14318 REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
14319 REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
14320 REGDEF(DSPSC,0,DSPSC),
14321
14322 /* iWMMXt data registers - p0, c0-15. */
14323 REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
14324
14325 /* iWMMXt control registers - p1, c0-3. */
14326 REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
14327 REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
14328 REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
14329 REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),
14330
14331 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
14332 REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
14333 REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
14334 REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
14335 REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),
14336
14337 /* XScale accumulator registers. */
14338 REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
14339 };
14340 #undef REGDEF
14341 #undef REGNUM
14342 #undef REGSET
14343
14344 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
14345 within psr_required_here. */
14346 static const struct asm_psr psrs[] =
14347 {
14348 /* Backward compatibility notation. Note that "all" is no longer
14349 truly all possible PSR bits. */
14350 {"all", PSR_c | PSR_f},
14351 {"flg", PSR_f},
14352 {"ctl", PSR_c},
14353
14354 /* Individual flags. */
14355 {"f", PSR_f},
14356 {"c", PSR_c},
14357 {"x", PSR_x},
14358 {"s", PSR_s},
14359 /* Combinations of flags. */
14360 {"fs", PSR_f | PSR_s},
14361 {"fx", PSR_f | PSR_x},
14362 {"fc", PSR_f | PSR_c},
14363 {"sf", PSR_s | PSR_f},
14364 {"sx", PSR_s | PSR_x},
14365 {"sc", PSR_s | PSR_c},
14366 {"xf", PSR_x | PSR_f},
14367 {"xs", PSR_x | PSR_s},
14368 {"xc", PSR_x | PSR_c},
14369 {"cf", PSR_c | PSR_f},
14370 {"cs", PSR_c | PSR_s},
14371 {"cx", PSR_c | PSR_x},
14372 {"fsx", PSR_f | PSR_s | PSR_x},
14373 {"fsc", PSR_f | PSR_s | PSR_c},
14374 {"fxs", PSR_f | PSR_x | PSR_s},
14375 {"fxc", PSR_f | PSR_x | PSR_c},
14376 {"fcs", PSR_f | PSR_c | PSR_s},
14377 {"fcx", PSR_f | PSR_c | PSR_x},
14378 {"sfx", PSR_s | PSR_f | PSR_x},
14379 {"sfc", PSR_s | PSR_f | PSR_c},
14380 {"sxf", PSR_s | PSR_x | PSR_f},
14381 {"sxc", PSR_s | PSR_x | PSR_c},
14382 {"scf", PSR_s | PSR_c | PSR_f},
14383 {"scx", PSR_s | PSR_c | PSR_x},
14384 {"xfs", PSR_x | PSR_f | PSR_s},
14385 {"xfc", PSR_x | PSR_f | PSR_c},
14386 {"xsf", PSR_x | PSR_s | PSR_f},
14387 {"xsc", PSR_x | PSR_s | PSR_c},
14388 {"xcf", PSR_x | PSR_c | PSR_f},
14389 {"xcs", PSR_x | PSR_c | PSR_s},
14390 {"cfs", PSR_c | PSR_f | PSR_s},
14391 {"cfx", PSR_c | PSR_f | PSR_x},
14392 {"csf", PSR_c | PSR_s | PSR_f},
14393 {"csx", PSR_c | PSR_s | PSR_x},
14394 {"cxf", PSR_c | PSR_x | PSR_f},
14395 {"cxs", PSR_c | PSR_x | PSR_s},
14396 {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
14397 {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
14398 {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
14399 {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
14400 {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
14401 {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
14402 {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
14403 {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
14404 {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
14405 {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
14406 {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
14407 {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
14408 {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
14409 {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
14410 {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
14411 {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
14412 {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
14413 {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
14414 {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
14415 {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
14416 {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
14417 {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
14418 {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
14419 {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
14420 };
14421
14422 /* Table of V7M psr names. */
14423 static const struct asm_psr v7m_psrs[] =
14424 {
14425 {"apsr", 0 },
14426 {"iapsr", 1 },
14427 {"eapsr", 2 },
14428 {"psr", 3 },
14429 {"ipsr", 5 },
14430 {"epsr", 6 },
14431 {"iepsr", 7 },
14432 {"msp", 8 },
14433 {"psp", 9 },
14434 {"primask", 16},
14435 {"basepri", 17},
14436 {"basepri_max", 18},
14437 {"faultmask", 19},
14438 {"control", 20}
14439 };
14440
14441 /* Table of all shift-in-operand names. */
14442 static const struct asm_shift_name shift_names [] =
14443 {
14444 { "asl", SHIFT_LSL }, { "ASL", SHIFT_LSL },
14445 { "lsl", SHIFT_LSL }, { "LSL", SHIFT_LSL },
14446 { "lsr", SHIFT_LSR }, { "LSR", SHIFT_LSR },
14447 { "asr", SHIFT_ASR }, { "ASR", SHIFT_ASR },
14448 { "ror", SHIFT_ROR }, { "ROR", SHIFT_ROR },
14449 { "rrx", SHIFT_RRX }, { "RRX", SHIFT_RRX }
14450 };
14451
14452 /* Table of all explicit relocation names. */
14453 #ifdef OBJ_ELF
14454 static struct reloc_entry reloc_names[] =
14455 {
14456 { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
14457 { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
14458 { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
14459 { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
14460 { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
14461 { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
14462 { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
14463 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
14464 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
14465 { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
14466 { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32}
14467 };
14468 #endif
14469
14470 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
14471 static const struct asm_cond conds[] =
14472 {
14473 {"eq", 0x0},
14474 {"ne", 0x1},
14475 {"cs", 0x2}, {"hs", 0x2},
14476 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
14477 {"mi", 0x4},
14478 {"pl", 0x5},
14479 {"vs", 0x6},
14480 {"vc", 0x7},
14481 {"hi", 0x8},
14482 {"ls", 0x9},
14483 {"ge", 0xa},
14484 {"lt", 0xb},
14485 {"gt", 0xc},
14486 {"le", 0xd},
14487 {"al", 0xe}
14488 };
14489
14490 static struct asm_barrier_opt barrier_opt_names[] =
14491 {
14492 { "sy", 0xf },
14493 { "un", 0x7 },
14494 { "st", 0xe },
14495 { "unst", 0x6 }
14496 };
14497
14498 /* Table of ARM-format instructions. */
14499
14500 /* Macros for gluing together operand strings. N.B. In all cases
14501 other than OPS0, the trailing OP_stop comes from default
14502 zero-initialization of the unspecified elements of the array. */
14503 #define OPS0() { OP_stop, }
14504 #define OPS1(a) { OP_##a, }
14505 #define OPS2(a,b) { OP_##a,OP_##b, }
14506 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
14507 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
14508 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
14509 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
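
/* For instance, OPS3 (RR, oRR, SH) expands to { OP_RR, OP_oRR, OP_SH, };
   the unspecified trailing elements default to zero, i.e. OP_stop.
   (Illustrative expansion only.)  */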
14510
14511 /* These macros abstract out the exact format of the mnemonic table and
14512 save some repeated characters. */
14513
14514 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
14515 #define TxCE(mnem, op, top, nops, ops, ae, te) \
14516 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
14517 THUMB_VARIANT, do_##ae, do_##te }
14518
14519 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
14520 a T_MNEM_xyz enumerator. */
14521 #define TCE(mnem, aop, top, nops, ops, ae, te) \
14522 TxCE(mnem, aop, 0x##top, nops, ops, ae, te)
14523 #define tCE(mnem, aop, top, nops, ops, ae, te) \
14524 TxCE(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
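
/* As an illustration, the entry
     tCE(and, 0000000, and, 3, (RR, oRR, SH), arit, t_arit3c)
   in the table below expands (via TxCE) to roughly
     { "and", { OP_RR, OP_oRR, OP_SH, }, OT_csuffix, 0x0000000, T_MNEM_and,
       ARM_VARIANT, THUMB_VARIANT, do_arit, do_t_arit3c }
   where ARM_VARIANT and THUMB_VARIANT take whatever values are #defined at
   that point in the table.  (Sketch of the expansion, not exact output.)  */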
14525
14526 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
14527 infix after the third character. */
14528 #define TxC3(mnem, op, top, nops, ops, ae, te) \
14529 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
14530 THUMB_VARIANT, do_##ae, do_##te }
14531 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
14532 { #mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
14533 THUMB_VARIANT, do_##ae, do_##te }
14534 #define TC3(mnem, aop, top, nops, ops, ae, te) \
14535 TxC3(mnem, aop, 0x##top, nops, ops, ae, te)
14536 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
14537 TxC3w(mnem, aop, 0x##top, nops, ops, ae, te)
14538 #define tC3(mnem, aop, top, nops, ops, ae, te) \
14539 TxC3(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
14540 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
14541 TxC3w(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
14542
14543 /* Mnemonic with a conditional infix in an unusual place. Each and every variant has to
14544 appear in the condition table. */
14545 #define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
14546 { #m1 #m2 #m3, OPS##nops ops, sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
14547 0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }
14548
14549 #define TxCM(m1, m2, op, top, nops, ops, ae, te) \
14550 TxCM_(m1, , m2, op, top, nops, ops, ae, te), \
14551 TxCM_(m1, eq, m2, op, top, nops, ops, ae, te), \
14552 TxCM_(m1, ne, m2, op, top, nops, ops, ae, te), \
14553 TxCM_(m1, cs, m2, op, top, nops, ops, ae, te), \
14554 TxCM_(m1, hs, m2, op, top, nops, ops, ae, te), \
14555 TxCM_(m1, cc, m2, op, top, nops, ops, ae, te), \
14556 TxCM_(m1, ul, m2, op, top, nops, ops, ae, te), \
14557 TxCM_(m1, lo, m2, op, top, nops, ops, ae, te), \
14558 TxCM_(m1, mi, m2, op, top, nops, ops, ae, te), \
14559 TxCM_(m1, pl, m2, op, top, nops, ops, ae, te), \
14560 TxCM_(m1, vs, m2, op, top, nops, ops, ae, te), \
14561 TxCM_(m1, vc, m2, op, top, nops, ops, ae, te), \
14562 TxCM_(m1, hi, m2, op, top, nops, ops, ae, te), \
14563 TxCM_(m1, ls, m2, op, top, nops, ops, ae, te), \
14564 TxCM_(m1, ge, m2, op, top, nops, ops, ae, te), \
14565 TxCM_(m1, lt, m2, op, top, nops, ops, ae, te), \
14566 TxCM_(m1, gt, m2, op, top, nops, ops, ae, te), \
14567 TxCM_(m1, le, m2, op, top, nops, ops, ae, te), \
14568 TxCM_(m1, al, m2, op, top, nops, ops, ae, te)
14569
14570 #define TCM(m1,m2, aop, top, nops, ops, ae, te) \
14571 TxCM(m1,m2, aop, 0x##top, nops, ops, ae, te)
14572 #define tCM(m1,m2, aop, top, nops, ops, ae, te) \
14573 TxCM(m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)
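
/* For example, tCM(ld,sh, ...) in the table below yields entries for
   "ldsh", "ldeqsh", "ldnesh", ..., "ldalsh".  The unconditional form is
   tagged OT_odd_infix_unc and the conditional forms OT_odd_infix_0 + 2,
   which tells opcode_lookup that the infix sits at character index 2.
   (Sketch of the expansion, not an exhaustive listing.)  */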
14574
14575 /* Mnemonic that cannot be conditionalized. The ARM condition-code
14576 field is still 0xE. Many of the Thumb variants can be executed
14577 conditionally, so this is checked separately. */
14578 #define TUE(mnem, op, top, nops, ops, ae, te) \
14579 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
14580 THUMB_VARIANT, do_##ae, do_##te }
14581
14582 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
14583 condition code field. */
14584 #define TUF(mnem, op, top, nops, ops, ae, te) \
14585 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
14586 THUMB_VARIANT, do_##ae, do_##te }
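
/* For example, TUF(pld, 450f000, f810f000, ...) below is tagged
   OT_unconditionalF, so md_assemble ORs 0xF into bits 28-31 of the ARM
   value, producing 0xf450f000.  (Illustrative arithmetic only.)  */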
14587
14588 /* ARM-only variants of all the above. */
14589 #define CE(mnem, op, nops, ops, ae) \
14590 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14591
14592 #define C3(mnem, op, nops, ops, ae) \
14593 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14594
14595 /* Legacy mnemonics that always have conditional infix after the third
14596 character. */
14597 #define CL(mnem, op, nops, ops, ae) \
14598 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
14599 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14600
14601 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
14602 #define cCE(mnem, op, nops, ops, ae) \
14603 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
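
/* Illustration: cCE(wfs, e200110, 1, (RR), rd) in the FPA block below gives
   an ARM value of 0xe200110 and a Thumb-2 value of 0xee200110, i.e. the same
   encoding with the condition field fixed at 0xE, which is why a single
   encoding function (do_rd) serves both.  (Worked example only.)  */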
14604
14605 /* Legacy coprocessor instructions where conditional infix and conditional
14606 suffix are ambiguous. For consistency this includes all FPA instructions,
14607 not just the potentially ambiguous ones. */
14608 #define cCL(mnem, op, nops, ops, ae) \
14609 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
14610 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
14611
14612 /* Coprocessor, takes either a suffix or a position-3 infix
14613 (for an FPA corner case). */
14614 #define C3E(mnem, op, nops, ops, ae) \
14615 { #mnem, OPS##nops ops, OT_csuf_or_in3, \
14616 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
14617
14618 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
14619 { #m1 #m2 #m3, OPS##nops ops, \
14620 sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
14621 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14622
14623 #define CM(m1, m2, op, nops, ops, ae) \
14624 xCM_(m1, , m2, op, nops, ops, ae), \
14625 xCM_(m1, eq, m2, op, nops, ops, ae), \
14626 xCM_(m1, ne, m2, op, nops, ops, ae), \
14627 xCM_(m1, cs, m2, op, nops, ops, ae), \
14628 xCM_(m1, hs, m2, op, nops, ops, ae), \
14629 xCM_(m1, cc, m2, op, nops, ops, ae), \
14630 xCM_(m1, ul, m2, op, nops, ops, ae), \
14631 xCM_(m1, lo, m2, op, nops, ops, ae), \
14632 xCM_(m1, mi, m2, op, nops, ops, ae), \
14633 xCM_(m1, pl, m2, op, nops, ops, ae), \
14634 xCM_(m1, vs, m2, op, nops, ops, ae), \
14635 xCM_(m1, vc, m2, op, nops, ops, ae), \
14636 xCM_(m1, hi, m2, op, nops, ops, ae), \
14637 xCM_(m1, ls, m2, op, nops, ops, ae), \
14638 xCM_(m1, ge, m2, op, nops, ops, ae), \
14639 xCM_(m1, lt, m2, op, nops, ops, ae), \
14640 xCM_(m1, gt, m2, op, nops, ops, ae), \
14641 xCM_(m1, le, m2, op, nops, ops, ae), \
14642 xCM_(m1, al, m2, op, nops, ops, ae)
14643
14644 #define UE(mnem, op, nops, ops, ae) \
14645 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
14646
14647 #define UF(mnem, op, nops, ops, ae) \
14648 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
14649
14650 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
14651 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
14652 use the same encoding function for each. */
14653 #define NUF(mnem, op, nops, ops, enc) \
14654 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
14655 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
14656
14657 /* Neon data processing, version which indirects through neon_enc_tab for
14658 the various overloaded versions of opcodes. */
14659 #define nUF(mnem, op, nops, ops, enc) \
14660 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op, \
14661 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
14662
14663 /* Neon insn with conditional suffix for the ARM version, non-overloaded
14664 version. */
14665 #define NCE_tag(mnem, op, nops, ops, enc, tag) \
14666 { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
14667 THUMB_VARIANT, do_##enc, do_##enc }
14668
14669 #define NCE(mnem, op, nops, ops, enc) \
14670 NCE_tag(mnem, op, nops, ops, enc, OT_csuffix)
14671
14672 #define NCEF(mnem, op, nops, ops, enc) \
14673 NCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)
14674
14675 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
14676 #define nCE_tag(mnem, op, nops, ops, enc, tag) \
14677 { #mnem, OPS##nops ops, tag, N_MNEM_##op, N_MNEM_##op, \
14678 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
14679
14680 #define nCE(mnem, op, nops, ops, enc) \
14681 nCE_tag(mnem, op, nops, ops, enc, OT_csuffix)
14682
14683 #define nCEF(mnem, op, nops, ops, enc) \
14684 nCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)
14685
14686 #define do_0 0
14687
14688 /* Thumb-only, unconditional. */
14689 #define UT(mnem, op, nops, ops, te) TUE(mnem, 0, op, nops, ops, 0, te)
14690
14691 static const struct asm_opcode insns[] =
14692 {
14693 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
14694 #define THUMB_VARIANT &arm_ext_v4t
14695 tCE(and, 0000000, and, 3, (RR, oRR, SH), arit, t_arit3c),
14696 tC3(ands, 0100000, ands, 3, (RR, oRR, SH), arit, t_arit3c),
14697 tCE(eor, 0200000, eor, 3, (RR, oRR, SH), arit, t_arit3c),
14698 tC3(eors, 0300000, eors, 3, (RR, oRR, SH), arit, t_arit3c),
14699 tCE(sub, 0400000, sub, 3, (RR, oRR, SH), arit, t_add_sub),
14700 tC3(subs, 0500000, subs, 3, (RR, oRR, SH), arit, t_add_sub),
14701 tCE(add, 0800000, add, 3, (RR, oRR, SHG), arit, t_add_sub),
14702 tC3(adds, 0900000, adds, 3, (RR, oRR, SHG), arit, t_add_sub),
14703 tCE(adc, 0a00000, adc, 3, (RR, oRR, SH), arit, t_arit3c),
14704 tC3(adcs, 0b00000, adcs, 3, (RR, oRR, SH), arit, t_arit3c),
14705 tCE(sbc, 0c00000, sbc, 3, (RR, oRR, SH), arit, t_arit3),
14706 tC3(sbcs, 0d00000, sbcs, 3, (RR, oRR, SH), arit, t_arit3),
14707 tCE(orr, 1800000, orr, 3, (RR, oRR, SH), arit, t_arit3c),
14708 tC3(orrs, 1900000, orrs, 3, (RR, oRR, SH), arit, t_arit3c),
14709 tCE(bic, 1c00000, bic, 3, (RR, oRR, SH), arit, t_arit3),
14710 tC3(bics, 1d00000, bics, 3, (RR, oRR, SH), arit, t_arit3),
14711
14712 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
14713 for setting PSR flag bits. They are obsolete in V6 and do not
14714 have Thumb equivalents. */
14715 tCE(tst, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
14716 tC3w(tsts, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
14717 CL(tstp, 110f000, 2, (RR, SH), cmp),
14718 tCE(cmp, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
14719 tC3w(cmps, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
14720 CL(cmpp, 150f000, 2, (RR, SH), cmp),
14721 tCE(cmn, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
14722 tC3w(cmns, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
14723 CL(cmnp, 170f000, 2, (RR, SH), cmp),
14724
14725 tCE(mov, 1a00000, mov, 2, (RR, SH), mov, t_mov_cmp),
14726 tC3(movs, 1b00000, movs, 2, (RR, SH), mov, t_mov_cmp),
14727 tCE(mvn, 1e00000, mvn, 2, (RR, SH), mov, t_mvn_tst),
14728 tC3(mvns, 1f00000, mvns, 2, (RR, SH), mov, t_mvn_tst),
14729
14730 tCE(ldr, 4100000, ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
14731 tC3(ldrb, 4500000, ldrb, 2, (RR, ADDRGLDR),ldst, t_ldst),
14732 tCE(str, 4000000, str, 2, (RR, ADDRGLDR),ldst, t_ldst),
14733 tC3(strb, 4400000, strb, 2, (RR, ADDRGLDR),ldst, t_ldst),
14734
14735 tCE(stm, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14736 tC3(stmia, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14737 tC3(stmea, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14738 tCE(ldm, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14739 tC3(ldmia, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14740 tC3(ldmfd, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14741
14742 TCE(swi, f000000, df00, 1, (EXPi), swi, t_swi),
14743 TCE(svc, f000000, df00, 1, (EXPi), swi, t_swi),
14744 tCE(b, a000000, b, 1, (EXPr), branch, t_branch),
14745 TCE(bl, b000000, f000f800, 1, (EXPr), bl, t_branch23),
14746
14747 /* Pseudo ops. */
14748 tCE(adr, 28f0000, adr, 2, (RR, EXP), adr, t_adr),
14749 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
14750 tCE(nop, 1a00000, nop, 1, (oI255c), nop, t_nop),
14751
14752 /* Thumb-compatibility pseudo ops. */
14753 tCE(lsl, 1a00000, lsl, 3, (RR, oRR, SH), shift, t_shift),
14754 tC3(lsls, 1b00000, lsls, 3, (RR, oRR, SH), shift, t_shift),
14755 tCE(lsr, 1a00020, lsr, 3, (RR, oRR, SH), shift, t_shift),
14756 tC3(lsrs, 1b00020, lsrs, 3, (RR, oRR, SH), shift, t_shift),
14757 tCE(asr, 1a00040, asr, 3, (RR, oRR, SH), shift, t_shift),
14758 tC3(asrs, 1b00040, asrs, 3, (RR, oRR, SH), shift, t_shift),
14759 tCE(ror, 1a00060, ror, 3, (RR, oRR, SH), shift, t_shift),
14760 tC3(rors, 1b00060, rors, 3, (RR, oRR, SH), shift, t_shift),
14761 tCE(neg, 2600000, neg, 2, (RR, RR), rd_rn, t_neg),
14762 tC3(negs, 2700000, negs, 2, (RR, RR), rd_rn, t_neg),
14763 tCE(push, 92d0000, push, 1, (REGLST), push_pop, t_push_pop),
14764 tCE(pop, 8bd0000, pop, 1, (REGLST), push_pop, t_push_pop),
14765
14766 #undef THUMB_VARIANT
14767 #define THUMB_VARIANT &arm_ext_v6
14768 TCE(cpy, 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
14769
14770 /* V1 instructions with no Thumb analogue prior to V6T2. */
14771 #undef THUMB_VARIANT
14772 #define THUMB_VARIANT &arm_ext_v6t2
14773 TCE(rsb, 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
14774 TC3(rsbs, 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
14775 TCE(teq, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
14776 TC3w(teqs, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
14777 CL(teqp, 130f000, 2, (RR, SH), cmp),
14778
14779 TC3(ldrt, 4300000, f8500e00, 2, (RR, ADDR), ldstt, t_ldstt),
14780 TC3(ldrbt, 4700000, f8100e00, 2, (RR, ADDR), ldstt, t_ldstt),
14781 TC3(strt, 4200000, f8400e00, 2, (RR, ADDR), ldstt, t_ldstt),
14782 TC3(strbt, 4600000, f8000e00, 2, (RR, ADDR), ldstt, t_ldstt),
14783
14784 TC3(stmdb, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14785 TC3(stmfd, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14786
14787 TC3(ldmdb, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14788 TC3(ldmea, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14789
14790 /* V1 instructions with no Thumb analogue at all. */
14791 CE(rsc, 0e00000, 3, (RR, oRR, SH), arit),
14792 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
14793
14794 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
14795 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
14796 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
14797 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
14798 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
14799 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
14800 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
14801 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
14802
14803 #undef ARM_VARIANT
14804 #define ARM_VARIANT &arm_ext_v2 /* ARM 2 - multiplies. */
14805 #undef THUMB_VARIANT
14806 #define THUMB_VARIANT &arm_ext_v4t
14807 tCE(mul, 0000090, mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
14808 tC3(muls, 0100090, muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
14809
14810 #undef THUMB_VARIANT
14811 #define THUMB_VARIANT &arm_ext_v6t2
14812 TCE(mla, 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
14813 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
14814
14815 /* Generic coprocessor instructions. */
14816 TCE(cdp, e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
14817 TCE(ldc, c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14818 TC3(ldcl, c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14819 TCE(stc, c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14820 TC3(stcl, c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14821 TCE(mcr, e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14822 TCE(mrc, e100010, ee100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14823
14824 #undef ARM_VARIANT
14825 #define ARM_VARIANT &arm_ext_v2s /* ARM 3 - swp instructions. */
14826 CE(swp, 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
14827 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
14828
14829 #undef ARM_VARIANT
14830 #define ARM_VARIANT &arm_ext_v3 /* ARM 6 Status register instructions. */
14831 TCE(mrs, 10f0000, f3ef8000, 2, (APSR_RR, RVC_PSR), mrs, t_mrs),
14832 TCE(msr, 120f000, f3808000, 2, (RVC_PSR, RR_EXi), msr, t_msr),
14833
14834 #undef ARM_VARIANT
14835 #define ARM_VARIANT &arm_ext_v3m /* ARM 7M long multiplies. */
14836 TCE(smull, 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14837 CM(smull,s, 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14838 TCE(umull, 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14839 CM(umull,s, 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14840 TCE(smlal, 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14841 CM(smlal,s, 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14842 TCE(umlal, 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14843 CM(umlal,s, 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14844
14845 #undef ARM_VARIANT
14846 #define ARM_VARIANT &arm_ext_v4 /* ARM Architecture 4. */
14847 #undef THUMB_VARIANT
14848 #define THUMB_VARIANT &arm_ext_v4t
14849 tC3(ldrh, 01000b0, ldrh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14850 tC3(strh, 00000b0, strh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14851 tC3(ldrsh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14852 tC3(ldrsb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14853 tCM(ld,sh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14854 tCM(ld,sb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14855
14856 #undef ARM_VARIANT
14857 #define ARM_VARIANT &arm_ext_v4t_5
14858 /* ARM Architecture 4T. */
14859 /* Note: bx (and blx) are required on V5, even if the processor does
14860 not support Thumb. */
14861 TCE(bx, 12fff10, 4700, 1, (RR), bx, t_bx),
14862
14863 #undef ARM_VARIANT
14864 #define ARM_VARIANT &arm_ext_v5 /* ARM Architecture 5T. */
14865 #undef THUMB_VARIANT
14866 #define THUMB_VARIANT &arm_ext_v5t
14867 /* Note: blx has 2 variants; the .value coded here is for
14868 BLX(2). Only this variant has conditional execution. */
14869 TCE(blx, 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
14870 TUE(bkpt, 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
14871
14872 #undef THUMB_VARIANT
14873 #define THUMB_VARIANT &arm_ext_v6t2
14874 TCE(clz, 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
14875 TUF(ldc2, c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14876 TUF(ldc2l, c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14877 TUF(stc2, c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14878 TUF(stc2l, c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14879 TUF(cdp2, e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
14880 TUF(mcr2, e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14881 TUF(mrc2, e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14882
14883 #undef ARM_VARIANT
14884 #define ARM_VARIANT &arm_ext_v5exp /* ARM Architecture 5TExP. */
14885 TCE(smlabb, 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14886 TCE(smlatb, 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14887 TCE(smlabt, 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14888 TCE(smlatt, 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14889
14890 TCE(smlawb, 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14891 TCE(smlawt, 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14892
14893 TCE(smlalbb, 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
14894 TCE(smlaltb, 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
14895 TCE(smlalbt, 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
14896 TCE(smlaltt, 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
14897
14898 TCE(smulbb, 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14899 TCE(smultb, 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14900 TCE(smulbt, 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14901 TCE(smultt, 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14902
14903 TCE(smulwb, 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14904 TCE(smulwt, 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14905
14906 TCE(qadd, 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
14907 TCE(qdadd, 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
14908 TCE(qsub, 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
14909 TCE(qdsub, 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
14910
14911 #undef ARM_VARIANT
14912 #define ARM_VARIANT &arm_ext_v5e /* ARM Architecture 5TE. */
14913 TUF(pld, 450f000, f810f000, 1, (ADDR), pld, t_pld),
14914 TC3(ldrd, 00000d0, e9500000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),
14915 TC3(strd, 00000f0, e9400000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),
14916
14917 TCE(mcrr, c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
14918 TCE(mrrc, c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
14919
14920 #undef ARM_VARIANT
14921 #define ARM_VARIANT &arm_ext_v5j /* ARM Architecture 5TEJ. */
14922 TCE(bxj, 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
14923
14924 #undef ARM_VARIANT
14925 #define ARM_VARIANT &arm_ext_v6 /* ARM V6. */
14926 #undef THUMB_VARIANT
14927 #define THUMB_VARIANT &arm_ext_v6
14928 TUF(cpsie, 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
14929 TUF(cpsid, 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
14930 tCE(rev, 6bf0f30, rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
14931 tCE(rev16, 6bf0fb0, rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
14932 tCE(revsh, 6ff0fb0, revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
14933 tCE(sxth, 6bf0070, sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
14934 tCE(uxth, 6ff0070, uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
14935 tCE(sxtb, 6af0070, sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
14936 tCE(uxtb, 6ef0070, uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
14937 TUF(setend, 1010000, b650, 1, (ENDI), setend, t_setend),
14938
14939 #undef THUMB_VARIANT
14940 #define THUMB_VARIANT &arm_ext_v6t2
14941 TCE(ldrex, 1900f9f, e8500f00, 2, (RRnpc, ADDR), ldrex, t_ldrex),
14942 TUF(mcrr2, c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
14943 TUF(mrrc2, c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
14944
14945 TCE(ssat, 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
14946 TCE(usat, 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
14947
14948  /* ARM V6 not included in V7M (e.g. integer SIMD).  */
14949 #undef THUMB_VARIANT
14950 #define THUMB_VARIANT &arm_ext_v6_notm
14951 TUF(cps, 1020000, f3af8100, 1, (I31b), imm0, t_cps),
14952 TCE(pkhbt, 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
14953 TCE(pkhtb, 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
14954 TCE(qadd16, 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14955 TCE(qadd8, 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14956 TCE(qaddsubx, 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14957 TCE(qsub16, 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14958 TCE(qsub8, 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14959 TCE(qsubaddx, 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14960 TCE(sadd16, 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14961 TCE(sadd8, 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14962 TCE(saddsubx, 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14963 TCE(shadd16, 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14964 TCE(shadd8, 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14965 TCE(shaddsubx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14966 TCE(shsub16, 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14967 TCE(shsub8, 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14968 TCE(shsubaddx, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14969 TCE(ssub16, 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14970 TCE(ssub8, 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14971 TCE(ssubaddx, 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14972 TCE(uadd16, 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14973 TCE(uadd8, 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14974 TCE(uaddsubx, 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14975 TCE(uhadd16, 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14976 TCE(uhadd8, 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14977 TCE(uhaddsubx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14978 TCE(uhsub16, 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14979 TCE(uhsub8, 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14980 TCE(uhsubaddx, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14981 TCE(uqadd16, 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14982 TCE(uqadd8, 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14983 TCE(uqaddsubx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14984 TCE(uqsub16, 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14985 TCE(uqsub8, 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14986 TCE(uqsubaddx, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14987 TCE(usub16, 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14988 TCE(usub8, 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14989 TCE(usubaddx, 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14990 TUF(rfeia, 8900a00, e990c000, 1, (RRw), rfe, rfe),
14991 UF(rfeib, 9900a00, 1, (RRw), rfe),
14992 UF(rfeda, 8100a00, 1, (RRw), rfe),
14993 TUF(rfedb, 9100a00, e810c000, 1, (RRw), rfe, rfe),
14994 TUF(rfefd, 8900a00, e990c000, 1, (RRw), rfe, rfe),
14995 UF(rfefa, 9900a00, 1, (RRw), rfe),
14996 UF(rfeea, 8100a00, 1, (RRw), rfe),
14997 TUF(rfeed, 9100a00, e810c000, 1, (RRw), rfe, rfe),
14998 TCE(sxtah, 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
14999 TCE(sxtab16, 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15000 TCE(sxtab, 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15001 TCE(sxtb16, 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15002 TCE(uxtah, 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15003 TCE(uxtab16, 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15004 TCE(uxtab, 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15005 TCE(uxtb16, 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15006 TCE(sel, 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15007 TCE(smlad, 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15008 TCE(smladx, 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15009 TCE(smlald, 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
15010 TCE(smlaldx, 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
15011 TCE(smlsd, 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15012 TCE(smlsdx, 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15013 TCE(smlsld, 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
15014 TCE(smlsldx, 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
15015 TCE(smmla, 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15016 TCE(smmlar, 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15017 TCE(smmls, 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15018 TCE(smmlsr, 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15019 TCE(smmul, 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15020 TCE(smmulr, 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15021 TCE(smuad, 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15022 TCE(smuadx, 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15023 TCE(smusd, 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15024 TCE(smusdx, 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15025 TUF(srsia, 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
15026 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
15027 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
15028 TUF(srsdb, 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
15029 TCE(ssat16, 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
15030 TCE(strex, 1800f90, e8400000, 3, (RRnpc, RRnpc, ADDR), strex, t_strex),
15031 TCE(umaal, 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
15032 TCE(usad8, 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15033 TCE(usada8, 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15034 TCE(usat16, 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
15035
15036 #undef ARM_VARIANT
15037 #define ARM_VARIANT &arm_ext_v6k
15038 #undef THUMB_VARIANT
15039 #define THUMB_VARIANT &arm_ext_v6k
15040 tCE(yield, 320f001, yield, 0, (), noargs, t_hint),
15041 tCE(wfe, 320f002, wfe, 0, (), noargs, t_hint),
15042 tCE(wfi, 320f003, wfi, 0, (), noargs, t_hint),
15043 tCE(sev, 320f004, sev, 0, (), noargs, t_hint),
15044
15045 #undef THUMB_VARIANT
15046 #define THUMB_VARIANT &arm_ext_v6_notm
15047 TCE(ldrexd, 1b00f9f, e8d0007f, 3, (RRnpc, oRRnpc, RRnpcb), ldrexd, t_ldrexd),
15048 TCE(strexd, 1a00f90, e8c00070, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb), strexd, t_strexd),
15049
15050 #undef THUMB_VARIANT
15051 #define THUMB_VARIANT &arm_ext_v6t2
15052 TCE(ldrexb, 1d00f9f, e8d00f4f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
15053 TCE(ldrexh, 1f00f9f, e8d00f5f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
15054 TCE(strexb, 1c00f90, e8c00f40, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
15055 TCE(strexh, 1e00f90, e8c00f50, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
15056 TUF(clrex, 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
15057
15058 #undef ARM_VARIANT
15059 #define ARM_VARIANT &arm_ext_v6z
15060 TCE(smc, 1600070, f7f08000, 1, (EXPi), smc, t_smc),
15061
15062 #undef ARM_VARIANT
15063 #define ARM_VARIANT &arm_ext_v6t2
15064 TCE(bfc, 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
15065 TCE(bfi, 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
15066 TCE(sbfx, 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
15067 TCE(ubfx, 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
15068
15069 TCE(mls, 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
15070 TCE(movw, 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
15071 TCE(movt, 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
15072 TCE(rbit, 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
15073
15074 TC3(ldrht, 03000b0, f8300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
15075 TC3(ldrsht, 03000f0, f9300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
15076 TC3(ldrsbt, 03000d0, f9100e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
15077 TC3(strht, 02000b0, f8200e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
15078
15079 UT(cbnz, b900, 2, (RR, EXP), t_cbz),
15080 UT(cbz, b100, 2, (RR, EXP), t_cbz),
15081 /* ARM does not really have an IT instruction, so always allow it. */
15082 #undef ARM_VARIANT
15083 #define ARM_VARIANT &arm_ext_v1
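 /* The Thumb base values below are 0xbf00 plus the four-bit IT mask;
    the Thumb encoder inserts the first condition into bits 7:4 and
    adjusts the mask as needed for that condition's low bit.  */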
15084 TUE(it, 0, bf08, 1, (COND), it, t_it),
15085 TUE(itt, 0, bf0c, 1, (COND), it, t_it),
15086 TUE(ite, 0, bf04, 1, (COND), it, t_it),
15087 TUE(ittt, 0, bf0e, 1, (COND), it, t_it),
15088 TUE(itet, 0, bf06, 1, (COND), it, t_it),
15089 TUE(itte, 0, bf0a, 1, (COND), it, t_it),
15090 TUE(itee, 0, bf02, 1, (COND), it, t_it),
15091 TUE(itttt, 0, bf0f, 1, (COND), it, t_it),
15092 TUE(itett, 0, bf07, 1, (COND), it, t_it),
15093 TUE(ittet, 0, bf0b, 1, (COND), it, t_it),
15094 TUE(iteet, 0, bf03, 1, (COND), it, t_it),
15095 TUE(ittte, 0, bf0d, 1, (COND), it, t_it),
15096 TUE(itete, 0, bf05, 1, (COND), it, t_it),
15097 TUE(ittee, 0, bf09, 1, (COND), it, t_it),
15098 TUE(iteee, 0, bf01, 1, (COND), it, t_it),
15099
15100 /* Thumb-2 only instructions. */
15101 #undef ARM_VARIANT
15102 #define ARM_VARIANT NULL
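 /* An ARM_VARIANT of NULL means there is no ARM (32-bit) encoding at
    all, hence the zero ARM opcode and encoder fields below.  */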
15103
15104 TCE(addw, 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
15105 TCE(subw, 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
15106 TCE(tbb, 0, e8d0f000, 1, (TB), 0, t_tb),
15107 TCE(tbh, 0, e8d0f010, 1, (TB), 0, t_tb),
15108
15109 /* Thumb-2 hardware division instructions (R and M profiles only). */
15110 #undef THUMB_VARIANT
15111 #define THUMB_VARIANT &arm_ext_div
15112 TCE(sdiv, 0, fb90f0f0, 3, (RR, oRR, RR), 0, t_div),
15113 TCE(udiv, 0, fbb0f0f0, 3, (RR, oRR, RR), 0, t_div),
15114
15115 /* ARM V7 instructions. */
15116 #undef ARM_VARIANT
15117 #define ARM_VARIANT &arm_ext_v7
15118 #undef THUMB_VARIANT
15119 #define THUMB_VARIANT &arm_ext_v7
15120 TUF(pli, 450f000, f910f000, 1, (ADDR), pli, t_pld),
15121 TCE(dbg, 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
15122 TUF(dmb, 57ff050, f3bf8f50, 1, (oBARRIER), barrier, t_barrier),
15123 TUF(dsb, 57ff040, f3bf8f40, 1, (oBARRIER), barrier, t_barrier),
15124 TUF(isb, 57ff060, f3bf8f60, 1, (oBARRIER), barrier, t_barrier),
15125
15126 #undef ARM_VARIANT
15127 #define ARM_VARIANT &fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
15128 cCE(wfs, e200110, 1, (RR), rd),
15129 cCE(rfs, e300110, 1, (RR), rd),
15130 cCE(wfc, e400110, 1, (RR), rd),
15131 cCE(rfc, e500110, 1, (RR), rd),
15132
15133 cCL(ldfs, c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
15134 cCL(ldfd, c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
15135 cCL(ldfe, c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
15136 cCL(ldfp, c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
15137
15138 cCL(stfs, c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
15139 cCL(stfd, c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
15140 cCL(stfe, c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
15141 cCL(stfp, c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
15142
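 /* Each FPA data operation below comes in twelve variants: a precision
    suffix (s, d or e for single, double or extended) optionally followed
    by a rounding suffix (p, m or z for round towards plus infinity,
    minus infinity or zero).  Within a group only the precision and
    rounding bits of the encoding differ.  */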
15143 cCL(mvfs, e008100, 2, (RF, RF_IF), rd_rm),
15144 cCL(mvfsp, e008120, 2, (RF, RF_IF), rd_rm),
15145 cCL(mvfsm, e008140, 2, (RF, RF_IF), rd_rm),
15146 cCL(mvfsz, e008160, 2, (RF, RF_IF), rd_rm),
15147 cCL(mvfd, e008180, 2, (RF, RF_IF), rd_rm),
15148 cCL(mvfdp, e0081a0, 2, (RF, RF_IF), rd_rm),
15149 cCL(mvfdm, e0081c0, 2, (RF, RF_IF), rd_rm),
15150 cCL(mvfdz, e0081e0, 2, (RF, RF_IF), rd_rm),
15151 cCL(mvfe, e088100, 2, (RF, RF_IF), rd_rm),
15152 cCL(mvfep, e088120, 2, (RF, RF_IF), rd_rm),
15153 cCL(mvfem, e088140, 2, (RF, RF_IF), rd_rm),
15154 cCL(mvfez, e088160, 2, (RF, RF_IF), rd_rm),
15155
15156 cCL(mnfs, e108100, 2, (RF, RF_IF), rd_rm),
15157 cCL(mnfsp, e108120, 2, (RF, RF_IF), rd_rm),
15158 cCL(mnfsm, e108140, 2, (RF, RF_IF), rd_rm),
15159 cCL(mnfsz, e108160, 2, (RF, RF_IF), rd_rm),
15160 cCL(mnfd, e108180, 2, (RF, RF_IF), rd_rm),
15161 cCL(mnfdp, e1081a0, 2, (RF, RF_IF), rd_rm),
15162 cCL(mnfdm, e1081c0, 2, (RF, RF_IF), rd_rm),
15163 cCL(mnfdz, e1081e0, 2, (RF, RF_IF), rd_rm),
15164 cCL(mnfe, e188100, 2, (RF, RF_IF), rd_rm),
15165 cCL(mnfep, e188120, 2, (RF, RF_IF), rd_rm),
15166 cCL(mnfem, e188140, 2, (RF, RF_IF), rd_rm),
15167 cCL(mnfez, e188160, 2, (RF, RF_IF), rd_rm),
15168
15169 cCL(abss, e208100, 2, (RF, RF_IF), rd_rm),
15170 cCL(abssp, e208120, 2, (RF, RF_IF), rd_rm),
15171 cCL(abssm, e208140, 2, (RF, RF_IF), rd_rm),
15172 cCL(abssz, e208160, 2, (RF, RF_IF), rd_rm),
15173 cCL(absd, e208180, 2, (RF, RF_IF), rd_rm),
15174 cCL(absdp, e2081a0, 2, (RF, RF_IF), rd_rm),
15175 cCL(absdm, e2081c0, 2, (RF, RF_IF), rd_rm),
15176 cCL(absdz, e2081e0, 2, (RF, RF_IF), rd_rm),
15177 cCL(abse, e288100, 2, (RF, RF_IF), rd_rm),
15178 cCL(absep, e288120, 2, (RF, RF_IF), rd_rm),
15179 cCL(absem, e288140, 2, (RF, RF_IF), rd_rm),
15180 cCL(absez, e288160, 2, (RF, RF_IF), rd_rm),
15181
15182 cCL(rnds, e308100, 2, (RF, RF_IF), rd_rm),
15183 cCL(rndsp, e308120, 2, (RF, RF_IF), rd_rm),
15184 cCL(rndsm, e308140, 2, (RF, RF_IF), rd_rm),
15185 cCL(rndsz, e308160, 2, (RF, RF_IF), rd_rm),
15186 cCL(rndd, e308180, 2, (RF, RF_IF), rd_rm),
15187 cCL(rnddp, e3081a0, 2, (RF, RF_IF), rd_rm),
15188 cCL(rnddm, e3081c0, 2, (RF, RF_IF), rd_rm),
15189 cCL(rnddz, e3081e0, 2, (RF, RF_IF), rd_rm),
15190 cCL(rnde, e388100, 2, (RF, RF_IF), rd_rm),
15191 cCL(rndep, e388120, 2, (RF, RF_IF), rd_rm),
15192 cCL(rndem, e388140, 2, (RF, RF_IF), rd_rm),
15193 cCL(rndez, e388160, 2, (RF, RF_IF), rd_rm),
15194
15195 cCL(sqts, e408100, 2, (RF, RF_IF), rd_rm),
15196 cCL(sqtsp, e408120, 2, (RF, RF_IF), rd_rm),
15197 cCL(sqtsm, e408140, 2, (RF, RF_IF), rd_rm),
15198 cCL(sqtsz, e408160, 2, (RF, RF_IF), rd_rm),
15199 cCL(sqtd, e408180, 2, (RF, RF_IF), rd_rm),
15200 cCL(sqtdp, e4081a0, 2, (RF, RF_IF), rd_rm),
15201 cCL(sqtdm, e4081c0, 2, (RF, RF_IF), rd_rm),
15202 cCL(sqtdz, e4081e0, 2, (RF, RF_IF), rd_rm),
15203 cCL(sqte, e488100, 2, (RF, RF_IF), rd_rm),
15204 cCL(sqtep, e488120, 2, (RF, RF_IF), rd_rm),
15205 cCL(sqtem, e488140, 2, (RF, RF_IF), rd_rm),
15206 cCL(sqtez, e488160, 2, (RF, RF_IF), rd_rm),
15207
15208 cCL(logs, e508100, 2, (RF, RF_IF), rd_rm),
15209 cCL(logsp, e508120, 2, (RF, RF_IF), rd_rm),
15210 cCL(logsm, e508140, 2, (RF, RF_IF), rd_rm),
15211 cCL(logsz, e508160, 2, (RF, RF_IF), rd_rm),
15212 cCL(logd, e508180, 2, (RF, RF_IF), rd_rm),
15213 cCL(logdp, e5081a0, 2, (RF, RF_IF), rd_rm),
15214 cCL(logdm, e5081c0, 2, (RF, RF_IF), rd_rm),
15215 cCL(logdz, e5081e0, 2, (RF, RF_IF), rd_rm),
15216 cCL(loge, e588100, 2, (RF, RF_IF), rd_rm),
15217 cCL(logep, e588120, 2, (RF, RF_IF), rd_rm),
15218 cCL(logem, e588140, 2, (RF, RF_IF), rd_rm),
15219 cCL(logez, e588160, 2, (RF, RF_IF), rd_rm),
15220
15221 cCL(lgns, e608100, 2, (RF, RF_IF), rd_rm),
15222 cCL(lgnsp, e608120, 2, (RF, RF_IF), rd_rm),
15223 cCL(lgnsm, e608140, 2, (RF, RF_IF), rd_rm),
15224 cCL(lgnsz, e608160, 2, (RF, RF_IF), rd_rm),
15225 cCL(lgnd, e608180, 2, (RF, RF_IF), rd_rm),
15226 cCL(lgndp, e6081a0, 2, (RF, RF_IF), rd_rm),
15227 cCL(lgndm, e6081c0, 2, (RF, RF_IF), rd_rm),
15228 cCL(lgndz, e6081e0, 2, (RF, RF_IF), rd_rm),
15229 cCL(lgne, e688100, 2, (RF, RF_IF), rd_rm),
15230 cCL(lgnep, e688120, 2, (RF, RF_IF), rd_rm),
15231 cCL(lgnem, e688140, 2, (RF, RF_IF), rd_rm),
15232 cCL(lgnez, e688160, 2, (RF, RF_IF), rd_rm),
15233
15234 cCL(exps, e708100, 2, (RF, RF_IF), rd_rm),
15235 cCL(expsp, e708120, 2, (RF, RF_IF), rd_rm),
15236 cCL(expsm, e708140, 2, (RF, RF_IF), rd_rm),
15237 cCL(expsz, e708160, 2, (RF, RF_IF), rd_rm),
15238 cCL(expd, e708180, 2, (RF, RF_IF), rd_rm),
15239 cCL(expdp, e7081a0, 2, (RF, RF_IF), rd_rm),
15240 cCL(expdm, e7081c0, 2, (RF, RF_IF), rd_rm),
15241 cCL(expdz, e7081e0, 2, (RF, RF_IF), rd_rm),
15242 cCL(expe, e788100, 2, (RF, RF_IF), rd_rm),
15243 cCL(expep, e788120, 2, (RF, RF_IF), rd_rm),
15244 cCL(expem, e788140, 2, (RF, RF_IF), rd_rm),
15245 cCL(expez, e788160, 2, (RF, RF_IF), rd_rm),
15246
15247 cCL(sins, e808100, 2, (RF, RF_IF), rd_rm),
15248 cCL(sinsp, e808120, 2, (RF, RF_IF), rd_rm),
15249 cCL(sinsm, e808140, 2, (RF, RF_IF), rd_rm),
15250 cCL(sinsz, e808160, 2, (RF, RF_IF), rd_rm),
15251 cCL(sind, e808180, 2, (RF, RF_IF), rd_rm),
15252 cCL(sindp, e8081a0, 2, (RF, RF_IF), rd_rm),
15253 cCL(sindm, e8081c0, 2, (RF, RF_IF), rd_rm),
15254 cCL(sindz, e8081e0, 2, (RF, RF_IF), rd_rm),
15255 cCL(sine, e888100, 2, (RF, RF_IF), rd_rm),
15256 cCL(sinep, e888120, 2, (RF, RF_IF), rd_rm),
15257 cCL(sinem, e888140, 2, (RF, RF_IF), rd_rm),
15258 cCL(sinez, e888160, 2, (RF, RF_IF), rd_rm),
15259
15260 cCL(coss, e908100, 2, (RF, RF_IF), rd_rm),
15261 cCL(cossp, e908120, 2, (RF, RF_IF), rd_rm),
15262 cCL(cossm, e908140, 2, (RF, RF_IF), rd_rm),
15263 cCL(cossz, e908160, 2, (RF, RF_IF), rd_rm),
15264 cCL(cosd, e908180, 2, (RF, RF_IF), rd_rm),
15265 cCL(cosdp, e9081a0, 2, (RF, RF_IF), rd_rm),
15266 cCL(cosdm, e9081c0, 2, (RF, RF_IF), rd_rm),
15267 cCL(cosdz, e9081e0, 2, (RF, RF_IF), rd_rm),
15268 cCL(cose, e988100, 2, (RF, RF_IF), rd_rm),
15269 cCL(cosep, e988120, 2, (RF, RF_IF), rd_rm),
15270 cCL(cosem, e988140, 2, (RF, RF_IF), rd_rm),
15271 cCL(cosez, e988160, 2, (RF, RF_IF), rd_rm),
15272
15273 cCL(tans, ea08100, 2, (RF, RF_IF), rd_rm),
15274 cCL(tansp, ea08120, 2, (RF, RF_IF), rd_rm),
15275 cCL(tansm, ea08140, 2, (RF, RF_IF), rd_rm),
15276 cCL(tansz, ea08160, 2, (RF, RF_IF), rd_rm),
15277 cCL(tand, ea08180, 2, (RF, RF_IF), rd_rm),
15278 cCL(tandp, ea081a0, 2, (RF, RF_IF), rd_rm),
15279 cCL(tandm, ea081c0, 2, (RF, RF_IF), rd_rm),
15280 cCL(tandz, ea081e0, 2, (RF, RF_IF), rd_rm),
15281 cCL(tane, ea88100, 2, (RF, RF_IF), rd_rm),
15282 cCL(tanep, ea88120, 2, (RF, RF_IF), rd_rm),
15283 cCL(tanem, ea88140, 2, (RF, RF_IF), rd_rm),
15284 cCL(tanez, ea88160, 2, (RF, RF_IF), rd_rm),
15285
15286 cCL(asns, eb08100, 2, (RF, RF_IF), rd_rm),
15287 cCL(asnsp, eb08120, 2, (RF, RF_IF), rd_rm),
15288 cCL(asnsm, eb08140, 2, (RF, RF_IF), rd_rm),
15289 cCL(asnsz, eb08160, 2, (RF, RF_IF), rd_rm),
15290 cCL(asnd, eb08180, 2, (RF, RF_IF), rd_rm),
15291 cCL(asndp, eb081a0, 2, (RF, RF_IF), rd_rm),
15292 cCL(asndm, eb081c0, 2, (RF, RF_IF), rd_rm),
15293 cCL(asndz, eb081e0, 2, (RF, RF_IF), rd_rm),
15294 cCL(asne, eb88100, 2, (RF, RF_IF), rd_rm),
15295 cCL(asnep, eb88120, 2, (RF, RF_IF), rd_rm),
15296 cCL(asnem, eb88140, 2, (RF, RF_IF), rd_rm),
15297 cCL(asnez, eb88160, 2, (RF, RF_IF), rd_rm),
15298
15299 cCL(acss, ec08100, 2, (RF, RF_IF), rd_rm),
15300 cCL(acssp, ec08120, 2, (RF, RF_IF), rd_rm),
15301 cCL(acssm, ec08140, 2, (RF, RF_IF), rd_rm),
15302 cCL(acssz, ec08160, 2, (RF, RF_IF), rd_rm),
15303 cCL(acsd, ec08180, 2, (RF, RF_IF), rd_rm),
15304 cCL(acsdp, ec081a0, 2, (RF, RF_IF), rd_rm),
15305 cCL(acsdm, ec081c0, 2, (RF, RF_IF), rd_rm),
15306 cCL(acsdz, ec081e0, 2, (RF, RF_IF), rd_rm),
15307 cCL(acse, ec88100, 2, (RF, RF_IF), rd_rm),
15308 cCL(acsep, ec88120, 2, (RF, RF_IF), rd_rm),
15309 cCL(acsem, ec88140, 2, (RF, RF_IF), rd_rm),
15310 cCL(acsez, ec88160, 2, (RF, RF_IF), rd_rm),
15311
15312 cCL(atns, ed08100, 2, (RF, RF_IF), rd_rm),
15313 cCL(atnsp, ed08120, 2, (RF, RF_IF), rd_rm),
15314 cCL(atnsm, ed08140, 2, (RF, RF_IF), rd_rm),
15315 cCL(atnsz, ed08160, 2, (RF, RF_IF), rd_rm),
15316 cCL(atnd, ed08180, 2, (RF, RF_IF), rd_rm),
15317 cCL(atndp, ed081a0, 2, (RF, RF_IF), rd_rm),
15318 cCL(atndm, ed081c0, 2, (RF, RF_IF), rd_rm),
15319 cCL(atndz, ed081e0, 2, (RF, RF_IF), rd_rm),
15320 cCL(atne, ed88100, 2, (RF, RF_IF), rd_rm),
15321 cCL(atnep, ed88120, 2, (RF, RF_IF), rd_rm),
15322 cCL(atnem, ed88140, 2, (RF, RF_IF), rd_rm),
15323 cCL(atnez, ed88160, 2, (RF, RF_IF), rd_rm),
15324
15325 cCL(urds, ee08100, 2, (RF, RF_IF), rd_rm),
15326 cCL(urdsp, ee08120, 2, (RF, RF_IF), rd_rm),
15327 cCL(urdsm, ee08140, 2, (RF, RF_IF), rd_rm),
15328 cCL(urdsz, ee08160, 2, (RF, RF_IF), rd_rm),
15329 cCL(urdd, ee08180, 2, (RF, RF_IF), rd_rm),
15330 cCL(urddp, ee081a0, 2, (RF, RF_IF), rd_rm),
15331 cCL(urddm, ee081c0, 2, (RF, RF_IF), rd_rm),
15332 cCL(urddz, ee081e0, 2, (RF, RF_IF), rd_rm),
15333 cCL(urde, ee88100, 2, (RF, RF_IF), rd_rm),
15334 cCL(urdep, ee88120, 2, (RF, RF_IF), rd_rm),
15335 cCL(urdem, ee88140, 2, (RF, RF_IF), rd_rm),
15336 cCL(urdez, ee88160, 2, (RF, RF_IF), rd_rm),
15337
15338 cCL(nrms, ef08100, 2, (RF, RF_IF), rd_rm),
15339 cCL(nrmsp, ef08120, 2, (RF, RF_IF), rd_rm),
15340 cCL(nrmsm, ef08140, 2, (RF, RF_IF), rd_rm),
15341 cCL(nrmsz, ef08160, 2, (RF, RF_IF), rd_rm),
15342 cCL(nrmd, ef08180, 2, (RF, RF_IF), rd_rm),
15343 cCL(nrmdp, ef081a0, 2, (RF, RF_IF), rd_rm),
15344 cCL(nrmdm, ef081c0, 2, (RF, RF_IF), rd_rm),
15345 cCL(nrmdz, ef081e0, 2, (RF, RF_IF), rd_rm),
15346 cCL(nrme, ef88100, 2, (RF, RF_IF), rd_rm),
15347 cCL(nrmep, ef88120, 2, (RF, RF_IF), rd_rm),
15348 cCL(nrmem, ef88140, 2, (RF, RF_IF), rd_rm),
15349 cCL(nrmez, ef88160, 2, (RF, RF_IF), rd_rm),
15350
15351 cCL(adfs, e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
15352 cCL(adfsp, e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
15353 cCL(adfsm, e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
15354 cCL(adfsz, e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
15355 cCL(adfd, e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
15356 cCL(adfdp, e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15357 cCL(adfdm, e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15358 cCL(adfdz, e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15359 cCL(adfe, e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
15360 cCL(adfep, e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
15361 cCL(adfem, e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
15362 cCL(adfez, e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
15363
15364 cCL(sufs, e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
15365 cCL(sufsp, e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
15366 cCL(sufsm, e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
15367 cCL(sufsz, e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
15368 cCL(sufd, e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
15369 cCL(sufdp, e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15370 cCL(sufdm, e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15371 cCL(sufdz, e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15372 cCL(sufe, e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
15373 cCL(sufep, e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
15374 cCL(sufem, e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
15375 cCL(sufez, e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
15376
15377 cCL(rsfs, e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
15378 cCL(rsfsp, e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
15379 cCL(rsfsm, e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
15380 cCL(rsfsz, e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
15381 cCL(rsfd, e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
15382 cCL(rsfdp, e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15383 cCL(rsfdm, e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15384 cCL(rsfdz, e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15385 cCL(rsfe, e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
15386 cCL(rsfep, e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
15387 cCL(rsfem, e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
15388 cCL(rsfez, e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
15389
15390 cCL(mufs, e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
15391 cCL(mufsp, e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
15392 cCL(mufsm, e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
15393 cCL(mufsz, e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
15394 cCL(mufd, e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
15395 cCL(mufdp, e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15396 cCL(mufdm, e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15397 cCL(mufdz, e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15398 cCL(mufe, e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
15399 cCL(mufep, e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
15400 cCL(mufem, e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
15401 cCL(mufez, e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
15402
15403 cCL(dvfs, e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
15404 cCL(dvfsp, e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
15405 cCL(dvfsm, e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
15406 cCL(dvfsz, e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
15407 cCL(dvfd, e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
15408 cCL(dvfdp, e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15409 cCL(dvfdm, e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15410 cCL(dvfdz, e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15411 cCL(dvfe, e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
15412 cCL(dvfep, e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
15413 cCL(dvfem, e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
15414 cCL(dvfez, e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
15415
15416 cCL(rdfs, e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
15417 cCL(rdfsp, e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
15418 cCL(rdfsm, e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
15419 cCL(rdfsz, e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
15420 cCL(rdfd, e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
15421 cCL(rdfdp, e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15422 cCL(rdfdm, e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15423 cCL(rdfdz, e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15424 cCL(rdfe, e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
15425 cCL(rdfep, e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
15426 cCL(rdfem, e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
15427 cCL(rdfez, e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
15428
15429 cCL(pows, e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
15430 cCL(powsp, e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
15431 cCL(powsm, e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
15432 cCL(powsz, e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
15433 cCL(powd, e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
15434 cCL(powdp, e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15435 cCL(powdm, e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15436 cCL(powdz, e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15437 cCL(powe, e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
15438 cCL(powep, e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
15439 cCL(powem, e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
15440 cCL(powez, e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
15441
15442 cCL(rpws, e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
15443 cCL(rpwsp, e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
15444 cCL(rpwsm, e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
15445 cCL(rpwsz, e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
15446 cCL(rpwd, e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
15447 cCL(rpwdp, e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15448 cCL(rpwdm, e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15449 cCL(rpwdz, e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15450 cCL(rpwe, e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
15451 cCL(rpwep, e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
15452 cCL(rpwem, e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
15453 cCL(rpwez, e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
15454
15455 cCL(rmfs, e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
15456 cCL(rmfsp, e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
15457 cCL(rmfsm, e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
15458 cCL(rmfsz, e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
15459 cCL(rmfd, e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
15460 cCL(rmfdp, e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15461 cCL(rmfdm, e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15462 cCL(rmfdz, e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15463 cCL(rmfe, e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
15464 cCL(rmfep, e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
15465 cCL(rmfem, e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
15466 cCL(rmfez, e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
15467
15468 cCL(fmls, e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
15469 cCL(fmlsp, e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
15470 cCL(fmlsm, e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
15471 cCL(fmlsz, e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
15472 cCL(fmld, e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
15473 cCL(fmldp, e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15474 cCL(fmldm, e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15475 cCL(fmldz, e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15476 cCL(fmle, e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
15477 cCL(fmlep, e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
15478 cCL(fmlem, e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
15479 cCL(fmlez, e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
15480
15481 cCL(fdvs, ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
15482 cCL(fdvsp, ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
15483 cCL(fdvsm, ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
15484 cCL(fdvsz, ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
15485 cCL(fdvd, ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
15486 cCL(fdvdp, ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15487 cCL(fdvdm, ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15488 cCL(fdvdz, ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15489 cCL(fdve, ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
15490 cCL(fdvep, ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
15491 cCL(fdvem, ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
15492 cCL(fdvez, ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
15493
15494 cCL(frds, eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
15495 cCL(frdsp, eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
15496 cCL(frdsm, eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
15497 cCL(frdsz, eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
15498 cCL(frdd, eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
15499 cCL(frddp, eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15500 cCL(frddm, eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15501 cCL(frddz, eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15502 cCL(frde, eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
15503 cCL(frdep, eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
15504 cCL(frdem, eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
15505 cCL(frdez, eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
15506
15507 cCL(pols, ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
15508 cCL(polsp, ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
15509 cCL(polsm, ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
15510 cCL(polsz, ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
15511 cCL(pold, ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
15512 cCL(poldp, ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15513 cCL(poldm, ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15514 cCL(poldz, ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15515 cCL(pole, ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
15516 cCL(polep, ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
15517 cCL(polem, ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
15518 cCL(polez, ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
15519
15520 cCE(cmf, e90f110, 2, (RF, RF_IF), fpa_cmp),
15521 C3E(cmfe, ed0f110, 2, (RF, RF_IF), fpa_cmp),
15522 cCE(cnf, eb0f110, 2, (RF, RF_IF), fpa_cmp),
15523 C3E(cnfe, ef0f110, 2, (RF, RF_IF), fpa_cmp),
15524
15525 cCL(flts, e000110, 2, (RF, RR), rn_rd),
15526 cCL(fltsp, e000130, 2, (RF, RR), rn_rd),
15527 cCL(fltsm, e000150, 2, (RF, RR), rn_rd),
15528 cCL(fltsz, e000170, 2, (RF, RR), rn_rd),
15529 cCL(fltd, e000190, 2, (RF, RR), rn_rd),
15530 cCL(fltdp, e0001b0, 2, (RF, RR), rn_rd),
15531 cCL(fltdm, e0001d0, 2, (RF, RR), rn_rd),
15532 cCL(fltdz, e0001f0, 2, (RF, RR), rn_rd),
15533 cCL(flte, e080110, 2, (RF, RR), rn_rd),
15534 cCL(fltep, e080130, 2, (RF, RR), rn_rd),
15535 cCL(fltem, e080150, 2, (RF, RR), rn_rd),
15536 cCL(fltez, e080170, 2, (RF, RR), rn_rd),
15537
15538 /* The implementation of the FIX instruction is broken on some
15539 assemblers, in that it accepts a precision specifier as well as a
15540 rounding specifier, even though this is meaningless.
15541 To be more compatible, we accept it as well, though of course it
15542 does not set any bits. */
15543 cCE(fix, e100110, 2, (RR, RF), rd_rm),
15544 cCL(fixp, e100130, 2, (RR, RF), rd_rm),
15545 cCL(fixm, e100150, 2, (RR, RF), rd_rm),
15546 cCL(fixz, e100170, 2, (RR, RF), rd_rm),
15547 cCL(fixsp, e100130, 2, (RR, RF), rd_rm),
15548 cCL(fixsm, e100150, 2, (RR, RF), rd_rm),
15549 cCL(fixsz, e100170, 2, (RR, RF), rd_rm),
15550 cCL(fixdp, e100130, 2, (RR, RF), rd_rm),
15551 cCL(fixdm, e100150, 2, (RR, RF), rd_rm),
15552 cCL(fixdz, e100170, 2, (RR, RF), rd_rm),
15553 cCL(fixep, e100130, 2, (RR, RF), rd_rm),
15554 cCL(fixem, e100150, 2, (RR, RF), rd_rm),
15555 cCL(fixez, e100170, 2, (RR, RF), rd_rm),
15556
15557 /* Instructions that were new with the real FPA; call them V2. */
15558 #undef ARM_VARIANT
15559 #define ARM_VARIANT &fpu_fpa_ext_v2
15560 cCE(lfm, c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15561 cCL(lfmfd, c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15562 cCL(lfmea, d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15563 cCE(sfm, c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15564 cCL(sfmfd, d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15565 cCL(sfmea, c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15566
15567 #undef ARM_VARIANT
15568 #define ARM_VARIANT &fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
15569 /* Moves and type conversions. */
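 /* In these pre-UAL mnemonics fmsr/fmrs move between an ARM core
    register and a single-precision register, fmrx/fmxr access the VFP
    system registers, and fmstat copies the FPSCR flags to the CPSR.  */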
15570 cCE(fcpys, eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
15571 cCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp),
15572 cCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg),
15573 cCE(fmstat, ef1fa10, 0, (), noargs),
15574 cCE(fsitos, eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
15575 cCE(fuitos, eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
15576 cCE(ftosis, ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
15577 cCE(ftosizs, ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
15578 cCE(ftouis, ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
15579 cCE(ftouizs, ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
15580 cCE(fmrx, ef00a10, 2, (RR, RVC), rd_rn),
15581 cCE(fmxr, ee00a10, 2, (RVC, RR), rn_rd),
15582
15583 /* Memory operations. */
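 /* The stack-oriented mnemonics (fd, ea) are aliases of the ia/db
    forms, which is why the pairs below share encodings: for loads
    fd == ia and ea == db, while for stores ea == ia and fd == db.  */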
15584 cCE(flds, d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
15585 cCE(fsts, d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
15586 cCE(fldmias, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15587 cCE(fldmfds, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15588 cCE(fldmdbs, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15589 cCE(fldmeas, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15590 cCE(fldmiax, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15591 cCE(fldmfdx, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15592 cCE(fldmdbx, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15593 cCE(fldmeax, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15594 cCE(fstmias, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15595 cCE(fstmeas, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15596 cCE(fstmdbs, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15597 cCE(fstmfds, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15598 cCE(fstmiax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15599 cCE(fstmeax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15600 cCE(fstmdbx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15601 cCE(fstmfdx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15602
15603 /* Monadic operations. */
15604 cCE(fabss, eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
15605 cCE(fnegs, eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
15606 cCE(fsqrts, eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
15607
15608 /* Dyadic operations. */
15609 cCE(fadds, e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15610 cCE(fsubs, e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15611 cCE(fmuls, e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15612 cCE(fdivs, e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15613 cCE(fmacs, e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15614 cCE(fmscs, e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15615 cCE(fnmuls, e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15616 cCE(fnmacs, e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15617 cCE(fnmscs, e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15618
15619 /* Comparisons. */
15620 cCE(fcmps, eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
15621 cCE(fcmpzs, eb50a40, 1, (RVS), vfp_sp_compare_z),
15622 cCE(fcmpes, eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
15623 cCE(fcmpezs, eb50ac0, 1, (RVS), vfp_sp_compare_z),
15624
15625 #undef ARM_VARIANT
15626 #define ARM_VARIANT &fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
15627 /* Moves and type conversions. */
15628 cCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
15629 cCE(fcvtds, eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
15630 cCE(fcvtsd, eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
15631 cCE(fmdhr, e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
15632 cCE(fmdlr, e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
15633 cCE(fmrdh, e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
15634 cCE(fmrdl, e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
15635 cCE(fsitod, eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
15636 cCE(fuitod, eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
15637 cCE(ftosid, ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
15638 cCE(ftosizd, ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
15639 cCE(ftouid, ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
15640 cCE(ftouizd, ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
15641
15642 /* Memory operations. */
15643 cCE(fldd, d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
15644 cCE(fstd, d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
15645 cCE(fldmiad, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15646 cCE(fldmfdd, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15647 cCE(fldmdbd, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15648 cCE(fldmead, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15649 cCE(fstmiad, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15650 cCE(fstmead, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15651 cCE(fstmdbd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15652 cCE(fstmfdd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15653
15654 /* Monadic operations. */
15655 cCE(fabsd, eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
15656 cCE(fnegd, eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
15657 cCE(fsqrtd, eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
15658
15659 /* Dyadic operations. */
15660 cCE(faddd, e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15661 cCE(fsubd, e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15662 cCE(fmuld, e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15663 cCE(fdivd, e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15664 cCE(fmacd, e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15665 cCE(fmscd, e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15666 cCE(fnmuld, e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15667 cCE(fnmacd, e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15668 cCE(fnmscd, e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15669
15670 /* Comparisons. */
15671 cCE(fcmpd, eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
15672 cCE(fcmpzd, eb50b40, 1, (RVD), vfp_dp_rd),
15673 cCE(fcmped, eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
15674 cCE(fcmpezd, eb50bc0, 1, (RVD), vfp_dp_rd),
15675
15676 #undef ARM_VARIANT
15677 #define ARM_VARIANT &fpu_vfp_ext_v2
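 /* VFP V2 adds two-register transfers: fmsrr/fmrrs move a pair of ARM
    core registers to and from two consecutive single-precision
    registers, and fmdrr/fmrrd do the same for one double-precision
    register.  */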
15678 cCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
15679 cCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
15680 cCE(fmdrr, c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
15681 cCE(fmrrd, c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
15682
15683 /* Instructions which may belong to either the Neon or VFP instruction sets.
15684 Individual encoder functions perform additional architecture checks. */
15685 #undef ARM_VARIANT
15686 #define ARM_VARIANT &fpu_vfp_ext_v1xd
15687 #undef THUMB_VARIANT
15688 #define THUMB_VARIANT &fpu_vfp_ext_v1xd
15689 /* These mnemonics are unique to VFP. */
15690 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
15691 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
15692 nCE(vnmul, vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
15693 nCE(vnmla, vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
15694 nCE(vnmls, vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
15695 nCE(vcmp, vcmp, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
15696 nCE(vcmpe, vcmpe, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
15697 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
15698 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
15699 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
15700
15701 /* Mnemonics shared by Neon and VFP. */
15702 nCEF(vmul, vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
15703 nCEF(vmla, vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
15704 nCEF(vmls, vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
15705
15706 nCEF(vadd, vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
15707 nCEF(vsub, vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
15708
15709 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
15710 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
15711
15712 NCE(vldm, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15713 NCE(vldmia, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15714 NCE(vldmdb, d100b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15715 NCE(vstm, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15716 NCE(vstmia, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15717 NCE(vstmdb, d000b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15718 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
15719 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
15720
15721 nCEF(vcvt, vcvt, 3, (RNSDQ, RNSDQ, oI32b), neon_cvt),
15722
15723 /* NOTE: All VMOV encoding is special-cased! */
15724 NCE(vmov, 0, 1, (VMOV), neon_mov),
15725 NCE(vmovq, 0, 1, (VMOV), neon_mov),
15726
15727 #undef THUMB_VARIANT
15728 #define THUMB_VARIANT &fpu_neon_ext_v1
15729 #undef ARM_VARIANT
15730 #define ARM_VARIANT &fpu_neon_ext_v1
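 /* Mnemonics ending in 'q' force quadword (Q register) operands, as
    their RNQ operand types show; the unsuffixed forms accept either D
    or Q registers (RNDQ).  */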
15731 /* Data processing with three registers of the same length. */
15732 /* Integer ops, valid types S8 S16 S32 U8 U16 U32. */
15733 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
15734 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
15735 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
15736 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
15737 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
15738 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
15739 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
15740 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
15741 /* Integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
15742 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
15743 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
15744 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
15745 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
15746 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
15747 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
15748 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
15749 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
15750 /* If not immediate, fall back to neon_dyadic_i64_su.
15751 shl_imm should accept I8 I16 I32 I64,
15752 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
15753 nUF(vshl, vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
15754 nUF(vshlq, vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
15755 nUF(vqshl, vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
15756 nUF(vqshlq, vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
15757 /* Logic ops, types optional & ignored. */
15758 nUF(vand, vand, 2, (RNDQ, NILO), neon_logic),
15759 nUF(vandq, vand, 2, (RNQ, NILO), neon_logic),
15760 nUF(vbic, vbic, 2, (RNDQ, NILO), neon_logic),
15761 nUF(vbicq, vbic, 2, (RNQ, NILO), neon_logic),
15762 nUF(vorr, vorr, 2, (RNDQ, NILO), neon_logic),
15763 nUF(vorrq, vorr, 2, (RNQ, NILO), neon_logic),
15764 nUF(vorn, vorn, 2, (RNDQ, NILO), neon_logic),
15765 nUF(vornq, vorn, 2, (RNQ, NILO), neon_logic),
15766 nUF(veor, veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
15767 nUF(veorq, veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
15768 /* Bitfield ops, untyped. */
15769 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
15770 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
15771 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
15772 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
15773 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
15774 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
15775 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
15776 nUF(vabd, vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
15777 nUF(vabdq, vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
15778 nUF(vmax, vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
15779 nUF(vmaxq, vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
15780 nUF(vmin, vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
15781 nUF(vminq, vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
15782 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
15783 back to neon_dyadic_if_su. */
15784 nUF(vcge, vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
15785 nUF(vcgeq, vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
15786 nUF(vcgt, vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
15787 nUF(vcgtq, vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
15788 nUF(vclt, vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
15789 nUF(vcltq, vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
15790 nUF(vcle, vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
15791 nUF(vcleq, vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
15792 /* Comparison. Type I8 I16 I32 F32. */
15793 nUF(vceq, vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
15794 nUF(vceqq, vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
15795 /* As above, D registers only. */
15796 nUF(vpmax, vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
15797 nUF(vpmin, vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
15798 /* Int and float variants, signedness unimportant. */
15799 nUF(vmlaq, vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
15800 nUF(vmlsq, vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
15801 nUF(vpadd, vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
15802 /* Add/sub take types I8 I16 I32 I64 F32. */
15803 nUF(vaddq, vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
15804 nUF(vsubq, vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
15805 /* vtst takes sizes 8, 16, 32. */
15806 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
15807 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
15808 /* VMUL takes I8 I16 I32 F32 P8. */
15809 nUF(vmulq, vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
15810 /* VQD{R}MULH takes S16 S32. */
15811 nUF(vqdmulh, vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
15812 nUF(vqdmulhq, vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
15813 nUF(vqrdmulh, vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
15814 nUF(vqrdmulhq, vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
15815 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
15816 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
15817 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
15818 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
15819 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
15820 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
15821 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
15822 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
15823 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
15824 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
15825 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
15826 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
15827
15828 /* Two address, int/float. Types S8 S16 S32 F32. */
15829 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
15830 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
15831
15832 /* Data processing with two registers and a shift amount. */
15833 /* Right shifts, and variants with rounding.
15834 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
15835 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
15836 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
15837 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
15838 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
15839 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
15840 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
15841 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
15842 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
15843 /* Shift and insert. Sizes accepted 8 16 32 64. */
15844 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
15845 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
15846 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
15847 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
15848 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
15849 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
15850 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
15851 /* Right shift immediate, saturating & narrowing, with rounding variants.
15852 Types accepted S16 S32 S64 U16 U32 U64. */
15853 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
15854 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
15855 /* As above, unsigned. Types accepted S16 S32 S64. */
15856 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
15857 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
15858 /* Right shift narrowing. Types accepted I16 I32 I64. */
15859 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
15860 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
15861 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
15862 nUF(vshll, vshll, 3, (RNQ, RND, I32), neon_shll),
15863 /* CVT with optional immediate for fixed-point variant. */
15864 nUF(vcvtq, vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
15865
15866 nUF(vmvn, vmvn, 2, (RNDQ, RNDQ_IMVNb), neon_mvn),
15867 nUF(vmvnq, vmvn, 2, (RNQ, RNDQ_IMVNb), neon_mvn),
15868
15869 /* Data processing, three registers of different lengths. */
15870 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
15871 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
15872 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
15873 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
15874 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
15875 /* If not scalar, fall back to neon_dyadic_long.
15876 Vector types as above, scalar types S16 S32 U16 U32. */
15877 nUF(vmlal, vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
15878 nUF(vmlsl, vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
15879 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
15880 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
15881 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
15882 /* Dyadic, narrowing insns. Types I16 I32 I64. */
15883 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15884 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15885 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15886 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15887 /* Saturating doubling multiplies. Types S16 S32. */
15888 nUF(vqdmlal, vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
15889 nUF(vqdmlsl, vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
15890 nUF(vqdmull, vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
15891 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
15892 S16 S32 U16 U32. */
15893 nUF(vmull, vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
15894
15895 /* Extract. Size 8. */
15896 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I7), neon_ext),
15897 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I7), neon_ext),
15898
15899 /* Two registers, miscellaneous. */
15900 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
15901 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
15902 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
15903 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
15904 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
15905 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
15906 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
15907 /* Vector replicate. Sizes 8 16 32. */
15908 nCE(vdup, vdup, 2, (RNDQ, RR_RNSC), neon_dup),
15909 nCE(vdupq, vdup, 2, (RNQ, RR_RNSC), neon_dup),
15910 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
15911 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
15912 /* VMOVN. Types I16 I32 I64. */
15913 nUF(vmovn, vmovn, 2, (RND, RNQ), neon_movn),
15914 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
15915 nUF(vqmovn, vqmovn, 2, (RND, RNQ), neon_qmovn),
15916 /* VQMOVUN. Types S16 S32 S64. */
15917 nUF(vqmovun, vqmovun, 2, (RND, RNQ), neon_qmovun),
15918 /* VZIP / VUZP. Sizes 8 16 32. */
15919 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
15920 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
15921 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
15922 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
15923 /* VQABS / VQNEG. Types S8 S16 S32. */
15924 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
15925 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
15926 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
15927 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
15928 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
15929 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
15930 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
15931 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
15932 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
15933 /* Reciprocal estimates. Types U32 F32. */
15934 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
15935 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
15936 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
15937 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
15938 /* VCLS. Types S8 S16 S32. */
15939 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
15940 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
15941 /* VCLZ. Types I8 I16 I32. */
15942 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
15943 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
15944 /* VCNT. Size 8. */
15945 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
15946 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
15947 /* Two address, untyped. */
15948 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
15949 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
15950 /* VTRN. Sizes 8 16 32. */
15951 nUF(vtrn, vtrn, 2, (RNDQ, RNDQ), neon_trn),
15952 nUF(vtrnq, vtrn, 2, (RNQ, RNQ), neon_trn),
15953
15954 /* Table lookup. Size 8. */
15955 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
15956 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
15957
15958 #undef THUMB_VARIANT
15959 #define THUMB_VARIANT &fpu_vfp_v3_or_neon_ext
15960 #undef ARM_VARIANT
15961 #define ARM_VARIANT &fpu_vfp_v3_or_neon_ext
15962 /* Neon element/structure load/store. */
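 /* vldN/vstN move structures of N elements between memory and a list
    of D registers (NSTRLST), optionally selecting a single lane.  */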
15963 nUF(vld1, vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
15964 nUF(vst1, vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
15965 nUF(vld2, vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
15966 nUF(vst2, vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
15967 nUF(vld3, vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
15968 nUF(vst3, vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
15969 nUF(vld4, vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
15970 nUF(vst4, vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
15971
15972 #undef THUMB_VARIANT
15973 #define THUMB_VARIANT &fpu_vfp_ext_v3
15974 #undef ARM_VARIANT
15975 #define ARM_VARIANT &fpu_vfp_ext_v3
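 /* VFP V3: fconsts/fconstd load an encoded immediate constant; the
    remaining entries convert between floating point and 16-bit or
    32-bit fixed-point formats.  */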
15976 cCE(fconsts, eb00a00, 2, (RVS, I255), vfp_sp_const),
15977 cCE(fconstd, eb00b00, 2, (RVD, I255), vfp_dp_const),
15978 cCE(fshtos, eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
15979 cCE(fshtod, eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
15980 cCE(fsltos, eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
15981 cCE(fsltod, eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
15982 cCE(fuhtos, ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
15983 cCE(fuhtod, ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
15984 cCE(fultos, ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
15985 cCE(fultod, ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
15986 cCE(ftoshs, ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
15987 cCE(ftoshd, ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
15988 cCE(ftosls, ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
15989 cCE(ftosld, ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
15990 cCE(ftouhs, ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
15991 cCE(ftouhd, ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
15992 cCE(ftouls, ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
15993 cCE(ftould, ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
15994
15995 #undef THUMB_VARIANT
15996 #undef ARM_VARIANT
15997 #define ARM_VARIANT &arm_cext_xscale /* Intel XScale extensions. */
15998 cCE(mia, e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
15999 cCE(miaph, e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16000 cCE(miabb, e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16001 cCE(miabt, e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16002 cCE(miatb, e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16003 cCE(miatt, e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16004 cCE(mar, c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
16005 cCE(mra, c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
16006
16007 #undef ARM_VARIANT
16008 #define ARM_VARIANT &arm_cext_iwmmxt /* Intel Wireless MMX technology. */
16009 cCE(tandcb, e13f130, 1, (RR), iwmmxt_tandorc),
16010 cCE(tandch, e53f130, 1, (RR), iwmmxt_tandorc),
16011 cCE(tandcw, e93f130, 1, (RR), iwmmxt_tandorc),
16012 cCE(tbcstb, e400010, 2, (RIWR, RR), rn_rd),
16013 cCE(tbcsth, e400050, 2, (RIWR, RR), rn_rd),
16014 cCE(tbcstw, e400090, 2, (RIWR, RR), rn_rd),
16015 cCE(textrcb, e130170, 2, (RR, I7), iwmmxt_textrc),
16016 cCE(textrch, e530170, 2, (RR, I7), iwmmxt_textrc),
16017 cCE(textrcw, e930170, 2, (RR, I7), iwmmxt_textrc),
16018 cCE(textrmub, e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
16019 cCE(textrmuh, e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
16020 cCE(textrmuw, e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
16021 cCE(textrmsb, e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
16022 cCE(textrmsh, e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
16023 cCE(textrmsw, e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
16024 cCE(tinsrb, e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
16025 cCE(tinsrh, e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
16026 cCE(tinsrw, e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
16027 cCE(tmcr, e000110, 2, (RIWC_RIWG, RR), rn_rd),
16028 cCE(tmcrr, c400000, 3, (RIWR, RR, RR), rm_rd_rn),
16029 cCE(tmia, e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16030 cCE(tmiaph, e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16031 cCE(tmiabb, e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16032 cCE(tmiabt, e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16033 cCE(tmiatb, e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16034 cCE(tmiatt, e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16035 cCE(tmovmskb, e100030, 2, (RR, RIWR), rd_rn),
16036 cCE(tmovmskh, e500030, 2, (RR, RIWR), rd_rn),
16037 cCE(tmovmskw, e900030, 2, (RR, RIWR), rd_rn),
16038 cCE(tmrc, e100110, 2, (RR, RIWC_RIWG), rd_rn),
16039 cCE(tmrrc, c500000, 3, (RR, RR, RIWR), rd_rn_rm),
16040 cCE(torcb, e13f150, 1, (RR), iwmmxt_tandorc),
16041 cCE(torch, e53f150, 1, (RR), iwmmxt_tandorc),
16042 cCE(torcw, e93f150, 1, (RR), iwmmxt_tandorc),
16043 cCE(waccb, e0001c0, 2, (RIWR, RIWR), rd_rn),
16044 cCE(wacch, e4001c0, 2, (RIWR, RIWR), rd_rn),
16045 cCE(waccw, e8001c0, 2, (RIWR, RIWR), rd_rn),
16046 cCE(waddbss, e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16047 cCE(waddb, e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16048 cCE(waddbus, e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16049 cCE(waddhss, e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16050 cCE(waddh, e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16051 cCE(waddhus, e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16052 cCE(waddwss, eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16053 cCE(waddw, e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16054 cCE(waddwus, e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16055 cCE(waligni, e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
16056 cCE(walignr0, e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16057 cCE(walignr1, e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16058 cCE(walignr2, ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16059 cCE(walignr3, eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16060 cCE(wand, e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16061 cCE(wandn, e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16062 cCE(wavg2b, e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16063 cCE(wavg2br, e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16064 cCE(wavg2h, ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16065 cCE(wavg2hr, ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16066 cCE(wcmpeqb, e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16067 cCE(wcmpeqh, e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16068 cCE(wcmpeqw, e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16069 cCE(wcmpgtub, e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16070 cCE(wcmpgtuh, e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16071 cCE(wcmpgtuw, e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16072 cCE(wcmpgtsb, e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16073 cCE(wcmpgtsh, e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16074 cCE(wcmpgtsw, eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16075 cCE(wldrb, c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16076 cCE(wldrh, c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16077 cCE(wldrw, c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
16078 cCE(wldrd, c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
16079 cCE(wmacs, e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16080 cCE(wmacsz, e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16081 cCE(wmacu, e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16082 cCE(wmacuz, e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16083 cCE(wmadds, ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16084 cCE(wmaddu, e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16085 cCE(wmaxsb, e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16086 cCE(wmaxsh, e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16087 cCE(wmaxsw, ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16088 cCE(wmaxub, e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16089 cCE(wmaxuh, e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16090 cCE(wmaxuw, e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16091 cCE(wminsb, e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16092 cCE(wminsh, e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16093 cCE(wminsw, eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16094 cCE(wminub, e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16095 cCE(wminuh, e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16096 cCE(wminuw, e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16097 cCE(wmov, e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
16098 cCE(wmulsm, e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16099 cCE(wmulsl, e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16100 cCE(wmulum, e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16101 cCE(wmulul, e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16102 cCE(wor, e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16103 cCE(wpackhss, e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16104 cCE(wpackhus, e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16105 cCE(wpackwss, eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16106 cCE(wpackwus, e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16107 cCE(wpackdss, ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16108 cCE(wpackdus, ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16109 cCE(wrorh, e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16110 cCE(wrorhg, e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16111 cCE(wrorw, eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16112 cCE(wrorwg, eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16113 cCE(wrord, ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16114 cCE(wrordg, ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16115 cCE(wsadb, e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16116 cCE(wsadbz, e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16117 cCE(wsadh, e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16118 cCE(wsadhz, e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16119 cCE(wshufh, e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
16120 cCE(wsllh, e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16121 cCE(wsllhg, e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16122 cCE(wsllw, e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16123 cCE(wsllwg, e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16124 cCE(wslld, ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16125 cCE(wslldg, ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16126 cCE(wsrah, e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16127 cCE(wsrahg, e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16128 cCE(wsraw, e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16129 cCE(wsrawg, e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16130 cCE(wsrad, ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16131 cCE(wsradg, ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16132 cCE(wsrlh, e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16133 cCE(wsrlhg, e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16134 cCE(wsrlw, ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16135 cCE(wsrlwg, ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16136 cCE(wsrld, ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16137 cCE(wsrldg, ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16138 cCE(wstrb, c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16139 cCE(wstrh, c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16140 cCE(wstrw, c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
16141 cCE(wstrd, c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
16142 cCE(wsubbss, e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16143 cCE(wsubb, e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16144 cCE(wsubbus, e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16145 cCE(wsubhss, e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16146 cCE(wsubh, e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16147 cCE(wsubhus, e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16148 cCE(wsubwss, eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16149 cCE(wsubw, e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16150 cCE(wsubwus, e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16151 cCE(wunpckehub,e0000c0, 2, (RIWR, RIWR), rd_rn),
16152 cCE(wunpckehuh,e4000c0, 2, (RIWR, RIWR), rd_rn),
16153 cCE(wunpckehuw,e8000c0, 2, (RIWR, RIWR), rd_rn),
16154 cCE(wunpckehsb,e2000c0, 2, (RIWR, RIWR), rd_rn),
16155 cCE(wunpckehsh,e6000c0, 2, (RIWR, RIWR), rd_rn),
16156 cCE(wunpckehsw,ea000c0, 2, (RIWR, RIWR), rd_rn),
16157 cCE(wunpckihb, e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16158 cCE(wunpckihh, e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16159 cCE(wunpckihw, e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16160 cCE(wunpckelub,e0000e0, 2, (RIWR, RIWR), rd_rn),
16161 cCE(wunpckeluh,e4000e0, 2, (RIWR, RIWR), rd_rn),
16162 cCE(wunpckeluw,e8000e0, 2, (RIWR, RIWR), rd_rn),
16163 cCE(wunpckelsb,e2000e0, 2, (RIWR, RIWR), rd_rn),
16164 cCE(wunpckelsh,e6000e0, 2, (RIWR, RIWR), rd_rn),
16165 cCE(wunpckelsw,ea000e0, 2, (RIWR, RIWR), rd_rn),
16166 cCE(wunpckilb, e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16167 cCE(wunpckilh, e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16168 cCE(wunpckilw, e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16169 cCE(wxor, e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16170 cCE(wzero, e300000, 1, (RIWR), iwmmxt_wzero),
16171
16172 #undef ARM_VARIANT
16173 #define ARM_VARIANT &arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
16174 cCE(torvscb, e13f190, 1, (RR), iwmmxt_tandorc),
16175 cCE(torvsch, e53f190, 1, (RR), iwmmxt_tandorc),
16176 cCE(torvscw, e93f190, 1, (RR), iwmmxt_tandorc),
16177 cCE(wabsb, e2001c0, 2, (RIWR, RIWR), rd_rn),
16178 cCE(wabsh, e6001c0, 2, (RIWR, RIWR), rd_rn),
16179 cCE(wabsw, ea001c0, 2, (RIWR, RIWR), rd_rn),
16180 cCE(wabsdiffb, e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16181 cCE(wabsdiffh, e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16182 cCE(wabsdiffw, e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16183 cCE(waddbhusl, e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16184 cCE(waddbhusm, e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16185 cCE(waddhc, e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16186 cCE(waddwc, ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16187 cCE(waddsubhx, ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16188 cCE(wavg4, e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16189 cCE(wavg4r, e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16190 cCE(wmaddsn, ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16191 cCE(wmaddsx, eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16192 cCE(wmaddun, ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16193 cCE(wmaddux, e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16194 cCE(wmerge, e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
16195 cCE(wmiabb, e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16196 cCE(wmiabt, e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16197 cCE(wmiatb, e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16198 cCE(wmiatt, e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16199 cCE(wmiabbn, e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16200 cCE(wmiabtn, e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16201 cCE(wmiatbn, e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16202 cCE(wmiattn, e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16203 cCE(wmiawbb, e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16204 cCE(wmiawbt, e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16205 cCE(wmiawtb, ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16206 cCE(wmiawtt, eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16207 cCE(wmiawbbn, ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16208 cCE(wmiawbtn, ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16209 cCE(wmiawtbn, ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16210 cCE(wmiawttn, ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16211 cCE(wmulsmr, ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16212 cCE(wmulumr, ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16213 cCE(wmulwumr, ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16214 cCE(wmulwsmr, ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16215 cCE(wmulwum, ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16216 cCE(wmulwsm, ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16217 cCE(wmulwl, eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16218 cCE(wqmiabb, e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16219 cCE(wqmiabt, e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16220 cCE(wqmiatb, ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16221 cCE(wqmiatt, eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16222 cCE(wqmiabbn, ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16223 cCE(wqmiabtn, ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16224 cCE(wqmiatbn, ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16225 cCE(wqmiattn, ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16226 cCE(wqmulm, e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16227 cCE(wqmulmr, e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16228 cCE(wqmulwm, ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16229 cCE(wqmulwmr, ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16230 cCE(wsubaddhx, ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16231
16232 #undef ARM_VARIANT
16233 #define ARM_VARIANT &arm_cext_maverick /* Cirrus Maverick instructions. */
16234 cCE(cfldrs, c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
16235 cCE(cfldrd, c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
16236 cCE(cfldr32, c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
16237 cCE(cfldr64, c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
16238 cCE(cfstrs, c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
16239 cCE(cfstrd, c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
16240 cCE(cfstr32, c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
16241 cCE(cfstr64, c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
16242 cCE(cfmvsr, e000450, 2, (RMF, RR), rn_rd),
16243 cCE(cfmvrs, e100450, 2, (RR, RMF), rd_rn),
16244 cCE(cfmvdlr, e000410, 2, (RMD, RR), rn_rd),
16245 cCE(cfmvrdl, e100410, 2, (RR, RMD), rd_rn),
16246 cCE(cfmvdhr, e000430, 2, (RMD, RR), rn_rd),
16247 cCE(cfmvrdh, e100430, 2, (RR, RMD), rd_rn),
16248 cCE(cfmv64lr, e000510, 2, (RMDX, RR), rn_rd),
16249 cCE(cfmvr64l, e100510, 2, (RR, RMDX), rd_rn),
16250 cCE(cfmv64hr, e000530, 2, (RMDX, RR), rn_rd),
16251 cCE(cfmvr64h, e100530, 2, (RR, RMDX), rd_rn),
16252 cCE(cfmval32, e200440, 2, (RMAX, RMFX), rd_rn),
16253 cCE(cfmv32al, e100440, 2, (RMFX, RMAX), rd_rn),
16254 cCE(cfmvam32, e200460, 2, (RMAX, RMFX), rd_rn),
16255 cCE(cfmv32am, e100460, 2, (RMFX, RMAX), rd_rn),
16256 cCE(cfmvah32, e200480, 2, (RMAX, RMFX), rd_rn),
16257 cCE(cfmv32ah, e100480, 2, (RMFX, RMAX), rd_rn),
16258 cCE(cfmva32, e2004a0, 2, (RMAX, RMFX), rd_rn),
16259 cCE(cfmv32a, e1004a0, 2, (RMFX, RMAX), rd_rn),
16260 cCE(cfmva64, e2004c0, 2, (RMAX, RMDX), rd_rn),
16261 cCE(cfmv64a, e1004c0, 2, (RMDX, RMAX), rd_rn),
16262 cCE(cfmvsc32, e2004e0, 2, (RMDS, RMDX), mav_dspsc),
16263 cCE(cfmv32sc, e1004e0, 2, (RMDX, RMDS), rd),
16264 cCE(cfcpys, e000400, 2, (RMF, RMF), rd_rn),
16265 cCE(cfcpyd, e000420, 2, (RMD, RMD), rd_rn),
16266 cCE(cfcvtsd, e000460, 2, (RMD, RMF), rd_rn),
16267 cCE(cfcvtds, e000440, 2, (RMF, RMD), rd_rn),
16268 cCE(cfcvt32s, e000480, 2, (RMF, RMFX), rd_rn),
16269 cCE(cfcvt32d, e0004a0, 2, (RMD, RMFX), rd_rn),
16270 cCE(cfcvt64s, e0004c0, 2, (RMF, RMDX), rd_rn),
16271 cCE(cfcvt64d, e0004e0, 2, (RMD, RMDX), rd_rn),
16272 cCE(cfcvts32, e100580, 2, (RMFX, RMF), rd_rn),
16273 cCE(cfcvtd32, e1005a0, 2, (RMFX, RMD), rd_rn),
16274 cCE(cftruncs32,e1005c0, 2, (RMFX, RMF), rd_rn),
16275 cCE(cftruncd32,e1005e0, 2, (RMFX, RMD), rd_rn),
16276 cCE(cfrshl32, e000550, 3, (RMFX, RMFX, RR), mav_triple),
16277 cCE(cfrshl64, e000570, 3, (RMDX, RMDX, RR), mav_triple),
16278 cCE(cfsh32, e000500, 3, (RMFX, RMFX, I63s), mav_shift),
16279 cCE(cfsh64, e200500, 3, (RMDX, RMDX, I63s), mav_shift),
16280 cCE(cfcmps, e100490, 3, (RR, RMF, RMF), rd_rn_rm),
16281 cCE(cfcmpd, e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
16282 cCE(cfcmp32, e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
16283 cCE(cfcmp64, e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
16284 cCE(cfabss, e300400, 2, (RMF, RMF), rd_rn),
16285 cCE(cfabsd, e300420, 2, (RMD, RMD), rd_rn),
16286 cCE(cfnegs, e300440, 2, (RMF, RMF), rd_rn),
16287 cCE(cfnegd, e300460, 2, (RMD, RMD), rd_rn),
16288 cCE(cfadds, e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
16289 cCE(cfaddd, e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
16290 cCE(cfsubs, e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
16291 cCE(cfsubd, e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
16292 cCE(cfmuls, e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
16293 cCE(cfmuld, e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
16294 cCE(cfabs32, e300500, 2, (RMFX, RMFX), rd_rn),
16295 cCE(cfabs64, e300520, 2, (RMDX, RMDX), rd_rn),
16296 cCE(cfneg32, e300540, 2, (RMFX, RMFX), rd_rn),
16297 cCE(cfneg64, e300560, 2, (RMDX, RMDX), rd_rn),
16298 cCE(cfadd32, e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16299 cCE(cfadd64, e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
16300 cCE(cfsub32, e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16301 cCE(cfsub64, e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
16302 cCE(cfmul32, e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16303 cCE(cfmul64, e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
16304 cCE(cfmac32, e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16305 cCE(cfmsc32, e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16306 cCE(cfmadd32, e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
16307 cCE(cfmsub32, e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
16308 cCE(cfmadda32, e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
16309 cCE(cfmsuba32, e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
16310 };
16311 #undef ARM_VARIANT
16312 #undef THUMB_VARIANT
16313 #undef TCE
16314 #undef TCM
16315 #undef TUE
16316 #undef TUF
16317 #undef TCC
16318 #undef cCE
16319 #undef cCL
16320 #undef C3E
16321 #undef CE
16322 #undef CM
16323 #undef UE
16324 #undef UF
16325 #undef UT
16326 #undef NUF
16327 #undef nUF
16328 #undef NCE
16329 #undef nCE
16330 #undef OPS0
16331 #undef OPS1
16332 #undef OPS2
16333 #undef OPS3
16334 #undef OPS4
16335 #undef OPS5
16336 #undef OPS6
16337 #undef do_0
16338 \f
16339 /* MD interface: bits in the object file. */
16340
16341 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
16342 for use in the a.out file, and store them in the array pointed to by buf.
16343 This knows about the endian-ness of the target machine and does
16344 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte),
16345 2 (short) and 4 (long). Floating point numbers are put out as a series of
16346 LITTLENUMS (shorts, here at least). */
16347
16348 void
16349 md_number_to_chars (char * buf, valueT val, int n)
16350 {
16351 if (target_big_endian)
16352 number_to_chars_bigendian (buf, val, n);
16353 else
16354 number_to_chars_littleendian (buf, val, n);
16355 }
16356
16357 static valueT
16358 md_chars_to_number (char * buf, int n)
16359 {
16360 valueT result = 0;
16361 unsigned char * where = (unsigned char *) buf;
16362
16363 if (target_big_endian)
16364 {
16365 while (n--)
16366 {
16367 result <<= 8;
16368 result |= (*where++ & 255);
16369 }
16370 }
16371 else
16372 {
16373 while (n--)
16374 {
16375 result <<= 8;
16376 result |= (where[n] & 255);
16377 }
16378 }
16379
16380 return result;
16381 }
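/* Illustrative sketch (not part of the assembler proper): the two helpers
   above are inverses of each other for a given target byte order. Writing
   the ARM NOP pattern and reading it back, with a hypothetical local
   buffer, would look like:

     char buf[4];
     md_number_to_chars (buf, (valueT) 0xe1a00000, 4);
     assert (md_chars_to_number (buf, 4) == 0xe1a00000);

   On a little-endian target buf ends up holding 00 00 a0 e1; on a
   big-endian target it holds e1 a0 00 00. */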
16382
16383 /* MD interface: Sections. */
16384
16385 /* Estimate the size of a frag before relaxing. Assume everything fits in
16386 2 bytes. */
16387
16388 int
16389 md_estimate_size_before_relax (fragS * fragp,
16390 segT segtype ATTRIBUTE_UNUSED)
16391 {
16392 fragp->fr_var = 2;
16393 return 2;
16394 }
16395
16396 /* Convert a machine dependent frag. */
16397
16398 void
16399 md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
16400 {
16401 unsigned long insn;
16402 unsigned long old_op;
16403 char *buf;
16404 expressionS exp;
16405 fixS *fixp;
16406 int reloc_type;
16407 int pc_rel;
16408 int opcode;
16409
16410 buf = fragp->fr_literal + fragp->fr_fix;
16411
16412 old_op = bfd_get_16(abfd, buf);
16413 if (fragp->fr_symbol) {
16414 exp.X_op = O_symbol;
16415 exp.X_add_symbol = fragp->fr_symbol;
16416 } else {
16417 exp.X_op = O_constant;
16418 }
16419 exp.X_add_number = fragp->fr_offset;
16420 opcode = fragp->fr_subtype;
16421 switch (opcode)
16422 {
16423 case T_MNEM_ldr_pc:
16424 case T_MNEM_ldr_pc2:
16425 case T_MNEM_ldr_sp:
16426 case T_MNEM_str_sp:
16427 case T_MNEM_ldr:
16428 case T_MNEM_ldrb:
16429 case T_MNEM_ldrh:
16430 case T_MNEM_str:
16431 case T_MNEM_strb:
16432 case T_MNEM_strh:
16433 if (fragp->fr_var == 4)
16434 {
16435 insn = THUMB_OP32(opcode);
16436 if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
16437 {
16438 insn |= (old_op & 0x700) << 4;
16439 }
16440 else
16441 {
16442 insn |= (old_op & 7) << 12;
16443 insn |= (old_op & 0x38) << 13;
16444 }
16445 insn |= 0x00000c00;
16446 put_thumb32_insn (buf, insn);
16447 reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
16448 }
16449 else
16450 {
16451 reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
16452 }
16453 pc_rel = (opcode == T_MNEM_ldr_pc2);
16454 break;
16455 case T_MNEM_adr:
16456 if (fragp->fr_var == 4)
16457 {
16458 insn = THUMB_OP32 (opcode);
16459 insn |= (old_op & 0xf0) << 4;
16460 put_thumb32_insn (buf, insn);
16461 reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
16462 }
16463 else
16464 {
16465 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
16466 exp.X_add_number -= 4;
16467 }
16468 pc_rel = 1;
16469 break;
16470 case T_MNEM_mov:
16471 case T_MNEM_movs:
16472 case T_MNEM_cmp:
16473 case T_MNEM_cmn:
16474 if (fragp->fr_var == 4)
16475 {
16476 int r0off = (opcode == T_MNEM_mov
16477 || opcode == T_MNEM_movs) ? 0 : 8;
16478 insn = THUMB_OP32 (opcode);
16479 insn = (insn & 0xe1ffffff) | 0x10000000;
16480 insn |= (old_op & 0x700) << r0off;
16481 put_thumb32_insn (buf, insn);
16482 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
16483 }
16484 else
16485 {
16486 reloc_type = BFD_RELOC_ARM_THUMB_IMM;
16487 }
16488 pc_rel = 0;
16489 break;
16490 case T_MNEM_b:
16491 if (fragp->fr_var == 4)
16492 {
16493 insn = THUMB_OP32(opcode);
16494 put_thumb32_insn (buf, insn);
16495 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
16496 }
16497 else
16498 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
16499 pc_rel = 1;
16500 break;
16501 case T_MNEM_bcond:
16502 if (fragp->fr_var == 4)
16503 {
16504 insn = THUMB_OP32(opcode);
16505 insn |= (old_op & 0xf00) << 14;
16506 put_thumb32_insn (buf, insn);
16507 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
16508 }
16509 else
16510 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
16511 pc_rel = 1;
16512 break;
16513 case T_MNEM_add_sp:
16514 case T_MNEM_add_pc:
16515 case T_MNEM_inc_sp:
16516 case T_MNEM_dec_sp:
16517 if (fragp->fr_var == 4)
16518 {
16519 /* ??? Choose between add and addw. */
16520 insn = THUMB_OP32 (opcode);
16521 insn |= (old_op & 0xf0) << 4;
16522 put_thumb32_insn (buf, insn);
16523 if (opcode == T_MNEM_add_pc)
16524 reloc_type = BFD_RELOC_ARM_T32_IMM12;
16525 else
16526 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
16527 }
16528 else
16529 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
16530 pc_rel = 0;
16531 break;
16532
16533 case T_MNEM_addi:
16534 case T_MNEM_addis:
16535 case T_MNEM_subi:
16536 case T_MNEM_subis:
16537 if (fragp->fr_var == 4)
16538 {
16539 insn = THUMB_OP32 (opcode);
16540 insn |= (old_op & 0xf0) << 4;
16541 insn |= (old_op & 0xf) << 16;
16542 put_thumb32_insn (buf, insn);
16543 if (insn & (1 << 20))
16544 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
16545 else
16546 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
16547 }
16548 else
16549 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
16550 pc_rel = 0;
16551 break;
16552 default:
16553 abort ();
16554 }
16555 fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
16556 reloc_type);
16557 fixp->fx_file = fragp->fr_file;
16558 fixp->fx_line = fragp->fr_line;
16559 fragp->fr_fix += fragp->fr_var;
16560 }
16561
16562 /* Return the size of a relaxable immediate operand instruction.
16563 SHIFT and SIZE specify the form of the allowable immediate. */
16564 static int
16565 relax_immediate (fragS *fragp, int size, int shift)
16566 {
16567 offsetT offset;
16568 offsetT mask;
16569 offsetT low;
16570
16571 /* ??? Should be able to do better than this. */
16572 if (fragp->fr_symbol)
16573 return 4;
16574
16575 low = (1 << shift) - 1;
16576 mask = (1 << (shift + size)) - (1 << shift);
16577 offset = fragp->fr_offset;
16578 /* Force misaligned offsets to 32-bit variant. */
16579 if (offset & low)
16580 return 4;
16581 if (offset & ~mask)
16582 return 4;
16583 return 2;
16584 }
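/* Worked example (illustrative, not from the original source): for a
   relaxable LDR/STR word access the caller below uses SIZE = 5 and
   SHIFT = 2, i.e. the narrow encoding takes a 5-bit offset in units of
   4 bytes. An offset of 0x7c therefore stays 16-bit, while 0x80 (too
   large) or 0x66 (not a multiple of 4) forces the 32-bit encoding. */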
16585
16586 /* Get the address of a symbol during relaxation. */
16587 static addressT
16588 relaxed_symbol_addr(fragS *fragp, long stretch)
16589 {
16590 fragS *sym_frag;
16591 addressT addr;
16592 symbolS *sym;
16593
16594 sym = fragp->fr_symbol;
16595 sym_frag = symbol_get_frag (sym);
16596 know (S_GET_SEGMENT (sym) != absolute_section
16597 || sym_frag == &zero_address_frag);
16598 addr = S_GET_VALUE (sym) + fragp->fr_offset;
16599
16600 /* If frag has yet to be reached on this pass, assume it will
16601 move by STRETCH just as we did. If this is not so, it will
16602 be because some frag in between grows, and that will force
16603 another pass. */
16604
16605 if (stretch != 0
16606 && sym_frag->relax_marker != fragp->relax_marker)
16607 addr += stretch;
16608
16609 return addr;
16610 }
16611
16612 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
16613 load. */
16614 static int
16615 relax_adr (fragS *fragp, asection *sec, long stretch)
16616 {
16617 addressT addr;
16618 offsetT val;
16619
16620 /* Assume worst case for symbols not known to be in the same section. */
16621 if (!S_IS_DEFINED(fragp->fr_symbol)
16622 || sec != S_GET_SEGMENT (fragp->fr_symbol))
16623 return 4;
16624
16625 val = relaxed_symbol_addr(fragp, stretch);
16626 addr = fragp->fr_address + fragp->fr_fix;
16627 addr = (addr + 4) & ~3;
16628 /* Force misaligned targets to 32-bit variant. */
16629 if (val & 3)
16630 return 4;
16631 val -= addr;
16632 if (val < 0 || val > 1020)
16633 return 4;
16634 return 2;
16635 }
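/* Worked example (illustrative only): for an ADR at address 0x100 in
   Thumb code the base is (0x100 + 4) & ~3 = 0x104. A word-aligned
   target at 0x1f0 gives val = 0xec, which fits the narrow form's
   0..1020 byte range, so 2 is returned; a target at 0x600 (val = 0x4fc)
   or any target that is not word-aligned needs the 32-bit encoding. */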
16636
16637 /* Return the size of a relaxable add/sub immediate instruction. */
16638 static int
16639 relax_addsub (fragS *fragp, asection *sec)
16640 {
16641 char *buf;
16642 int op;
16643
16644 buf = fragp->fr_literal + fragp->fr_fix;
16645 op = bfd_get_16(sec->owner, buf);
16646 if ((op & 0xf) == ((op >> 4) & 0xf))
16647 return relax_immediate (fragp, 8, 0);
16648 else
16649 return relax_immediate (fragp, 3, 0);
16650 }
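/* Note (added for clarity): the check above compares the two register
   nibbles of the narrow placeholder, i.e. it asks whether Rd equals Rn.
   When they match, the Thumb "ADD/SUB Rd, #imm8" form is available,
   hence the 8-bit immediate; otherwise only "ADD/SUB Rd, Rn, #imm3"
   exists, hence the 3-bit immediate. */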
16651
16652
16653 /* Return the size of a relaxable branch instruction. BITS is the
16654 size of the offset field in the narrow instruction. */
16655
16656 static int
16657 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
16658 {
16659 addressT addr;
16660 offsetT val;
16661 offsetT limit;
16662
16663 /* Assume worst case for symbols not known to be in the same section. */
16664 if (!S_IS_DEFINED(fragp->fr_symbol)
16665 || sec != S_GET_SEGMENT (fragp->fr_symbol))
16666 return 4;
16667
16668 val = relaxed_symbol_addr(fragp, stretch);
16669 addr = fragp->fr_address + fragp->fr_fix + 4;
16670 val -= addr;
16671
16672 /* The offset is a signed value, multiplied by 2. */
16673 limit = 1 << bits;
16674 if (val >= limit || val < -limit)
16675 return 4;
16676 return 2;
16677 }
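/* Worked example (illustrative only): for an unconditional B the caller
   passes BITS = 11, so byte displacements in [-2048, 2047] keep the
   16-bit encoding (the narrow form actually encodes an 11-bit halfword
   offset, roughly +/-2KB). For a conditional branch BITS = 8, giving
   roughly +/-256 bytes before the 32-bit form is needed. */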
16678
16679
16680 /* Relax a machine dependent frag. This returns the amount by which
16681 the current size of the frag should change. */
16682
16683 int
16684 arm_relax_frag (asection *sec, fragS *fragp, long stretch)
16685 {
16686 int oldsize;
16687 int newsize;
16688
16689 oldsize = fragp->fr_var;
16690 switch (fragp->fr_subtype)
16691 {
16692 case T_MNEM_ldr_pc2:
16693 newsize = relax_adr(fragp, sec, stretch);
16694 break;
16695 case T_MNEM_ldr_pc:
16696 case T_MNEM_ldr_sp:
16697 case T_MNEM_str_sp:
16698 newsize = relax_immediate(fragp, 8, 2);
16699 break;
16700 case T_MNEM_ldr:
16701 case T_MNEM_str:
16702 newsize = relax_immediate(fragp, 5, 2);
16703 break;
16704 case T_MNEM_ldrh:
16705 case T_MNEM_strh:
16706 newsize = relax_immediate(fragp, 5, 1);
16707 break;
16708 case T_MNEM_ldrb:
16709 case T_MNEM_strb:
16710 newsize = relax_immediate(fragp, 5, 0);
16711 break;
16712 case T_MNEM_adr:
16713 newsize = relax_adr(fragp, sec, stretch);
16714 break;
16715 case T_MNEM_mov:
16716 case T_MNEM_movs:
16717 case T_MNEM_cmp:
16718 case T_MNEM_cmn:
16719 newsize = relax_immediate(fragp, 8, 0);
16720 break;
16721 case T_MNEM_b:
16722 newsize = relax_branch(fragp, sec, 11, stretch);
16723 break;
16724 case T_MNEM_bcond:
16725 newsize = relax_branch(fragp, sec, 8, stretch);
16726 break;
16727 case T_MNEM_add_sp:
16728 case T_MNEM_add_pc:
16729 newsize = relax_immediate (fragp, 8, 2);
16730 break;
16731 case T_MNEM_inc_sp:
16732 case T_MNEM_dec_sp:
16733 newsize = relax_immediate (fragp, 7, 2);
16734 break;
16735 case T_MNEM_addi:
16736 case T_MNEM_addis:
16737 case T_MNEM_subi:
16738 case T_MNEM_subis:
16739 newsize = relax_addsub (fragp, sec);
16740 break;
16741 default:
16742 abort ();
16743 }
16744
16745 fragp->fr_var = newsize;
16746 /* Freeze wide instructions that are at or before the same location as
16747 in the previous pass. This avoids infinite loops.
16748 Don't freeze them unconditionally because targets may be artificially
16749 misaligned by the expansion of preceding frags. */
16750 if (stretch <= 0 && newsize > 2)
16751 {
16752 md_convert_frag (sec->owner, sec, fragp);
16753 frag_wane(fragp);
16754 }
16755
16756 return newsize - oldsize;
16757 }
16758
16759 /* Round up a section size to the appropriate boundary. */
16760
16761 valueT
16762 md_section_align (segT segment ATTRIBUTE_UNUSED,
16763 valueT size)
16764 {
16765 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
16766 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
16767 {
16768 /* For a.out, force the section size to be aligned. If we don't do
16769 this, BFD will align it for us, but it will not write out the
16770 final bytes of the section. This may be a bug in BFD, but it is
16771 easier to fix it here since that is how the other a.out targets
16772 work. */
16773 int align;
16774
16775 align = bfd_get_section_alignment (stdoutput, segment);
16776 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
16777 }
16778 #endif
16779
16780 return size;
16781 }
16782
16783 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
16784 of an rs_align_code fragment. */
16785
16786 void
16787 arm_handle_align (fragS * fragP)
16788 {
16789 static char const arm_noop[4] = { 0x00, 0x00, 0xa0, 0xe1 };
16790 static char const thumb_noop[2] = { 0xc0, 0x46 };
16791 static char const arm_bigend_noop[4] = { 0xe1, 0xa0, 0x00, 0x00 };
16792 static char const thumb_bigend_noop[2] = { 0x46, 0xc0 };
16793
16794 int bytes, fix, noop_size;
16795 char * p;
16796 const char * noop;
16797
16798 if (fragP->fr_type != rs_align_code)
16799 return;
16800
16801 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
16802 p = fragP->fr_literal + fragP->fr_fix;
16803 fix = 0;
16804
16805 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
16806 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
16807
16808 if (fragP->tc_frag_data)
16809 {
16810 if (target_big_endian)
16811 noop = thumb_bigend_noop;
16812 else
16813 noop = thumb_noop;
16814 noop_size = sizeof (thumb_noop);
16815 }
16816 else
16817 {
16818 if (target_big_endian)
16819 noop = arm_bigend_noop;
16820 else
16821 noop = arm_noop;
16822 noop_size = sizeof (arm_noop);
16823 }
16824
16825 if (bytes & (noop_size - 1))
16826 {
16827 fix = bytes & (noop_size - 1);
16828 memset (p, 0, fix);
16829 p += fix;
16830 bytes -= fix;
16831 }
16832
16833 while (bytes >= noop_size)
16834 {
16835 memcpy (p, noop, noop_size);
16836 p += noop_size;
16837 bytes -= noop_size;
16838 fix += noop_size;
16839 }
16840
16841 fragP->fr_fix += fix;
16842 fragP->fr_var = noop_size;
16843 }
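/* Worked example (illustrative only): when 5 bytes of padding are needed
   in a Thumb region, one zero byte is emitted first to reach 2-byte
   alignment and then two Thumb NOPs (mov r8, r8, 0x46c0); in an ARM
   region the same request would emit one zero byte and one ARM NOP
   (mov r0, r0, 0xe1a00000). */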
16844
16845 /* Called from md_do_align. Used to create an alignment
16846 frag in a code section. */
16847
16848 void
16849 arm_frag_align_code (int n, int max)
16850 {
16851 char * p;
16852
16853 /* We assume that there will never be a requirement
16854 to support alignments greater than 32 bytes. */
16855 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
16856 as_fatal (_("alignments greater than 32 bytes not supported in .text sections."));
16857
16858 p = frag_var (rs_align_code,
16859 MAX_MEM_FOR_RS_ALIGN_CODE,
16860 1,
16861 (relax_substateT) max,
16862 (symbolS *) NULL,
16863 (offsetT) n,
16864 (char *) NULL);
16865 *p = 0;
16866 }
16867
16868 /* Perform target specific initialisation of a frag. */
16869
16870 void
16871 arm_init_frag (fragS * fragP)
16872 {
16873 /* Record whether this frag is in an ARM or a THUMB area. */
16874 fragP->tc_frag_data = thumb_mode;
16875 }
16876
16877 #ifdef OBJ_ELF
16878 /* When we change sections we need to issue a new mapping symbol. */
16879
16880 void
16881 arm_elf_change_section (void)
16882 {
16883 flagword flags;
16884 segment_info_type *seginfo;
16885
16886 /* Link an unlinked unwind index table section to the .text section. */
16887 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
16888 && elf_linked_to_section (now_seg) == NULL)
16889 elf_linked_to_section (now_seg) = text_section;
16890
16891 if (!SEG_NORMAL (now_seg))
16892 return;
16893
16894 flags = bfd_get_section_flags (stdoutput, now_seg);
16895
16896 /* We can ignore sections that only contain debug info. */
16897 if ((flags & SEC_ALLOC) == 0)
16898 return;
16899
16900 seginfo = seg_info (now_seg);
16901 mapstate = seginfo->tc_segment_info_data.mapstate;
16902 marked_pr_dependency = seginfo->tc_segment_info_data.marked_pr_dependency;
16903 }
16904
16905 int
16906 arm_elf_section_type (const char * str, size_t len)
16907 {
16908 if (len == 5 && strncmp (str, "exidx", 5) == 0)
16909 return SHT_ARM_EXIDX;
16910
16911 return -1;
16912 }
16913 \f
16914 /* Code to deal with unwinding tables. */
16915
16916 static void add_unwind_adjustsp (offsetT);
16917
16918 /* Generate any deferred unwind frame offset. */
16919
16920 static void
16921 flush_pending_unwind (void)
16922 {
16923 offsetT offset;
16924
16925 offset = unwind.pending_offset;
16926 unwind.pending_offset = 0;
16927 if (offset != 0)
16928 add_unwind_adjustsp (offset);
16929 }
16930
16931 /* Add an opcode to this list for this function. Two-byte opcodes should
16932 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
16933 order. */
16934
16935 static void
16936 add_unwind_opcode (valueT op, int length)
16937 {
16938 /* Add any deferred stack adjustment. */
16939 if (unwind.pending_offset)
16940 flush_pending_unwind ();
16941
16942 unwind.sp_restored = 0;
16943
16944 if (unwind.opcode_count + length > unwind.opcode_alloc)
16945 {
16946 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
16947 if (unwind.opcodes)
16948 unwind.opcodes = xrealloc (unwind.opcodes,
16949 unwind.opcode_alloc);
16950 else
16951 unwind.opcodes = xmalloc (unwind.opcode_alloc);
16952 }
16953 while (length > 0)
16954 {
16955 length--;
16956 unwind.opcodes[unwind.opcode_count] = op & 0xff;
16957 op >>= 8;
16958 unwind.opcode_count++;
16959 }
16960 }
16961
16962 /* Add unwind opcodes to adjust the stack pointer. */
16963
16964 static void
16965 add_unwind_adjustsp (offsetT offset)
16966 {
16967 valueT op;
16968
16969 if (offset > 0x200)
16970 {
16971 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
16972 char bytes[5];
16973 int n;
16974 valueT o;
16975
16976 /* Long form: 0xb2, uleb128. */
16977 /* This might not fit in a word so add the individual bytes,
16978 remembering the list is built in reverse order. */
16979 o = (valueT) ((offset - 0x204) >> 2);
16980 if (o == 0)
16981 add_unwind_opcode (0, 1);
16982
16983 /* Calculate the uleb128 encoding of the offset. */
16984 n = 0;
16985 while (o)
16986 {
16987 bytes[n] = o & 0x7f;
16988 o >>= 7;
16989 if (o)
16990 bytes[n] |= 0x80;
16991 n++;
16992 }
16993 /* Add the insn. */
16994 for (; n; n--)
16995 add_unwind_opcode (bytes[n - 1], 1);
16996 add_unwind_opcode (0xb2, 1);
16997 }
16998 else if (offset > 0x100)
16999 {
17000 /* Two short opcodes. */
17001 add_unwind_opcode (0x3f, 1);
17002 op = (offset - 0x104) >> 2;
17003 add_unwind_opcode (op, 1);
17004 }
17005 else if (offset > 0)
17006 {
17007 /* Short opcode. */
17008 op = (offset - 4) >> 2;
17009 add_unwind_opcode (op, 1);
17010 }
17011 else if (offset < 0)
17012 {
17013 offset = -offset;
17014 while (offset > 0x100)
17015 {
17016 add_unwind_opcode (0x7f, 1);
17017 offset -= 0x100;
17018 }
17019 op = ((offset - 4) >> 2) | 0x40;
17020 add_unwind_opcode (op, 1);
17021 }
17022 }
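/* Worked example (illustrative only): an adjustment of 0x40 bytes uses
   the short form, opcode (0x40 - 4) >> 2 = 0x0f ("vsp += 0x40"). An
   adjustment of 0x410 bytes takes the long form: the value encoded is
   (0x410 - 0x204) >> 2 = 0x83, whose uleb128 encoding is 0x83 0x01, so
   the opcode bytes ultimately emitted are 0xb2 0x83 0x01. */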
17023
17024 /* Finish the list of unwind opcodes for this function. */
17025 static void
17026 finish_unwind_opcodes (void)
17027 {
17028 valueT op;
17029
17030 if (unwind.fp_used)
17031 {
17032 /* Adjust sp as necessary. */
17033 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
17034 flush_pending_unwind ();
17035
17036 /* After restoring sp from the frame pointer. */
17037 op = 0x90 | unwind.fp_reg;
17038 add_unwind_opcode (op, 1);
17039 }
17040 else
17041 flush_pending_unwind ();
17042 }
17043
17044
17045 /* Start an exception table entry. If idx is nonzero this is an index table
17046 entry. */
17047
17048 static void
17049 start_unwind_section (const segT text_seg, int idx)
17050 {
17051 const char * text_name;
17052 const char * prefix;
17053 const char * prefix_once;
17054 const char * group_name;
17055 size_t prefix_len;
17056 size_t text_len;
17057 char * sec_name;
17058 size_t sec_name_len;
17059 int type;
17060 int flags;
17061 int linkonce;
17062
17063 if (idx)
17064 {
17065 prefix = ELF_STRING_ARM_unwind;
17066 prefix_once = ELF_STRING_ARM_unwind_once;
17067 type = SHT_ARM_EXIDX;
17068 }
17069 else
17070 {
17071 prefix = ELF_STRING_ARM_unwind_info;
17072 prefix_once = ELF_STRING_ARM_unwind_info_once;
17073 type = SHT_PROGBITS;
17074 }
17075
17076 text_name = segment_name (text_seg);
17077 if (streq (text_name, ".text"))
17078 text_name = "";
17079
17080 if (strncmp (text_name, ".gnu.linkonce.t.",
17081 strlen (".gnu.linkonce.t.")) == 0)
17082 {
17083 prefix = prefix_once;
17084 text_name += strlen (".gnu.linkonce.t.");
17085 }
17086
17087 prefix_len = strlen (prefix);
17088 text_len = strlen (text_name);
17089 sec_name_len = prefix_len + text_len;
17090 sec_name = xmalloc (sec_name_len + 1);
17091 memcpy (sec_name, prefix, prefix_len);
17092 memcpy (sec_name + prefix_len, text_name, text_len);
17093 sec_name[prefix_len + text_len] = '\0';
17094
17095 flags = SHF_ALLOC;
17096 linkonce = 0;
17097 group_name = 0;
17098
17099 /* Handle COMDAT group. */
17100 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
17101 {
17102 group_name = elf_group_name (text_seg);
17103 if (group_name == NULL)
17104 {
17105 as_bad ("Group section `%s' has no group signature",
17106 segment_name (text_seg));
17107 ignore_rest_of_line ();
17108 return;
17109 }
17110 flags |= SHF_GROUP;
17111 linkonce = 1;
17112 }
17113
17114 obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
17115
17116 /* Set the section link for index tables. */
17117 if (idx)
17118 elf_linked_to_section (now_seg) = text_seg;
17119 }
17120
17121
17122 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
17123 personality routine data. Returns zero, or the index table value for
17124 an inline entry. */
17125
17126 static valueT
17127 create_unwind_entry (int have_data)
17128 {
17129 int size;
17130 addressT where;
17131 char *ptr;
17132 /* The current word of data. */
17133 valueT data;
17134 /* The number of bytes left in this word. */
17135 int n;
17136
17137 finish_unwind_opcodes ();
17138
17139 /* Remember the current text section. */
17140 unwind.saved_seg = now_seg;
17141 unwind.saved_subseg = now_subseg;
17142
17143 start_unwind_section (now_seg, 0);
17144
17145 if (unwind.personality_routine == NULL)
17146 {
17147 if (unwind.personality_index == -2)
17148 {
17149 if (have_data)
17150 as_bad (_("handlerdata in cantunwind frame"));
17151 return 1; /* EXIDX_CANTUNWIND. */
17152 }
17153
17154 /* Use a default personality routine if none is specified. */
17155 if (unwind.personality_index == -1)
17156 {
17157 if (unwind.opcode_count > 3)
17158 unwind.personality_index = 1;
17159 else
17160 unwind.personality_index = 0;
17161 }
17162
17163 /* Space for the personality routine entry. */
17164 if (unwind.personality_index == 0)
17165 {
17166 if (unwind.opcode_count > 3)
17167 as_bad (_("too many unwind opcodes for personality routine 0"));
17168
17169 if (!have_data)
17170 {
17171 /* All the data is inline in the index table. */
17172 data = 0x80;
17173 n = 3;
17174 while (unwind.opcode_count > 0)
17175 {
17176 unwind.opcode_count--;
17177 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
17178 n--;
17179 }
17180
17181 /* Pad with "finish" opcodes. */
17182 while (n--)
17183 data = (data << 8) | 0xb0;
17184
17185 return data;
17186 }
17187 size = 0;
17188 }
17189 else
17190 /* We get two opcodes "free" in the first word. */
17191 size = unwind.opcode_count - 2;
17192 }
17193 else
17194 /* An extra byte is required for the opcode count. */
17195 size = unwind.opcode_count + 1;
17196
17197 size = (size + 3) >> 2;
17198 if (size > 0xff)
17199 as_bad (_("too many unwind opcodes"));
17200
17201 frag_align (2, 0, 0);
17202 record_alignment (now_seg, 2);
17203 unwind.table_entry = expr_build_dot ();
17204
17205 /* Allocate the table entry. */
17206 ptr = frag_more ((size << 2) + 4);
17207 where = frag_now_fix () - ((size << 2) + 4);
17208
17209 switch (unwind.personality_index)
17210 {
17211 case -1:
17212 /* ??? Should this be a PLT generating relocation? */
17213 /* Custom personality routine. */
17214 fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
17215 BFD_RELOC_ARM_PREL31);
17216
17217 where += 4;
17218 ptr += 4;
17219
17220 /* Set the first byte to the number of additional words. */
17221 data = size - 1;
17222 n = 3;
17223 break;
17224
17225 /* ABI defined personality routines. */
17226 case 0:
17227 /* Three opcode bytes are packed into the first word. */
17228 data = 0x80;
17229 n = 3;
17230 break;
17231
17232 case 1:
17233 case 2:
17234 /* The size and first two opcode bytes go in the first word. */
17235 data = ((0x80 + unwind.personality_index) << 8) | size;
17236 n = 2;
17237 break;
17238
17239 default:
17240 /* Should never happen. */
17241 abort ();
17242 }
17243
17244 /* Pack the opcodes into words (MSB first), reversing the list at the same
17245 time. */
17246 while (unwind.opcode_count > 0)
17247 {
17248 if (n == 0)
17249 {
17250 md_number_to_chars (ptr, data, 4);
17251 ptr += 4;
17252 n = 4;
17253 data = 0;
17254 }
17255 unwind.opcode_count--;
17256 n--;
17257 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
17258 }
17259
17260 /* Finish off the last word. */
17261 if (n < 4)
17262 {
17263 /* Pad with "finish" opcodes. */
17264 while (n--)
17265 data = (data << 8) | 0xb0;
17266
17267 md_number_to_chars (ptr, data, 4);
17268 }
17269
17270 if (!have_data)
17271 {
17272 /* Add an empty descriptor if there is no user-specified data. */
17273 ptr = frag_more (4);
17274 md_number_to_chars (ptr, 0, 4);
17275 }
17276
17277 return 0;
17278 }
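/* Worked example (illustrative only): a frame whose only opcode is a
   single "finish" (0xb0), using the default personality routine 0 and no
   handler data, is returned as the inline index table word 0x80b0b0b0:
   0x80 selects the compact model, the recorded opcode follows, and the
   remaining byte positions are padded with further "finish" opcodes. */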
17279
17280
17281 /* Initialize the DWARF-2 unwind information for this procedure. */
17282
17283 void
17284 tc_arm_frame_initial_instructions (void)
17285 {
17286 cfi_add_CFA_def_cfa (REG_SP, 0);
17287 }
17288 #endif /* OBJ_ELF */
17289
17290 /* Convert REGNAME to a DWARF-2 register number. */
17291
17292 int
17293 tc_arm_regname_to_dw2regnum (char *regname)
17294 {
17295 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
17296
17297 if (reg == FAIL)
17298 return -1;
17299
17300 return reg;
17301 }
17302
17303 #ifdef TE_PE
17304 void
17305 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
17306 {
17307 expressionS expr;
17308
17309 expr.X_op = O_secrel;
17310 expr.X_add_symbol = symbol;
17311 expr.X_add_number = 0;
17312 emit_expr (&expr, size);
17313 }
17314 #endif
17315
17316 /* MD interface: Symbol and relocation handling. */
17317
17318 /* Return the address within the segment that a PC-relative fixup is
17319 relative to. For ARM, PC-relative fixups applied to instructions
17320 are generally relative to the location of the fixup plus 8 bytes.
17321 Thumb branches are offset by 4, and Thumb loads relative to PC
17322 require special handling. */
17323
17324 long
17325 md_pcrel_from_section (fixS * fixP, segT seg)
17326 {
17327 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
17328
17329 /* If this is pc-relative and we are going to emit a relocation
17330 then we just want to put out any pipeline compensation that the linker
17331 will need. Otherwise we want to use the calculated base.
17332 For WinCE we skip the bias for externals as well, since this
17333 is how the MS ARM-CE assembler behaves and we want to be compatible. */
17334 if (fixP->fx_pcrel
17335 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
17336 || (arm_force_relocation (fixP)
17337 #ifdef TE_WINCE
17338 && !S_IS_EXTERNAL (fixP->fx_addsy)
17339 #endif
17340 )))
17341 base = 0;
17342
17343 switch (fixP->fx_r_type)
17344 {
17345 /* PC relative addressing on the Thumb is slightly odd as the
17346 bottom two bits of the PC are forced to zero for the
17347 calculation. This happens *after* application of the
17348 pipeline offset. However, Thumb adrl already adjusts for
17349 this, so we need not do it again. */
17350 case BFD_RELOC_ARM_THUMB_ADD:
17351 return base & ~3;
17352
17353 case BFD_RELOC_ARM_THUMB_OFFSET:
17354 case BFD_RELOC_ARM_T32_OFFSET_IMM:
17355 case BFD_RELOC_ARM_T32_ADD_PC12:
17356 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
17357 return (base + 4) & ~3;
17358
17359 /* Thumb branches are simply offset by +4. */
17360 case BFD_RELOC_THUMB_PCREL_BRANCH7:
17361 case BFD_RELOC_THUMB_PCREL_BRANCH9:
17362 case BFD_RELOC_THUMB_PCREL_BRANCH12:
17363 case BFD_RELOC_THUMB_PCREL_BRANCH20:
17364 case BFD_RELOC_THUMB_PCREL_BRANCH23:
17365 case BFD_RELOC_THUMB_PCREL_BRANCH25:
17366 case BFD_RELOC_THUMB_PCREL_BLX:
17367 return base + 4;
17368
17369 /* ARM mode branches are offset by +8. However, the Windows CE
17370 loader expects the relocation not to take this into account. */
17371 case BFD_RELOC_ARM_PCREL_BRANCH:
17372 case BFD_RELOC_ARM_PCREL_CALL:
17373 case BFD_RELOC_ARM_PCREL_JUMP:
17374 case BFD_RELOC_ARM_PCREL_BLX:
17375 case BFD_RELOC_ARM_PLT32:
17376 #ifdef TE_WINCE
17377 /* When handling fixups immediately, because we have already
17378 discovered the value of a symbol, or the address of the frag involved,
17379 we must account for the offset of +8, as the OS loader will never see the reloc;
17380 see fixup_segment() in write.c.
17381 The S_IS_EXTERNAL test handles the case of global symbols.
17382 Those need the calculated base, not just the pipeline compensation the linker will need. */
17383 if (fixP->fx_pcrel
17384 && fixP->fx_addsy != NULL
17385 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
17386 && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
17387 return base + 8;
17388 return base;
17389 #else
17390 return base + 8;
17391 #endif
17392
17393 /* ARM mode loads relative to PC are also offset by +8. Unlike
17394 branches, the Windows CE loader *does* expect the relocation
17395 to take this into account. */
17396 case BFD_RELOC_ARM_OFFSET_IMM:
17397 case BFD_RELOC_ARM_OFFSET_IMM8:
17398 case BFD_RELOC_ARM_HWLITERAL:
17399 case BFD_RELOC_ARM_LITERAL:
17400 case BFD_RELOC_ARM_CP_OFF_IMM:
17401 return base + 8;
17402
17403
17404 /* Other PC-relative relocations are un-offset. */
17405 default:
17406 return base;
17407 }
17408 }
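/* Worked example (illustrative only): an ARM-mode B at address 0x1000
   branching to 0x1010 is resolved against a base of 0x1008, so the
   24-bit field receives (0x1010 - 0x1008) >> 2 = 2. The equivalent
   Thumb branch would use a base of 0x1004 instead. */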
17409
17410 /* Under ELF we need to provide a default value for the _GLOBAL_OFFSET_TABLE_
17411 symbol. Otherwise we have no need to provide default values for symbols. */
17412
17413 symbolS *
17414 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
17415 {
17416 #ifdef OBJ_ELF
17417 if (name[0] == '_' && name[1] == 'G'
17418 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
17419 {
17420 if (!GOT_symbol)
17421 {
17422 if (symbol_find (name))
17423 as_bad ("GOT already in the symbol table");
17424
17425 GOT_symbol = symbol_new (name, undefined_section,
17426 (valueT) 0, & zero_address_frag);
17427 }
17428
17429 return GOT_symbol;
17430 }
17431 #endif
17432
17433 return 0;
17434 }
17435
17436 /* Subroutine of md_apply_fix. Check to see if an immediate can be
17437 computed as two separate immediate values, added together. We
17438 already know that this value cannot be computed by just one ARM
17439 instruction. */
17440
17441 static unsigned int
17442 validate_immediate_twopart (unsigned int val,
17443 unsigned int * highpart)
17444 {
17445 unsigned int a;
17446 unsigned int i;
17447
17448 for (i = 0; i < 32; i += 2)
17449 if (((a = rotate_left (val, i)) & 0xff) != 0)
17450 {
17451 if (a & 0xff00)
17452 {
17453 if (a & ~ 0xffff)
17454 continue;
17455 * highpart = (a >> 8) | ((i + 24) << 7);
17456 }
17457 else if (a & 0xff0000)
17458 {
17459 if (a & 0xff000000)
17460 continue;
17461 * highpart = (a >> 16) | ((i + 16) << 7);
17462 }
17463 else
17464 {
17465 assert (a & 0xff000000);
17466 * highpart = (a >> 24) | ((i + 8) << 7);
17467 }
17468
17469 return (a & 0xff) | (i << 7);
17470 }
17471
17472 return FAIL;
17473 }
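/* Worked example (illustrative only): 0x1001 cannot be encoded as a
   single 8-bit-rotated ARM immediate, but this routine splits it into a
   low part of 0x001 and a high part of 0x1000, each of which can, so an
   ADRL needing that offset becomes two ADD (or two SUB) instructions. */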
17474
17475 static int
17476 validate_offset_imm (unsigned int val, int hwse)
17477 {
17478 if ((hwse && val > 255) || val > 4095)
17479 return FAIL;
17480 return val;
17481 }
17482
17483 /* Subroutine of md_apply_fix. Do those data_ops which can take a
17484 negative immediate constant by altering the instruction. A bit of
17485 a hack really.
17486 MOV <-> MVN
17487 AND <-> BIC
17488 ADC <-> SBC
17489 by inverting the second operand, and
17490 ADD <-> SUB
17491 CMP <-> CMN
17492 by negating the second operand. */
17493
17494 static int
17495 negate_data_op (unsigned long * instruction,
17496 unsigned long value)
17497 {
17498 int op, new_inst;
17499 unsigned long negated, inverted;
17500
17501 negated = encode_arm_immediate (-value);
17502 inverted = encode_arm_immediate (~value);
17503
17504 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
17505 switch (op)
17506 {
17507 /* First negates. */
17508 case OPCODE_SUB: /* ADD <-> SUB */
17509 new_inst = OPCODE_ADD;
17510 value = negated;
17511 break;
17512
17513 case OPCODE_ADD:
17514 new_inst = OPCODE_SUB;
17515 value = negated;
17516 break;
17517
17518 case OPCODE_CMP: /* CMP <-> CMN */
17519 new_inst = OPCODE_CMN;
17520 value = negated;
17521 break;
17522
17523 case OPCODE_CMN:
17524 new_inst = OPCODE_CMP;
17525 value = negated;
17526 break;
17527
17528 /* Now Inverted ops. */
17529 case OPCODE_MOV: /* MOV <-> MVN */
17530 new_inst = OPCODE_MVN;
17531 value = inverted;
17532 break;
17533
17534 case OPCODE_MVN:
17535 new_inst = OPCODE_MOV;
17536 value = inverted;
17537 break;
17538
17539 case OPCODE_AND: /* AND <-> BIC */
17540 new_inst = OPCODE_BIC;
17541 value = inverted;
17542 break;
17543
17544 case OPCODE_BIC:
17545 new_inst = OPCODE_AND;
17546 value = inverted;
17547 break;
17548
17549 case OPCODE_ADC: /* ADC <-> SBC */
17550 new_inst = OPCODE_SBC;
17551 value = inverted;
17552 break;
17553
17554 case OPCODE_SBC:
17555 new_inst = OPCODE_ADC;
17556 value = inverted;
17557 break;
17558
17559 /* We cannot do anything. */
17560 default:
17561 return FAIL;
17562 }
17563
17564 if (value == (unsigned) FAIL)
17565 return FAIL;
17566
17567 *instruction &= OPCODE_MASK;
17568 *instruction |= new_inst << DATA_OP_SHIFT;
17569 return value;
17570 }
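/* Worked example (illustrative only): "cmp r0, #-1" has no encodable
   immediate, but negating gives "cmn r0, #1"; likewise
   "and r0, r0, #0xfffffffe" is rewritten as "bic r0, r0, #1" by
   inverting the operand. */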
17571
17572 /* Like negate_data_op, but for Thumb-2. */
17573
17574 static unsigned int
17575 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
17576 {
17577 int op, new_inst;
17578 int rd;
17579 unsigned int negated, inverted;
17580
17581 negated = encode_thumb32_immediate (-value);
17582 inverted = encode_thumb32_immediate (~value);
17583
17584 rd = (*instruction >> 8) & 0xf;
17585 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
17586 switch (op)
17587 {
17588 /* ADD <-> SUB. Includes CMP <-> CMN. */
17589 case T2_OPCODE_SUB:
17590 new_inst = T2_OPCODE_ADD;
17591 value = negated;
17592 break;
17593
17594 case T2_OPCODE_ADD:
17595 new_inst = T2_OPCODE_SUB;
17596 value = negated;
17597 break;
17598
17599 /* ORR <-> ORN. Includes MOV <-> MVN. */
17600 case T2_OPCODE_ORR:
17601 new_inst = T2_OPCODE_ORN;
17602 value = inverted;
17603 break;
17604
17605 case T2_OPCODE_ORN:
17606 new_inst = T2_OPCODE_ORR;
17607 value = inverted;
17608 break;
17609
17610 /* AND <-> BIC. TST has no inverted equivalent. */
17611 case T2_OPCODE_AND:
17612 new_inst = T2_OPCODE_BIC;
17613 if (rd == 15)
17614 value = FAIL;
17615 else
17616 value = inverted;
17617 break;
17618
17619 case T2_OPCODE_BIC:
17620 new_inst = T2_OPCODE_AND;
17621 value = inverted;
17622 break;
17623
17624 /* ADC <-> SBC */
17625 case T2_OPCODE_ADC:
17626 new_inst = T2_OPCODE_SBC;
17627 value = inverted;
17628 break;
17629
17630 case T2_OPCODE_SBC:
17631 new_inst = T2_OPCODE_ADC;
17632 value = inverted;
17633 break;
17634
17635 /* We cannot do anything. */
17636 default:
17637 return FAIL;
17638 }
17639
17640 if (value == (unsigned int)FAIL)
17641 return FAIL;
17642
17643 *instruction &= T2_OPCODE_MASK;
17644 *instruction |= new_inst << T2_DATA_OP_SHIFT;
17645 return value;
17646 }
17647
17648 /* Read a 32-bit thumb instruction from buf. */
17649 static unsigned long
17650 get_thumb32_insn (char * buf)
17651 {
17652 unsigned long insn;
17653 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
17654 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
17655
17656 return insn;
17657 }
17658
17659
17660 /* We usually want to set the low bit on the address of thumb function
17661 symbols. In particular .word foo - . should have the low bit set.
17662 Generic code tries to fold the difference of two symbols to
17663 a constant. Prevent this and force a relocation when the first symbols
17664 is a thumb function. */
17665 int
17666 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
17667 {
17668 if (op == O_subtract
17669 && l->X_op == O_symbol
17670 && r->X_op == O_symbol
17671 && THUMB_IS_FUNC (l->X_add_symbol))
17672 {
17673 l->X_op = O_subtract;
17674 l->X_op_symbol = r->X_add_symbol;
17675 l->X_add_number -= r->X_add_number;
17676 return 1;
17677 }
17678 /* Process as normal. */
17679 return 0;
17680 }
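/* Illustrative note (not from the original source): given

     .thumb_func
   foo: bx lr
   tab: .word foo - .

   folding the subtraction to a plain constant would lose the Thumb bit,
   so the code above keeps the expression symbolic and lets the fixup
   machinery set bit 0 of the result. */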
17681
17682 void
17683 md_apply_fix (fixS * fixP,
17684 valueT * valP,
17685 segT seg)
17686 {
17687 offsetT value = * valP;
17688 offsetT newval;
17689 unsigned int newimm;
17690 unsigned long temp;
17691 int sign;
17692 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
17693
17694 assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
17695
17696 /* Note whether this will delete the relocation. */
17697
17698 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
17699 fixP->fx_done = 1;
17700
17701 /* On a 64-bit host, silently truncate 'value' to 32 bits for
17702 consistency with the behavior on 32-bit hosts. Remember value
17703 for emit_reloc. */
17704 value &= 0xffffffff;
17705 value ^= 0x80000000;
17706 value -= 0x80000000;
17707
17708 *valP = value;
17709 fixP->fx_addnumber = value;
17710
17711 /* Same treatment for fixP->fx_offset. */
17712 fixP->fx_offset &= 0xffffffff;
17713 fixP->fx_offset ^= 0x80000000;
17714 fixP->fx_offset -= 0x80000000;
17715
17716 switch (fixP->fx_r_type)
17717 {
17718 case BFD_RELOC_NONE:
17719 /* This will need to go in the object file. */
17720 fixP->fx_done = 0;
17721 break;
17722
17723 case BFD_RELOC_ARM_IMMEDIATE:
17724 /* We claim that this fixup has been processed here,
17725 even if in fact we generate an error because we do
17726 not have a reloc for it, so tc_gen_reloc will reject it. */
17727 fixP->fx_done = 1;
17728
17729 if (fixP->fx_addsy
17730 && ! S_IS_DEFINED (fixP->fx_addsy))
17731 {
17732 as_bad_where (fixP->fx_file, fixP->fx_line,
17733 _("undefined symbol %s used as an immediate value"),
17734 S_GET_NAME (fixP->fx_addsy));
17735 break;
17736 }
17737
17738 newimm = encode_arm_immediate (value);
17739 temp = md_chars_to_number (buf, INSN_SIZE);
17740
17741 /* If the instruction will fail, see if we can fix things up by
17742 changing the opcode. */
17743 if (newimm == (unsigned int) FAIL
17744 && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL)
17745 {
17746 as_bad_where (fixP->fx_file, fixP->fx_line,
17747 _("invalid constant (%lx) after fixup"),
17748 (unsigned long) value);
17749 break;
17750 }
17751
17752 newimm |= (temp & 0xfffff000);
17753 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
17754 break;
17755
17756 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
17757 {
17758 unsigned int highpart = 0;
17759 unsigned int newinsn = 0xe1a00000; /* nop. */
17760
17761 newimm = encode_arm_immediate (value);
17762 temp = md_chars_to_number (buf, INSN_SIZE);
17763
17764 /* If the instruction will fail, see if we can fix things up by
17765 changing the opcode. */
17766 if (newimm == (unsigned int) FAIL
17767 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
17768 {
17769 /* No ? OK - try using two ADD instructions to generate
17770 the value. */
17771 newimm = validate_immediate_twopart (value, & highpart);
17772
17773 /* Yes - then make sure that the second instruction is
17774 also an add. */
17775 if (newimm != (unsigned int) FAIL)
17776 newinsn = temp;
17777 /* Still No ? Try using a negated value. */
17778 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
17779 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
17780 /* Otherwise - give up. */
17781 else
17782 {
17783 as_bad_where (fixP->fx_file, fixP->fx_line,
17784 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
17785 (long) value);
17786 break;
17787 }
17788
17789 /* Replace the first operand in the 2nd instruction (which
17790 is the PC) with the destination register. We have
17791 already added in the PC in the first instruction and we
17792 do not want to do it again. */
17793 newinsn &= ~ 0xf0000;
17794 newinsn |= ((newinsn & 0x0f000) << 4);
17795 }
17796
17797 newimm |= (temp & 0xfffff000);
17798 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
17799
17800 highpart |= (newinsn & 0xfffff000);
17801 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
17802 }
17803 break;
17804
17805 case BFD_RELOC_ARM_OFFSET_IMM:
17806 if (!fixP->fx_done && seg->use_rela_p)
17807 value = 0;
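/* Fall through. */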
17808
17809 case BFD_RELOC_ARM_LITERAL:
17810 sign = value >= 0;
17811
17812 if (value < 0)
17813 value = - value;
17814
17815 if (validate_offset_imm (value, 0) == FAIL)
17816 {
17817 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
17818 as_bad_where (fixP->fx_file, fixP->fx_line,
17819 _("invalid literal constant: pool needs to be closer"));
17820 else
17821 as_bad_where (fixP->fx_file, fixP->fx_line,
17822 _("bad immediate value for offset (%ld)"),
17823 (long) value);
17824 break;
17825 }
17826
17827 newval = md_chars_to_number (buf, INSN_SIZE);
17828 newval &= 0xff7ff000;
17829 newval |= value | (sign ? INDEX_UP : 0);
17830 md_number_to_chars (buf, newval, INSN_SIZE);
17831 break;
17832
17833 case BFD_RELOC_ARM_OFFSET_IMM8:
17834 case BFD_RELOC_ARM_HWLITERAL:
17835 sign = value >= 0;
17836
17837 if (value < 0)
17838 value = - value;
17839
17840 if (validate_offset_imm (value, 1) == FAIL)
17841 {
17842 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
17843 as_bad_where (fixP->fx_file, fixP->fx_line,
17844 _("invalid literal constant: pool needs to be closer"));
17845 else
17846 as_bad (_("bad immediate value for half-word offset (%ld)"),
17847 (long) value);
17848 break;
17849 }
17850
17851 newval = md_chars_to_number (buf, INSN_SIZE);
17852 newval &= 0xff7ff0f0;
17853 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
17854 md_number_to_chars (buf, newval, INSN_SIZE);
17855 break;
17856
17857 case BFD_RELOC_ARM_T32_OFFSET_U8:
17858 if (value < 0 || value > 1020 || value % 4 != 0)
17859 as_bad_where (fixP->fx_file, fixP->fx_line,
17860 _("bad immediate value for offset (%ld)"), (long) value);
17861 value /= 4;
17862
17863 newval = md_chars_to_number (buf+2, THUMB_SIZE);
17864 newval |= value;
17865 md_number_to_chars (buf+2, newval, THUMB_SIZE);
17866 break;
17867
17868 case BFD_RELOC_ARM_T32_OFFSET_IMM:
17869 /* This is a complicated relocation used for all varieties of Thumb32
17870 load/store instruction with immediate offset:
17871
17872 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
17873 *4, optional writeback(W)
17874 (doubleword load/store)
17875
17876 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
17877 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
17878 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
17879 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
17880 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
17881
17882 Uppercase letters indicate bits that are already encoded at
17883 this point. Lowercase letters are our problem. For the
17884 second block of instructions, the secondary opcode nybble
17885 (bits 8..11) is present, and bit 23 is zero, even if this is
17886 a PC-relative operation. */
17887 newval = md_chars_to_number (buf, THUMB_SIZE);
17888 newval <<= 16;
17889 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
17890
17891 if ((newval & 0xf0000000) == 0xe0000000)
17892 {
17893 /* Doubleword load/store: 8-bit offset, scaled by 4. */
17894 if (value >= 0)
17895 newval |= (1 << 23);
17896 else
17897 value = -value;
17898 if (value % 4 != 0)
17899 {
17900 as_bad_where (fixP->fx_file, fixP->fx_line,
17901 _("offset not a multiple of 4"));
17902 break;
17903 }
17904 value /= 4;
17905 if (value > 0xff)
17906 {
17907 as_bad_where (fixP->fx_file, fixP->fx_line,
17908 _("offset out of range"));
17909 break;
17910 }
17911 newval &= ~0xff;
17912 }
17913 else if ((newval & 0x000f0000) == 0x000f0000)
17914 {
17915 /* PC-relative, 12-bit offset. */
17916 if (value >= 0)
17917 newval |= (1 << 23);
17918 else
17919 value = -value;
17920 if (value > 0xfff)
17921 {
17922 as_bad_where (fixP->fx_file, fixP->fx_line,
17923 _("offset out of range"));
17924 break;
17925 }
17926 newval &= ~0xfff;
17927 }
17928 else if ((newval & 0x00000100) == 0x00000100)
17929 {
17930 /* Writeback: 8-bit, +/- offset. */
17931 if (value >= 0)
17932 newval |= (1 << 9);
17933 else
17934 value = -value;
17935 if (value > 0xff)
17936 {
17937 as_bad_where (fixP->fx_file, fixP->fx_line,
17938 _("offset out of range"));
17939 break;
17940 }
17941 newval &= ~0xff;
17942 }
17943 else if ((newval & 0x00000f00) == 0x00000e00)
17944 {
17945 /* T-instruction: positive 8-bit offset. */
17946 if (value < 0 || value > 0xff)
17947 {
17948 as_bad_where (fixP->fx_file, fixP->fx_line,
17949 _("offset out of range"));
17950 break;
17951 }
17952 newval &= ~0xff;
17953 newval |= value;
17954 }
17955 else
17956 {
17957 /* Positive 12-bit or negative 8-bit offset. */
17958 int limit;
17959 if (value >= 0)
17960 {
17961 newval |= (1 << 23);
17962 limit = 0xfff;
17963 }
17964 else
17965 {
17966 value = -value;
17967 limit = 0xff;
17968 }
17969 if (value > limit)
17970 {
17971 as_bad_where (fixP->fx_file, fixP->fx_line,
17972 _("offset out of range"));
17973 break;
17974 }
17975 newval &= ~limit;
17976 }
17977
17978 newval |= value;
17979 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
17980 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
17981 break;
17982
17983 case BFD_RELOC_ARM_SHIFT_IMM:
17984 newval = md_chars_to_number (buf, INSN_SIZE);
17985 if (((unsigned long) value) > 32
17986 || (value == 32
17987 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
17988 {
17989 as_bad_where (fixP->fx_file, fixP->fx_line,
17990 _("shift expression is too large"));
17991 break;
17992 }
17993
17994 if (value == 0)
17995 /* Shifts of zero must be done as lsl. */
17996 newval &= ~0x60;
17997 else if (value == 32)
17998 value = 0;
17999 newval &= 0xfffff07f;
18000 newval |= (value & 0x1f) << 7;
18001 md_number_to_chars (buf, newval, INSN_SIZE);
18002 break;
18003
18004 case BFD_RELOC_ARM_T32_IMMEDIATE:
18005 case BFD_RELOC_ARM_T32_ADD_IMM:
18006 case BFD_RELOC_ARM_T32_IMM12:
18007 case BFD_RELOC_ARM_T32_ADD_PC12:
18008 /* We claim that this fixup has been processed here,
18009 even if in fact we generate an error because we do
18010 not have a reloc for it, so tc_gen_reloc will reject it. */
18011 fixP->fx_done = 1;
18012
18013 if (fixP->fx_addsy
18014 && ! S_IS_DEFINED (fixP->fx_addsy))
18015 {
18016 as_bad_where (fixP->fx_file, fixP->fx_line,
18017 _("undefined symbol %s used as an immediate value"),
18018 S_GET_NAME (fixP->fx_addsy));
18019 break;
18020 }
18021
18022 newval = md_chars_to_number (buf, THUMB_SIZE);
18023 newval <<= 16;
18024 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
18025
18026 newimm = FAIL;
18027 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
18028 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
18029 {
18030 newimm = encode_thumb32_immediate (value);
18031 if (newimm == (unsigned int) FAIL)
18032 newimm = thumb32_negate_data_op (&newval, value);
18033 }
18034 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
18035 && newimm == (unsigned int) FAIL)
18036 {
18037 /* Turn add/sub into addw/subw. */
18038 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
18039 newval = (newval & 0xfeffffff) | 0x02000000;
18040
18041 /* 12 bit immediate for addw/subw. */
18042 if (value < 0)
18043 {
18044 value = -value;
18045 newval ^= 0x00a00000;
18046 }
18047 if (value > 0xfff)
18048 newimm = (unsigned int) FAIL;
18049 else
18050 newimm = value;
18051 }
18052
18053 if (newimm == (unsigned int)FAIL)
18054 {
18055 as_bad_where (fixP->fx_file, fixP->fx_line,
18056 _("invalid constant (%lx) after fixup"),
18057 (unsigned long) value);
18058 break;
18059 }
18060
18061 newval |= (newimm & 0x800) << 15;
18062 newval |= (newimm & 0x700) << 4;
18063 newval |= (newimm & 0x0ff);
18064
18065 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
18066 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
18067 break;
18068
18069 case BFD_RELOC_ARM_SMC:
18070 if (((unsigned long) value) > 0xffff)
18071 as_bad_where (fixP->fx_file, fixP->fx_line,
18072 _("invalid smc expression"));
18073 newval = md_chars_to_number (buf, INSN_SIZE);
18074 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
18075 md_number_to_chars (buf, newval, INSN_SIZE);
18076 break;
18077
18078 case BFD_RELOC_ARM_SWI:
18079 if (fixP->tc_fix_data != 0)
18080 {
18081 if (((unsigned long) value) > 0xff)
18082 as_bad_where (fixP->fx_file, fixP->fx_line,
18083 _("invalid swi expression"));
18084 newval = md_chars_to_number (buf, THUMB_SIZE);
18085 newval |= value;
18086 md_number_to_chars (buf, newval, THUMB_SIZE);
18087 }
18088 else
18089 {
18090 if (((unsigned long) value) > 0x00ffffff)
18091 as_bad_where (fixP->fx_file, fixP->fx_line,
18092 _("invalid swi expression"));
18093 newval = md_chars_to_number (buf, INSN_SIZE);
18094 newval |= value;
18095 md_number_to_chars (buf, newval, INSN_SIZE);
18096 }
18097 break;
18098
18099 case BFD_RELOC_ARM_MULTI:
18100 if (((unsigned long) value) > 0xffff)
18101 as_bad_where (fixP->fx_file, fixP->fx_line,
18102 _("invalid expression in load/store multiple"));
18103 newval = value | md_chars_to_number (buf, INSN_SIZE);
18104 md_number_to_chars (buf, newval, INSN_SIZE);
18105 break;
18106
18107 #ifdef OBJ_ELF
18108 case BFD_RELOC_ARM_PCREL_CALL:
18109 newval = md_chars_to_number (buf, INSN_SIZE);
18110 if ((newval & 0xf0000000) == 0xf0000000)
18111 temp = 1;
18112 else
18113 temp = 3;
18114 goto arm_branch_common;
18115
18116 case BFD_RELOC_ARM_PCREL_JUMP:
18117 case BFD_RELOC_ARM_PLT32:
18118 #endif
18119 case BFD_RELOC_ARM_PCREL_BRANCH:
18120 temp = 3;
18121 goto arm_branch_common;
18122
18123 case BFD_RELOC_ARM_PCREL_BLX:
18124 temp = 1;
18125 arm_branch_common:
18126 /* We are going to store value (shifted right by two) in the
18127 instruction, in a 24-bit signed field. Bits 26 through 32 must be either
18128 all clear or all set, and bit 0 must be clear. For B/BL bit 1 must
18129 also be clear. */
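/* Worked example (illustrative, not from the original source): a forward
   branch whose adjusted byte offset is 0x1000 stores 0x400 in the 24-bit
   field below.  temp is 3 for B/BL (bits 0 and 1 must both be clear) and
   1 for BLX, where bit 1 instead becomes the H bit set further down.  */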
18130 if (value & temp)
18131 as_bad_where (fixP->fx_file, fixP->fx_line,
18132 _("misaligned branch destination"));
18133 if ((value & (offsetT)0xfe000000) != (offsetT)0
18134 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
18135 as_bad_where (fixP->fx_file, fixP->fx_line,
18136 _("branch out of range"));
18137
18138 if (fixP->fx_done || !seg->use_rela_p)
18139 {
18140 newval = md_chars_to_number (buf, INSN_SIZE);
18141 newval |= (value >> 2) & 0x00ffffff;
18142 /* Set the H bit on BLX instructions. */
18143 if (temp == 1)
18144 {
18145 if (value & 2)
18146 newval |= 0x01000000;
18147 else
18148 newval &= ~0x01000000;
18149 }
18150 md_number_to_chars (buf, newval, INSN_SIZE);
18151 }
18152 break;
18153
18154 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
18155 /* CBZ can only branch forward. */
18156
18157 /* Attempts to use CBZ to branch to the next instruction
18158 (which, strictly speaking, are prohibited) will be turned into
18159 no-ops.
18160
18161 FIXME: It may be better to remove the instruction completely and
18162 perform relaxation. */
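/* Note (assumption based on the surrounding code): an offset of -2 is what
   a branch to the immediately following 16-bit instruction looks like once
   the PC+4 adjustment made by md_pcrel_from is taken into account.  */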
18163 if (value == -2)
18164 {
18165 newval = md_chars_to_number (buf, THUMB_SIZE);
18166 newval = 0xbf00; /* NOP encoding T1 */
18167 md_number_to_chars (buf, newval, THUMB_SIZE);
18168 }
18169 else
18170 {
18171 if (value & ~0x7e)
18172 as_bad_where (fixP->fx_file, fixP->fx_line,
18173 _("branch out of range"));
18174
18175 if (fixP->fx_done || !seg->use_rela_p)
18176 {
18177 newval = md_chars_to_number (buf, THUMB_SIZE);
18178 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
18179 md_number_to_chars (buf, newval, THUMB_SIZE);
18180 }
18181 }
18182 break;
18183
18184 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
18185 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
18186 as_bad_where (fixP->fx_file, fixP->fx_line,
18187 _("branch out of range"));
18188
18189 if (fixP->fx_done || !seg->use_rela_p)
18190 {
18191 newval = md_chars_to_number (buf, THUMB_SIZE);
18192 newval |= (value & 0x1ff) >> 1;
18193 md_number_to_chars (buf, newval, THUMB_SIZE);
18194 }
18195 break;
18196
18197 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
18198 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
18199 as_bad_where (fixP->fx_file, fixP->fx_line,
18200 _("branch out of range"));
18201
18202 if (fixP->fx_done || !seg->use_rela_p)
18203 {
18204 newval = md_chars_to_number (buf, THUMB_SIZE);
18205 newval |= (value & 0xfff) >> 1;
18206 md_number_to_chars (buf, newval, THUMB_SIZE);
18207 }
18208 break;
18209
18210 case BFD_RELOC_THUMB_PCREL_BRANCH20:
18211 if ((value & ~0x1fffff) && ((value & ~0x1fffff) != ~0x1fffff))
18212 as_bad_where (fixP->fx_file, fixP->fx_line,
18213 _("conditional branch out of range"));
18214
18215 if (fixP->fx_done || !seg->use_rela_p)
18216 {
18217 offsetT newval2;
18218 addressT S, J1, J2, lo, hi;
18219
18220 S = (value & 0x00100000) >> 20;
18221 J2 = (value & 0x00080000) >> 19;
18222 J1 = (value & 0x00040000) >> 18;
18223 hi = (value & 0x0003f000) >> 12;
18224 lo = (value & 0x00000ffe) >> 1;
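/* Reading aid (derived from the extraction above, not part of the original
   comment): the 21-bit signed offset is stored without J-bit inversion,
   unlike the 25-bit B/BL form handled later: S goes into bit 10 and imm6
   into the low six bits of the first halfword; J1 into bit 13, J2 into
   bit 11 and imm11 into the low eleven bits of the second halfword.  */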
18225
18226 newval = md_chars_to_number (buf, THUMB_SIZE);
18227 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
18228 newval |= (S << 10) | hi;
18229 newval2 |= (J1 << 13) | (J2 << 11) | lo;
18230 md_number_to_chars (buf, newval, THUMB_SIZE);
18231 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
18232 }
18233 break;
18234
18235 case BFD_RELOC_THUMB_PCREL_BLX:
18236 case BFD_RELOC_THUMB_PCREL_BRANCH23:
18237 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
18238 as_bad_where (fixP->fx_file, fixP->fx_line,
18239 _("branch out of range"));
18240
18241 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
18242 /* For a BLX instruction, make sure that the relocation is rounded up
18243 to a word boundary. This follows the semantics of the instruction
18244 which specifies that bit 1 of the target address will come from bit
18245 1 of the base address. */
18246 value = (value + 1) & ~ 1;
18247
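/* Layout note (derived from the two ORs below, not part of the original
   comment): this is the original BL/BLX pair encoding, with offset bits
   22:12 stored in the first halfword and bits 11:1 in the second; the
   wider J1/J2 form is handled by the BFD_RELOC_THUMB_PCREL_BRANCH25 case.  */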
18248 if (fixP->fx_done || !seg->use_rela_p)
18249 {
18250 offsetT newval2;
18251
18252 newval = md_chars_to_number (buf, THUMB_SIZE);
18253 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
18254 newval |= (value & 0x7fffff) >> 12;
18255 newval2 |= (value & 0xfff) >> 1;
18256 md_number_to_chars (buf, newval, THUMB_SIZE);
18257 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
18258 }
18259 break;
18260
18261 case BFD_RELOC_THUMB_PCREL_BRANCH25:
18262 if ((value & ~0x1ffffff) && ((value & ~0x1ffffff) != ~0x1ffffff))
18263 as_bad_where (fixP->fx_file, fixP->fx_line,
18264 _("branch out of range"));
18265
18266 if (fixP->fx_done || !seg->use_rela_p)
18267 {
18268 offsetT newval2;
18269 addressT S, I1, I2, lo, hi;
18270
18271 S = (value & 0x01000000) >> 24;
18272 I1 = (value & 0x00800000) >> 23;
18273 I2 = (value & 0x00400000) >> 22;
18274 hi = (value & 0x003ff000) >> 12;
18275 lo = (value & 0x00000ffe) >> 1;
18276
18277 I1 = !(I1 ^ S);
18278 I2 = !(I2 ^ S);
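/* The BL/BLX (T1/T2) encodings store J1 = NOT(I1 EOR S) and
   J2 = NOT(I2 EOR S); the two statements above compute those J bits,
   reusing the I1/I2 variables, before they are merged into the second
   halfword below.  (Explanatory note, not part of the original source.)  */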
18279
18280 newval = md_chars_to_number (buf, THUMB_SIZE);
18281 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
18282 newval |= (S << 10) | hi;
18283 newval2 |= (I1 << 13) | (I2 << 11) | lo;
18284 md_number_to_chars (buf, newval, THUMB_SIZE);
18285 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
18286 }
18287 break;
18288
18289 case BFD_RELOC_8:
18290 if (fixP->fx_done || !seg->use_rela_p)
18291 md_number_to_chars (buf, value, 1);
18292 break;
18293
18294 case BFD_RELOC_16:
18295 if (fixP->fx_done || !seg->use_rela_p)
18296 md_number_to_chars (buf, value, 2);
18297 break;
18298
18299 #ifdef OBJ_ELF
18300 case BFD_RELOC_ARM_TLS_GD32:
18301 case BFD_RELOC_ARM_TLS_LE32:
18302 case BFD_RELOC_ARM_TLS_IE32:
18303 case BFD_RELOC_ARM_TLS_LDM32:
18304 case BFD_RELOC_ARM_TLS_LDO32:
18305 S_SET_THREAD_LOCAL (fixP->fx_addsy);
18306 /* fall through */
18307
18308 case BFD_RELOC_ARM_GOT32:
18309 case BFD_RELOC_ARM_GOTOFF:
18310 case BFD_RELOC_ARM_TARGET2:
18311 if (fixP->fx_done || !seg->use_rela_p)
18312 md_number_to_chars (buf, 0, 4);
18313 break;
18314 #endif
18315
18316 case BFD_RELOC_RVA:
18317 case BFD_RELOC_32:
18318 case BFD_RELOC_ARM_TARGET1:
18319 case BFD_RELOC_ARM_ROSEGREL32:
18320 case BFD_RELOC_ARM_SBREL32:
18321 case BFD_RELOC_32_PCREL:
18322 #ifdef TE_PE
18323 case BFD_RELOC_32_SECREL:
18324 #endif
18325 if (fixP->fx_done || !seg->use_rela_p)
18326 #ifdef TE_WINCE
18327 /* For WinCE we only do this for pcrel fixups. */
18328 if (fixP->fx_done || fixP->fx_pcrel)
18329 #endif
18330 md_number_to_chars (buf, value, 4);
18331 break;
18332
18333 #ifdef OBJ_ELF
18334 case BFD_RELOC_ARM_PREL31:
18335 if (fixP->fx_done || !seg->use_rela_p)
18336 {
18337 newval = md_chars_to_number (buf, 4) & 0x80000000;
18338 if ((value ^ (value >> 1)) & 0x40000000)
18339 {
18340 as_bad_where (fixP->fx_file, fixP->fx_line,
18341 _("rel31 relocation overflow"));
18342 }
18343 newval |= value & 0x7fffffff;
18344 md_number_to_chars (buf, newval, 4);
18345 }
18346 break;
18347 #endif
18348
18349 case BFD_RELOC_ARM_CP_OFF_IMM:
18350 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
18351 if (value < -1023 || value > 1023 || (value & 3))
18352 as_bad_where (fixP->fx_file, fixP->fx_line,
18353 _("co-processor offset out of range"));
18354 cp_off_common:
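/* Encoding sketch (assumption drawn from the code below): the magnitude,
   a byte offset already scaled by four for the _S2 variants, is stored
   divided by four in the low eight bits, and the sign selects the U
   (add/subtract) bit via INDEX_UP.  */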
18355 sign = value >= 0;
18356 if (value < 0)
18357 value = -value;
18358 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
18359 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
18360 newval = md_chars_to_number (buf, INSN_SIZE);
18361 else
18362 newval = get_thumb32_insn (buf);
18363 newval &= 0xff7fff00;
18364 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
18365 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
18366 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
18367 md_number_to_chars (buf, newval, INSN_SIZE);
18368 else
18369 put_thumb32_insn (buf, newval);
18370 break;
18371
18372 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
18373 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
18374 if (value < -255 || value > 255)
18375 as_bad_where (fixP->fx_file, fixP->fx_line,
18376 _("co-processor offset out of range"));
18377 value *= 4;
18378 goto cp_off_common;
18379
18380 case BFD_RELOC_ARM_THUMB_OFFSET:
18381 newval = md_chars_to_number (buf, THUMB_SIZE);
18382 /* Exactly what ranges apply, and where the offset is inserted,
18383 depends on the type of instruction; we can establish this from
18384 the top 4 bits. */
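/* Reading aid (summarising the cases below, not part of the original
   comment): a top nibble of 4 is a PC-relative load, 9 an SP-relative
   load/store, and 6, 7, 8 are word, byte and halfword loads/stores
   respectively; each case scales and positions the offset differently.  */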
18385 switch (newval >> 12)
18386 {
18387 case 4: /* PC load. */
18388 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
18389 forced to zero for these loads; md_pcrel_from has already
18390 compensated for this. */
18391 if (value & 3)
18392 as_bad_where (fixP->fx_file, fixP->fx_line,
18393 _("invalid offset, target not word aligned (0x%08lX)"),
18394 (((unsigned long) fixP->fx_frag->fr_address
18395 + (unsigned long) fixP->fx_where) & ~3)
18396 + (unsigned long) value);
18397
18398 if (value & ~0x3fc)
18399 as_bad_where (fixP->fx_file, fixP->fx_line,
18400 _("invalid offset, value too big (0x%08lX)"),
18401 (long) value);
18402
18403 newval |= value >> 2;
18404 break;
18405
18406 case 9: /* SP load/store. */
18407 if (value & ~0x3fc)
18408 as_bad_where (fixP->fx_file, fixP->fx_line,
18409 _("invalid offset, value too big (0x%08lX)"),
18410 (long) value);
18411 newval |= value >> 2;
18412 break;
18413
18414 case 6: /* Word load/store. */
18415 if (value & ~0x7c)
18416 as_bad_where (fixP->fx_file, fixP->fx_line,
18417 _("invalid offset, value too big (0x%08lX)"),
18418 (long) value);
18419 newval |= value << 4; /* 6 - 2. */
18420 break;
18421
18422 case 7: /* Byte load/store. */
18423 if (value & ~0x1f)
18424 as_bad_where (fixP->fx_file, fixP->fx_line,
18425 _("invalid offset, value too big (0x%08lX)"),
18426 (long) value);
18427 newval |= value << 6;
18428 break;
18429
18430 case 8: /* Halfword load/store. */
18431 if (value & ~0x3e)
18432 as_bad_where (fixP->fx_file, fixP->fx_line,
18433 _("invalid offset, value too big (0x%08lX)"),
18434 (long) value);
18435 newval |= value << 5; /* 6 - 1. */
18436 break;
18437
18438 default:
18439 as_bad_where (fixP->fx_file, fixP->fx_line,
18440 "Unable to process relocation for thumb opcode: %lx",
18441 (unsigned long) newval);
18442 break;
18443 }
18444 md_number_to_chars (buf, newval, THUMB_SIZE);
18445 break;
18446
18447 case BFD_RELOC_ARM_THUMB_ADD:
18448 /* This is a complicated relocation, since we use it for all of
18449 the following immediate relocations:
18450
18451 3bit ADD/SUB
18452 8bit ADD/SUB
18453 9bit ADD/SUB SP word-aligned
18454 10bit ADD PC/SP word-aligned
18455
18456 The type of instruction being processed is encoded in the
18457 instruction field:
18458
18459 0x8000 SUB
18460 0x00F0 Rd
18461 0x000F Rs
18462 */
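/* Illustrative summary of the dispatch below (not from the original
   comment): ADD/SUB SP, #imm uses the 9-bit SP form; ADD Rd, PC/SP, #imm
   uses the 10-bit form; ADD/SUB Rd, #imm the 8-bit form; and
   ADD/SUB Rd, Rs, #imm the 3-bit form.  */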
18463 newval = md_chars_to_number (buf, THUMB_SIZE);
18464 {
18465 int rd = (newval >> 4) & 0xf;
18466 int rs = newval & 0xf;
18467 int subtract = !!(newval & 0x8000);
18468
18469 /* Check for HI regs, only very restricted cases allowed:
18470 Adjusting SP, and using PC or SP to get an address. */
18471 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
18472 || (rs > 7 && rs != REG_SP && rs != REG_PC))
18473 as_bad_where (fixP->fx_file, fixP->fx_line,
18474 _("invalid Hi register with immediate"));
18475
18476 /* If value is negative, choose the opposite instruction. */
18477 if (value < 0)
18478 {
18479 value = -value;
18480 subtract = !subtract;
18481 if (value < 0)
18482 as_bad_where (fixP->fx_file, fixP->fx_line,
18483 _("immediate value out of range"));
18484 }
18485
18486 if (rd == REG_SP)
18487 {
18488 if (value & ~0x1fc)
18489 as_bad_where (fixP->fx_file, fixP->fx_line,
18490 _("invalid immediate for stack address calculation"));
18491 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
18492 newval |= value >> 2;
18493 }
18494 else if (rs == REG_PC || rs == REG_SP)
18495 {
18496 if (subtract || value & ~0x3fc)
18497 as_bad_where (fixP->fx_file, fixP->fx_line,
18498 _("invalid immediate for address calculation (value = 0x%08lX)"),
18499 (unsigned long) value);
18500 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
18501 newval |= rd << 8;
18502 newval |= value >> 2;
18503 }
18504 else if (rs == rd)
18505 {
18506 if (value & ~0xff)
18507 as_bad_where (fixP->fx_file, fixP->fx_line,
18508 _("immediate value out of range"));
18509 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
18510 newval |= (rd << 8) | value;
18511 }
18512 else
18513 {
18514 if (value & ~0x7)
18515 as_bad_where (fixP->fx_file, fixP->fx_line,
18516 _("immediate value out of range"));
18517 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
18518 newval |= rd | (rs << 3) | (value << 6);
18519 }
18520 }
18521 md_number_to_chars (buf, newval, THUMB_SIZE);
18522 break;
18523
18524 case BFD_RELOC_ARM_THUMB_IMM:
18525 newval = md_chars_to_number (buf, THUMB_SIZE);
18526 if (value < 0 || value > 255)
18527 as_bad_where (fixP->fx_file, fixP->fx_line,
18528 _("invalid immediate: %ld is too large"),
18529 (long) value);
18530 newval |= value;
18531 md_number_to_chars (buf, newval, THUMB_SIZE);
18532 break;
18533
18534 case BFD_RELOC_ARM_THUMB_SHIFT:
18535 /* 5bit shift value (0..32). LSL cannot take 32. */
18536 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
18537 temp = newval & 0xf800;
18538 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
18539 as_bad_where (fixP->fx_file, fixP->fx_line,
18540 _("invalid shift value: %ld"), (long) value);
18541 /* Shifts of zero must be encoded as LSL. */
18542 if (value == 0)
18543 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
18544 /* Shifts of 32 are encoded as zero. */
18545 else if (value == 32)
18546 value = 0;
18547 newval |= value << 6;
18548 md_number_to_chars (buf, newval, THUMB_SIZE);
18549 break;
18550
18551 case BFD_RELOC_VTABLE_INHERIT:
18552 case BFD_RELOC_VTABLE_ENTRY:
18553 fixP->fx_done = 0;
18554 return;
18555
18556 case BFD_RELOC_ARM_MOVW:
18557 case BFD_RELOC_ARM_MOVT:
18558 case BFD_RELOC_ARM_THUMB_MOVW:
18559 case BFD_RELOC_ARM_THUMB_MOVT:
18560 if (fixP->fx_done || !seg->use_rela_p)
18561 {
18562 /* REL format relocations are limited to a 16-bit addend. */
18563 if (!fixP->fx_done)
18564 {
18565 if (value < -0x1000 || value > 0xffff)
18566 as_bad_where (fixP->fx_file, fixP->fx_line,
18567 _("offset too big"));
18568 }
18569 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
18570 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
18571 {
18572 value >>= 16;
18573 }
18574
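/* Encoding note (assumption, matching the masks used below): the Thumb-2
   MOVW/MOVT immediate is split as imm4:i:imm3:imm8 across the two
   halfwords, while the ARM encoding keeps imm4 in bits 19:16 and imm12
   in bits 11:0.  */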
18575 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
18576 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
18577 {
18578 newval = get_thumb32_insn (buf);
18579 newval &= 0xfbf08f00;
18580 newval |= (value & 0xf000) << 4;
18581 newval |= (value & 0x0800) << 15;
18582 newval |= (value & 0x0700) << 4;
18583 newval |= (value & 0x00ff);
18584 put_thumb32_insn (buf, newval);
18585 }
18586 else
18587 {
18588 newval = md_chars_to_number (buf, 4);
18589 newval &= 0xfff0f000;
18590 newval |= value & 0x0fff;
18591 newval |= (value & 0xf000) << 4;
18592 md_number_to_chars (buf, newval, 4);
18593 }
18594 }
18595 return;
18596
18597 case BFD_RELOC_ARM_ALU_PC_G0_NC:
18598 case BFD_RELOC_ARM_ALU_PC_G0:
18599 case BFD_RELOC_ARM_ALU_PC_G1_NC:
18600 case BFD_RELOC_ARM_ALU_PC_G1:
18601 case BFD_RELOC_ARM_ALU_PC_G2:
18602 case BFD_RELOC_ARM_ALU_SB_G0_NC:
18603 case BFD_RELOC_ARM_ALU_SB_G0:
18604 case BFD_RELOC_ARM_ALU_SB_G1_NC:
18605 case BFD_RELOC_ARM_ALU_SB_G1:
18606 case BFD_RELOC_ARM_ALU_SB_G2:
18607 assert (!fixP->fx_done);
18608 if (!seg->use_rela_p)
18609 {
18610 bfd_vma insn;
18611 bfd_vma encoded_addend;
18612 bfd_vma addend_abs = abs (value);
18613
18614 /* Check that the absolute value of the addend can be
18615 expressed as an 8-bit constant plus a rotation. */
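/* For example (illustrative): an addend of 0x3f0000 can be encoded as the
   8-bit constant 0x3f rotated right by 16, whereas 0x101 cannot and would
   be rejected just below.  */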
18616 encoded_addend = encode_arm_immediate (addend_abs);
18617 if (encoded_addend == (unsigned int) FAIL)
18618 as_bad_where (fixP->fx_file, fixP->fx_line,
18619 _("the offset 0x%08lX is not representable"),
18620 addend_abs);
18621
18622 /* Extract the instruction. */
18623 insn = md_chars_to_number (buf, INSN_SIZE);
18624
18625 /* If the addend is positive, use an ADD instruction.
18626 Otherwise use a SUB. Take care not to destroy the S bit. */
18627 insn &= 0xff1fffff;
18628 if (value < 0)
18629 insn |= 1 << 22;
18630 else
18631 insn |= 1 << 23;
18632
18633 /* Place the encoded addend into the first 12 bits of the
18634 instruction. */
18635 insn &= 0xfffff000;
18636 insn |= encoded_addend;
18637
18638 /* Update the instruction. */
18639 md_number_to_chars (buf, insn, INSN_SIZE);
18640 }
18641 break;
18642
18643 case BFD_RELOC_ARM_LDR_PC_G0:
18644 case BFD_RELOC_ARM_LDR_PC_G1:
18645 case BFD_RELOC_ARM_LDR_PC_G2:
18646 case BFD_RELOC_ARM_LDR_SB_G0:
18647 case BFD_RELOC_ARM_LDR_SB_G1:
18648 case BFD_RELOC_ARM_LDR_SB_G2:
18649 assert (!fixP->fx_done);
18650 if (!seg->use_rela_p)
18651 {
18652 bfd_vma insn;
18653 bfd_vma addend_abs = abs (value);
18654
18655 /* Check that the absolute value of the addend can be
18656 encoded in 12 bits. */
18657 if (addend_abs >= 0x1000)
18658 as_bad_where (fixP->fx_file, fixP->fx_line,
18659 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
18660 addend_abs);
18661
18662 /* Extract the instruction. */
18663 insn = md_chars_to_number (buf, INSN_SIZE);
18664
18665 /* If the addend is negative, clear bit 23 of the instruction.
18666 Otherwise set it. */
18667 if (value < 0)
18668 insn &= ~(1 << 23);
18669 else
18670 insn |= 1 << 23;
18671
18672 /* Place the absolute value of the addend into the first 12 bits
18673 of the instruction. */
18674 insn &= 0xfffff000;
18675 insn |= addend_abs;
18676
18677 /* Update the instruction. */
18678 md_number_to_chars (buf, insn, INSN_SIZE);
18679 }
18680 break;
18681
18682 case BFD_RELOC_ARM_LDRS_PC_G0:
18683 case BFD_RELOC_ARM_LDRS_PC_G1:
18684 case BFD_RELOC_ARM_LDRS_PC_G2:
18685 case BFD_RELOC_ARM_LDRS_SB_G0:
18686 case BFD_RELOC_ARM_LDRS_SB_G1:
18687 case BFD_RELOC_ARM_LDRS_SB_G2:
18688 assert (!fixP->fx_done);
18689 if (!seg->use_rela_p)
18690 {
18691 bfd_vma insn;
18692 bfd_vma addend_abs = abs (value);
18693
18694 /* Check that the absolute value of the addend can be
18695 encoded in 8 bits. */
18696 if (addend_abs >= 0x100)
18697 as_bad_where (fixP->fx_file, fixP->fx_line,
18698 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
18699 addend_abs);
18700
18701 /* Extract the instruction. */
18702 insn = md_chars_to_number (buf, INSN_SIZE);
18703
18704 /* If the addend is negative, clear bit 23 of the instruction.
18705 Otherwise set it. */
18706 if (value < 0)
18707 insn &= ~(1 << 23);
18708 else
18709 insn |= 1 << 23;
18710
18711 /* Place the first four bits of the absolute value of the addend
18712 into the first 4 bits of the instruction, and the remaining
18713 four into bits 8 .. 11. */
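/* E.g. (illustrative) an addend magnitude of 0x5c is stored as immedH = 5
   in bits 11:8 and immedL = 0xc in bits 3:0, matching the addressing-mode-3
   split immediate used by LDRH/LDRSB/LDRSH.  */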
18714 insn &= 0xfffff0f0;
18715 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
18716
18717 /* Update the instruction. */
18718 md_number_to_chars (buf, insn, INSN_SIZE);
18719 }
18720 break;
18721
18722 case BFD_RELOC_ARM_LDC_PC_G0:
18723 case BFD_RELOC_ARM_LDC_PC_G1:
18724 case BFD_RELOC_ARM_LDC_PC_G2:
18725 case BFD_RELOC_ARM_LDC_SB_G0:
18726 case BFD_RELOC_ARM_LDC_SB_G1:
18727 case BFD_RELOC_ARM_LDC_SB_G2:
18728 assert (!fixP->fx_done);
18729 if (!seg->use_rela_p)
18730 {
18731 bfd_vma insn;
18732 bfd_vma addend_abs = abs (value);
18733
18734 /* Check that the absolute value of the addend is a multiple of
18735 four and, when divided by four, fits in 8 bits. */
18736 if (addend_abs & 0x3)
18737 as_bad_where (fixP->fx_file, fixP->fx_line,
18738 _("bad offset 0x%08lX (must be word-aligned)"),
18739 addend_abs);
18740
18741 if ((addend_abs >> 2) > 0xff)
18742 as_bad_where (fixP->fx_file, fixP->fx_line,
18743 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
18744 addend_abs);
18745
18746 /* Extract the instruction. */
18747 insn = md_chars_to_number (buf, INSN_SIZE);
18748
18749 /* If the addend is negative, clear bit 23 of the instruction.
18750 Otherwise set it. */
18751 if (value < 0)
18752 insn &= ~(1 << 23);
18753 else
18754 insn |= 1 << 23;
18755
18756 /* Place the addend (divided by four) into the first eight
18757 bits of the instruction. */
18758 insn &= 0xfffffff0;
18759 insn |= addend_abs >> 2;
18760
18761 /* Update the instruction. */
18762 md_number_to_chars (buf, insn, INSN_SIZE);
18763 }
18764 break;
18765
18766 case BFD_RELOC_UNUSED:
18767 default:
18768 as_bad_where (fixP->fx_file, fixP->fx_line,
18769 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
18770 }
18771 }
18772
18773 /* Translate internal representation of relocation info to BFD target
18774 format. */
18775
18776 arelent *
18777 tc_gen_reloc (asection *section, fixS *fixp)
18778 {
18779 arelent * reloc;
18780 bfd_reloc_code_real_type code;
18781
18782 reloc = xmalloc (sizeof (arelent));
18783
18784 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
18785 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
18786 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
18787
18788 if (fixp->fx_pcrel)
18789 {
18790 if (section->use_rela_p)
18791 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
18792 else
18793 fixp->fx_offset = reloc->address;
18794 }
18795 reloc->addend = fixp->fx_offset;
18796
18797 switch (fixp->fx_r_type)
18798 {
18799 case BFD_RELOC_8:
18800 if (fixp->fx_pcrel)
18801 {
18802 code = BFD_RELOC_8_PCREL;
18803 break;
18804 }
18805
18806 case BFD_RELOC_16:
18807 if (fixp->fx_pcrel)
18808 {
18809 code = BFD_RELOC_16_PCREL;
18810 break;
18811 }
18812
18813 case BFD_RELOC_32:
18814 if (fixp->fx_pcrel)
18815 {
18816 code = BFD_RELOC_32_PCREL;
18817 break;
18818 }
18819
18820 case BFD_RELOC_ARM_MOVW:
18821 if (fixp->fx_pcrel)
18822 {
18823 code = BFD_RELOC_ARM_MOVW_PCREL;
18824 break;
18825 }
18826
18827 case BFD_RELOC_ARM_MOVT:
18828 if (fixp->fx_pcrel)
18829 {
18830 code = BFD_RELOC_ARM_MOVT_PCREL;
18831 break;
18832 }
18833
18834 case BFD_RELOC_ARM_THUMB_MOVW:
18835 if (fixp->fx_pcrel)
18836 {
18837 code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
18838 break;
18839 }
18840
18841 case BFD_RELOC_ARM_THUMB_MOVT:
18842 if (fixp->fx_pcrel)
18843 {
18844 code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
18845 break;
18846 }
18847
18848 case BFD_RELOC_NONE:
18849 case BFD_RELOC_ARM_PCREL_BRANCH:
18850 case BFD_RELOC_ARM_PCREL_BLX:
18851 case BFD_RELOC_RVA:
18852 case BFD_RELOC_THUMB_PCREL_BRANCH7:
18853 case BFD_RELOC_THUMB_PCREL_BRANCH9:
18854 case BFD_RELOC_THUMB_PCREL_BRANCH12:
18855 case BFD_RELOC_THUMB_PCREL_BRANCH20:
18856 case BFD_RELOC_THUMB_PCREL_BRANCH23:
18857 case BFD_RELOC_THUMB_PCREL_BRANCH25:
18858 case BFD_RELOC_THUMB_PCREL_BLX:
18859 case BFD_RELOC_VTABLE_ENTRY:
18860 case BFD_RELOC_VTABLE_INHERIT:
18861 #ifdef TE_PE
18862 case BFD_RELOC_32_SECREL:
18863 #endif
18864 code = fixp->fx_r_type;
18865 break;
18866
18867 case BFD_RELOC_ARM_LITERAL:
18868 case BFD_RELOC_ARM_HWLITERAL:
18869 /* If this is called then a literal has been
18870 referenced across a section boundary. */
18871 as_bad_where (fixp->fx_file, fixp->fx_line,
18872 _("literal referenced across section boundary"));
18873 return NULL;
18874
18875 #ifdef OBJ_ELF
18876 case BFD_RELOC_ARM_GOT32:
18877 case BFD_RELOC_ARM_GOTOFF:
18878 case BFD_RELOC_ARM_PLT32:
18879 case BFD_RELOC_ARM_TARGET1:
18880 case BFD_RELOC_ARM_ROSEGREL32:
18881 case BFD_RELOC_ARM_SBREL32:
18882 case BFD_RELOC_ARM_PREL31:
18883 case BFD_RELOC_ARM_TARGET2:
18884 case BFD_RELOC_ARM_TLS_LE32:
18885 case BFD_RELOC_ARM_TLS_LDO32:
18886 case BFD_RELOC_ARM_PCREL_CALL:
18887 case BFD_RELOC_ARM_PCREL_JUMP:
18888 case BFD_RELOC_ARM_ALU_PC_G0_NC:
18889 case BFD_RELOC_ARM_ALU_PC_G0:
18890 case BFD_RELOC_ARM_ALU_PC_G1_NC:
18891 case BFD_RELOC_ARM_ALU_PC_G1:
18892 case BFD_RELOC_ARM_ALU_PC_G2:
18893 case BFD_RELOC_ARM_LDR_PC_G0:
18894 case BFD_RELOC_ARM_LDR_PC_G1:
18895 case BFD_RELOC_ARM_LDR_PC_G2:
18896 case BFD_RELOC_ARM_LDRS_PC_G0:
18897 case BFD_RELOC_ARM_LDRS_PC_G1:
18898 case BFD_RELOC_ARM_LDRS_PC_G2:
18899 case BFD_RELOC_ARM_LDC_PC_G0:
18900 case BFD_RELOC_ARM_LDC_PC_G1:
18901 case BFD_RELOC_ARM_LDC_PC_G2:
18902 case BFD_RELOC_ARM_ALU_SB_G0_NC:
18903 case BFD_RELOC_ARM_ALU_SB_G0:
18904 case BFD_RELOC_ARM_ALU_SB_G1_NC:
18905 case BFD_RELOC_ARM_ALU_SB_G1:
18906 case BFD_RELOC_ARM_ALU_SB_G2:
18907 case BFD_RELOC_ARM_LDR_SB_G0:
18908 case BFD_RELOC_ARM_LDR_SB_G1:
18909 case BFD_RELOC_ARM_LDR_SB_G2:
18910 case BFD_RELOC_ARM_LDRS_SB_G0:
18911 case BFD_RELOC_ARM_LDRS_SB_G1:
18912 case BFD_RELOC_ARM_LDRS_SB_G2:
18913 case BFD_RELOC_ARM_LDC_SB_G0:
18914 case BFD_RELOC_ARM_LDC_SB_G1:
18915 case BFD_RELOC_ARM_LDC_SB_G2:
18916 code = fixp->fx_r_type;
18917 break;
18918
18919 case BFD_RELOC_ARM_TLS_GD32:
18920 case BFD_RELOC_ARM_TLS_IE32:
18921 case BFD_RELOC_ARM_TLS_LDM32:
18922 /* BFD will include the symbol's address in the addend.
18923 But we don't want that, so subtract it out again here. */
18924 if (!S_IS_COMMON (fixp->fx_addsy))
18925 reloc->addend -= (*reloc->sym_ptr_ptr)->value;
18926 code = fixp->fx_r_type;
18927 break;
18928 #endif
18929
18930 case BFD_RELOC_ARM_IMMEDIATE:
18931 as_bad_where (fixp->fx_file, fixp->fx_line,
18932 _("internal relocation (type: IMMEDIATE) not fixed up"));
18933 return NULL;
18934
18935 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
18936 as_bad_where (fixp->fx_file, fixp->fx_line,
18937 _("ADRL used for a symbol not defined in the same file"));
18938 return NULL;
18939
18940 case BFD_RELOC_ARM_OFFSET_IMM:
18941 if (section->use_rela_p)
18942 {
18943 code = fixp->fx_r_type;
18944 break;
18945 }
18946
18947 if (fixp->fx_addsy != NULL
18948 && !S_IS_DEFINED (fixp->fx_addsy)
18949 && S_IS_LOCAL (fixp->fx_addsy))
18950 {
18951 as_bad_where (fixp->fx_file, fixp->fx_line,
18952 _("undefined local label `%s'"),
18953 S_GET_NAME (fixp->fx_addsy));
18954 return NULL;
18955 }
18956
18957 as_bad_where (fixp->fx_file, fixp->fx_line,
18958 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
18959 return NULL;
18960
18961 default:
18962 {
18963 char * type;
18964
18965 switch (fixp->fx_r_type)
18966 {
18967 case BFD_RELOC_NONE: type = "NONE"; break;
18968 case BFD_RELOC_ARM_OFFSET_IMM8: type = "OFFSET_IMM8"; break;
18969 case BFD_RELOC_ARM_SHIFT_IMM: type = "SHIFT_IMM"; break;
18970 case BFD_RELOC_ARM_SMC: type = "SMC"; break;
18971 case BFD_RELOC_ARM_SWI: type = "SWI"; break;
18972 case BFD_RELOC_ARM_MULTI: type = "MULTI"; break;
18973 case BFD_RELOC_ARM_CP_OFF_IMM: type = "CP_OFF_IMM"; break;
18974 case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
18975 case BFD_RELOC_ARM_THUMB_ADD: type = "THUMB_ADD"; break;
18976 case BFD_RELOC_ARM_THUMB_SHIFT: type = "THUMB_SHIFT"; break;
18977 case BFD_RELOC_ARM_THUMB_IMM: type = "THUMB_IMM"; break;
18978 case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
18979 default: type = _("<unknown>"); break;
18980 }
18981 as_bad_where (fixp->fx_file, fixp->fx_line,
18982 _("cannot represent %s relocation in this object file format"),
18983 type);
18984 return NULL;
18985 }
18986 }
18987
18988 #ifdef OBJ_ELF
18989 if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
18990 && GOT_symbol
18991 && fixp->fx_addsy == GOT_symbol)
18992 {
18993 code = BFD_RELOC_ARM_GOTPC;
18994 reloc->addend = fixp->fx_offset = reloc->address;
18995 }
18996 #endif
18997
18998 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
18999
19000 if (reloc->howto == NULL)
19001 {
19002 as_bad_where (fixp->fx_file, fixp->fx_line,
19003 _("cannot represent %s relocation in this object file format"),
19004 bfd_get_reloc_code_name (code));
19005 return NULL;
19006 }
19007
19008 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
19009 vtable entry to be used in the relocation's section offset. */
19010 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
19011 reloc->address = fixp->fx_offset;
19012
19013 return reloc;
19014 }
19015
19016 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
19017
19018 void
19019 cons_fix_new_arm (fragS * frag,
19020 int where,
19021 int size,
19022 expressionS * exp)
19023 {
19024 bfd_reloc_code_real_type type;
19025 int pcrel = 0;
19026
19027 /* Pick a reloc.
19028 FIXME: @@ Should look at CPU word size. */
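/* For example (illustrative): .byte, .short/.hword, .word and .quad
   directives typically arrive here with sizes 1, 2, 4 and 8 respectively.  */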
19029 switch (size)
19030 {
19031 case 1:
19032 type = BFD_RELOC_8;
19033 break;
19034 case 2:
19035 type = BFD_RELOC_16;
19036 break;
19037 case 4:
19038 default:
19039 type = BFD_RELOC_32;
19040 break;
19041 case 8:
19042 type = BFD_RELOC_64;
19043 break;
19044 }
19045
19046 #ifdef TE_PE
19047 if (exp->X_op == O_secrel)
19048 {
19049 exp->X_op = O_symbol;
19050 type = BFD_RELOC_32_SECREL;
19051 }
19052 #endif
19053
19054 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
19055 }
19056
19057 #if defined OBJ_COFF || defined OBJ_ELF
19058 void
19059 arm_validate_fix (fixS * fixP)
19060 {
19061 /* If the destination of the branch is a defined symbol which does not have
19062 the THUMB_FUNC attribute, then we must be calling a function which has
19063 the (interfacearm) attribute. We look for the Thumb entry point to that
19064 function and change the branch to refer to that function instead. */
19065 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
19066 && fixP->fx_addsy != NULL
19067 && S_IS_DEFINED (fixP->fx_addsy)
19068 && ! THUMB_IS_FUNC (fixP->fx_addsy))
19069 {
19070 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
19071 }
19072 }
19073 #endif
19074
19075 int
19076 arm_force_relocation (struct fix * fixp)
19077 {
19078 #if defined (OBJ_COFF) && defined (TE_PE)
19079 if (fixp->fx_r_type == BFD_RELOC_RVA)
19080 return 1;
19081 #endif
19082
19083 /* Resolve these relocations even if the symbol is extern or weak. */
19084 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
19085 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
19086 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
19087 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
19088 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
19089 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
19090 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12)
19091 return 0;
19092
19093 /* Always leave these relocations for the linker. */
19094 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
19095 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
19096 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
19097 return 1;
19098
19099 /* Always generate relocations against function symbols. */
19100 if (fixp->fx_r_type == BFD_RELOC_32
19101 && fixp->fx_addsy
19102 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
19103 return 1;
19104
19105 return generic_force_reloc (fixp);
19106 }
19107
19108 #if defined (OBJ_ELF) || defined (OBJ_COFF)
19109 /* Relocations against function names must be left unadjusted,
19110 so that the linker can use this information to generate interworking
19111 stubs. The MIPS version of this function
19112 also prevents relocations that are mips-16 specific, but I do not
19113 know why it does this.
19114
19115 FIXME:
19116 There is one other problem that ought to be addressed here, but
19117 which currently is not: Taking the address of a label (rather
19118 than a function) and then later jumping to that address. Such
19119 addresses also ought to have their bottom bit set (assuming that
19120 they reside in Thumb code), but at the moment they will not. */
19121
19122 bfd_boolean
19123 arm_fix_adjustable (fixS * fixP)
19124 {
19125 if (fixP->fx_addsy == NULL)
19126 return 1;
19127
19128 /* Preserve relocations against symbols with function type. */
19129 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
19130 return 0;
19131
19132 if (THUMB_IS_FUNC (fixP->fx_addsy)
19133 && fixP->fx_subsy == NULL)
19134 return 0;
19135
19136 /* We need the symbol name for the VTABLE entries. */
19137 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
19138 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
19139 return 0;
19140
19141 /* Don't allow symbols to be discarded on GOT related relocs. */
19142 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
19143 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
19144 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
19145 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
19146 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
19147 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
19148 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
19149 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
19150 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
19151 return 0;
19152
19153 /* Similarly for group relocations. */
19154 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
19155 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
19156 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
19157 return 0;
19158
19159 return 1;
19160 }
19161 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
19162
19163 #ifdef OBJ_ELF
19164
19165 const char *
19166 elf32_arm_target_format (void)
19167 {
19168 #ifdef TE_SYMBIAN
19169 return (target_big_endian
19170 ? "elf32-bigarm-symbian"
19171 : "elf32-littlearm-symbian");
19172 #elif defined (TE_VXWORKS)
19173 return (target_big_endian
19174 ? "elf32-bigarm-vxworks"
19175 : "elf32-littlearm-vxworks");
19176 #else
19177 if (target_big_endian)
19178 return "elf32-bigarm";
19179 else
19180 return "elf32-littlearm";
19181 #endif
19182 }
19183
19184 void
19185 armelf_frob_symbol (symbolS * symp,
19186 int * puntp)
19187 {
19188 elf_frob_symbol (symp, puntp);
19189 }
19190 #endif
19191
19192 /* MD interface: Finalization. */
19193
19194 /* A good place to do this, although this was probably not intended
19195 for this kind of use. We need to dump the literal pool before
19196 references are made to a null symbol pointer. */
19197
19198 void
19199 arm_cleanup (void)
19200 {
19201 literal_pool * pool;
19202
19203 for (pool = list_of_pools; pool; pool = pool->next)
19204 {
19205 /* Put it at the end of the relevant section. */
19206 subseg_set (pool->section, pool->sub_section);
19207 #ifdef OBJ_ELF
19208 arm_elf_change_section ();
19209 #endif
19210 s_ltorg (0);
19211 }
19212 }
19213
19214 /* Adjust the symbol table. This marks Thumb symbols as distinct from
19215 ARM ones. */
19216
19217 void
19218 arm_adjust_symtab (void)
19219 {
19220 #ifdef OBJ_COFF
19221 symbolS * sym;
19222
19223 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
19224 {
19225 if (ARM_IS_THUMB (sym))
19226 {
19227 if (THUMB_IS_FUNC (sym))
19228 {
19229 /* Mark the symbol as a Thumb function. */
19230 if ( S_GET_STORAGE_CLASS (sym) == C_STAT
19231 || S_GET_STORAGE_CLASS (sym) == C_LABEL) /* This can happen! */
19232 S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
19233
19234 else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
19235 S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
19236 else
19237 as_bad (_("%s: unexpected function type: %d"),
19238 S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
19239 }
19240 else switch (S_GET_STORAGE_CLASS (sym))
19241 {
19242 case C_EXT:
19243 S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
19244 break;
19245 case C_STAT:
19246 S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
19247 break;
19248 case C_LABEL:
19249 S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
19250 break;
19251 default:
19252 /* Do nothing. */
19253 break;
19254 }
19255 }
19256
19257 if (ARM_IS_INTERWORK (sym))
19258 coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
19259 }
19260 #endif
19261 #ifdef OBJ_ELF
19262 symbolS * sym;
19263 char bind;
19264
19265 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
19266 {
19267 if (ARM_IS_THUMB (sym))
19268 {
19269 elf_symbol_type * elf_sym;
19270
19271 elf_sym = elf_symbol (symbol_get_bfdsym (sym));
19272 bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);
19273
19274 if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
19275 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
19276 {
19277 /* If it's a .thumb_func, declare it as so,
19278 otherwise tag label as .code 16. */
19279 if (THUMB_IS_FUNC (sym))
19280 elf_sym->internal_elf_sym.st_info =
19281 ELF_ST_INFO (bind, STT_ARM_TFUNC);
19282 else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
19283 elf_sym->internal_elf_sym.st_info =
19284 ELF_ST_INFO (bind, STT_ARM_16BIT);
19285 }
19286 }
19287 }
19288 #endif
19289 }
19290
19291 /* MD interface: Initialization. */
19292
19293 static void
19294 set_constant_flonums (void)
19295 {
19296 int i;
19297
19298 for (i = 0; i < NUM_FLOAT_VALS; i++)
19299 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
19300 abort ();
19301 }
19302
19303 /* Auto-select Thumb mode if it's the only available instruction set for the
19304 given architecture. */
19305
19306 static void
19307 autoselect_thumb_from_cpu_variant (void)
19308 {
19309 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
19310 opcode_select (16);
19311 }
19312
19313 void
19314 md_begin (void)
19315 {
19316 unsigned mach;
19317 unsigned int i;
19318
19319 if ( (arm_ops_hsh = hash_new ()) == NULL
19320 || (arm_cond_hsh = hash_new ()) == NULL
19321 || (arm_shift_hsh = hash_new ()) == NULL
19322 || (arm_psr_hsh = hash_new ()) == NULL
19323 || (arm_v7m_psr_hsh = hash_new ()) == NULL
19324 || (arm_reg_hsh = hash_new ()) == NULL
19325 || (arm_reloc_hsh = hash_new ()) == NULL
19326 || (arm_barrier_opt_hsh = hash_new ()) == NULL)
19327 as_fatal (_("virtual memory exhausted"));
19328
19329 for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
19330 hash_insert (arm_ops_hsh, insns[i].template, (PTR) (insns + i));
19331 for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
19332 hash_insert (arm_cond_hsh, conds[i].template, (PTR) (conds + i));
19333 for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
19334 hash_insert (arm_shift_hsh, shift_names[i].name, (PTR) (shift_names + i));
19335 for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
19336 hash_insert (arm_psr_hsh, psrs[i].template, (PTR) (psrs + i));
19337 for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
19338 hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template, (PTR) (v7m_psrs + i));
19339 for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
19340 hash_insert (arm_reg_hsh, reg_names[i].name, (PTR) (reg_names + i));
19341 for (i = 0;
19342 i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
19343 i++)
19344 hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template,
19345 (PTR) (barrier_opt_names + i));
19346 #ifdef OBJ_ELF
19347 for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++)
19348 hash_insert (arm_reloc_hsh, reloc_names[i].name, (PTR) (reloc_names + i));
19349 #endif
19350
19351 set_constant_flonums ();
19352
19353 /* Set the cpu variant based on the command-line options. We prefer
19354 -mcpu= over -march= if both are set (as for GCC); and we prefer
19355 -mfpu= over any other way of setting the floating point unit.
19356 Use of legacy options together with new-style options is faulted. */
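/* For example (illustrative): "-mcpu=arm7tdmi -march=armv5t" assembles for
   arm7tdmi, and "-mcpu=arm9e -mfpu=softvfp" uses softvfp rather than the
   FPU implied by the CPU selection.  */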
19357 if (legacy_cpu)
19358 {
19359 if (mcpu_cpu_opt || march_cpu_opt)
19360 as_bad (_("use of old and new-style options to set CPU type"));
19361
19362 mcpu_cpu_opt = legacy_cpu;
19363 }
19364 else if (!mcpu_cpu_opt)
19365 mcpu_cpu_opt = march_cpu_opt;
19366
19367 if (legacy_fpu)
19368 {
19369 if (mfpu_opt)
19370 as_bad (_("use of old and new-style options to set FPU type"));
19371
19372 mfpu_opt = legacy_fpu;
19373 }
19374 else if (!mfpu_opt)
19375 {
19376 #if !(defined (TE_LINUX) || defined (TE_NetBSD) || defined (TE_VXWORKS))
19377 /* Some environments specify a default FPU. If they don't, infer it
19378 from the processor. */
19379 if (mcpu_fpu_opt)
19380 mfpu_opt = mcpu_fpu_opt;
19381 else
19382 mfpu_opt = march_fpu_opt;
19383 #else
19384 mfpu_opt = &fpu_default;
19385 #endif
19386 }
19387
19388 if (!mfpu_opt)
19389 {
19390 if (mcpu_cpu_opt != NULL)
19391 mfpu_opt = &fpu_default;
19392 else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
19393 mfpu_opt = &fpu_arch_vfp_v2;
19394 else
19395 mfpu_opt = &fpu_arch_fpa;
19396 }
19397
19398 #ifdef CPU_DEFAULT
19399 if (!mcpu_cpu_opt)
19400 {
19401 mcpu_cpu_opt = &cpu_default;
19402 selected_cpu = cpu_default;
19403 }
19404 #else
19405 if (mcpu_cpu_opt)
19406 selected_cpu = *mcpu_cpu_opt;
19407 else
19408 mcpu_cpu_opt = &arm_arch_any;
19409 #endif
19410
19411 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
19412
19413 autoselect_thumb_from_cpu_variant ();
19414
19415 arm_arch_used = thumb_arch_used = arm_arch_none;
19416
19417 #if defined OBJ_COFF || defined OBJ_ELF
19418 {
19419 unsigned int flags = 0;
19420
19421 #if defined OBJ_ELF
19422 flags = meabi_flags;
19423
19424 switch (meabi_flags)
19425 {
19426 case EF_ARM_EABI_UNKNOWN:
19427 #endif
19428 /* Set the flags in the private structure. */
19429 if (uses_apcs_26) flags |= F_APCS26;
19430 if (support_interwork) flags |= F_INTERWORK;
19431 if (uses_apcs_float) flags |= F_APCS_FLOAT;
19432 if (pic_code) flags |= F_PIC;
19433 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
19434 flags |= F_SOFT_FLOAT;
19435
19436 switch (mfloat_abi_opt)
19437 {
19438 case ARM_FLOAT_ABI_SOFT:
19439 case ARM_FLOAT_ABI_SOFTFP:
19440 flags |= F_SOFT_FLOAT;
19441 break;
19442
19443 case ARM_FLOAT_ABI_HARD:
19444 if (flags & F_SOFT_FLOAT)
19445 as_bad (_("hard-float conflicts with specified fpu"));
19446 break;
19447 }
19448
19449 /* Using pure-endian doubles (even if soft-float). */
19450 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
19451 flags |= F_VFP_FLOAT;
19452
19453 #if defined OBJ_ELF
19454 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
19455 flags |= EF_ARM_MAVERICK_FLOAT;
19456 break;
19457
19458 case EF_ARM_EABI_VER4:
19459 case EF_ARM_EABI_VER5:
19460 /* No additional flags to set. */
19461 break;
19462
19463 default:
19464 abort ();
19465 }
19466 #endif
19467 bfd_set_private_flags (stdoutput, flags);
19468
19469 /* We have run out of flags in the COFF header to encode the
19470 status of ATPCS support, so instead we create a dummy,
19471 empty, debug section called .arm.atpcs. */
19472 if (atpcs)
19473 {
19474 asection * sec;
19475
19476 sec = bfd_make_section (stdoutput, ".arm.atpcs");
19477
19478 if (sec != NULL)
19479 {
19480 bfd_set_section_flags
19481 (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
19482 bfd_set_section_size (stdoutput, sec, 0);
19483 bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
19484 }
19485 }
19486 }
19487 #endif
19488
19489 /* Record the CPU type as well. */
19490 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
19491 mach = bfd_mach_arm_iWMMXt2;
19492 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
19493 mach = bfd_mach_arm_iWMMXt;
19494 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
19495 mach = bfd_mach_arm_XScale;
19496 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
19497 mach = bfd_mach_arm_ep9312;
19498 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
19499 mach = bfd_mach_arm_5TE;
19500 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
19501 {
19502 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
19503 mach = bfd_mach_arm_5T;
19504 else
19505 mach = bfd_mach_arm_5;
19506 }
19507 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
19508 {
19509 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
19510 mach = bfd_mach_arm_4T;
19511 else
19512 mach = bfd_mach_arm_4;
19513 }
19514 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
19515 mach = bfd_mach_arm_3M;
19516 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
19517 mach = bfd_mach_arm_3;
19518 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
19519 mach = bfd_mach_arm_2a;
19520 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
19521 mach = bfd_mach_arm_2;
19522 else
19523 mach = bfd_mach_arm_unknown;
19524
19525 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
19526 }
19527
19528 /* Command line processing. */
19529
19530 /* md_parse_option
19531 Invocation line includes a switch not recognized by the base assembler.
19532 See if it's a processor-specific option.
19533
19534 This routine is somewhat complicated by the need for backwards
19535 compatibility (since older releases of gcc can't be changed).
19536 The new options try to make the interface as compatible as
19537 possible with GCC.
19538
19539 New options (supported) are:
19540
19541 -mcpu=<cpu name> Assemble for selected processor
19542 -march=<architecture name> Assemble for selected architecture
19543 -mfpu=<fpu architecture> Assemble for selected FPU.
19544 -EB/-mbig-endian Big-endian
19545 -EL/-mlittle-endian Little-endian
19546 -k Generate PIC code
19547 -mthumb Start in Thumb mode
19548 -mthumb-interwork Code supports ARM/Thumb interworking
19549
19550 For now we will also provide support for:
19551
19552 -mapcs-32 32-bit Program counter
19553 -mapcs-26 26-bit Program counter
19554 -mapcs-float Floats passed in FP registers
19555 -mapcs-reentrant Reentrant code
19556 -matpcs
19557 (at some point these will probably be replaced with -mapcs=<list of options>
19558 and -matpcs=<list of options>)
19559
19560 The remaining options are only supported for backwards compatibility.
19561 Cpu variants, the arm part is optional:
19562 -m[arm]1 Currently not supported.
19563 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
19564 -m[arm]3 Arm 3 processor
19565 -m[arm]6[xx], Arm 6 processors
19566 -m[arm]7[xx][t][[d]m] Arm 7 processors
19567 -m[arm]8[10] Arm 8 processors
19568 -m[arm]9[20][tdmi] Arm 9 processors
19569 -mstrongarm[110[0]] StrongARM processors
19570 -mxscale XScale processors
19571 -m[arm]v[2345[t[e]]] Arm architectures
19572 -mall All (except the ARM1)
19573 FP variants:
19574 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
19575 -mfpe-old (No float load/store multiples)
19576 -mvfpxd VFP Single precision
19577 -mvfp All VFP
19578 -mno-fpu Disable all floating point instructions
19579
19580 The following CPU names are recognized:
19581 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
19582 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
19583 arm700i, arm710, arm710t, arm720, arm720t, arm740t, arm710c,
19584 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
19585 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
19586 arm10t, arm10e, arm1020t, arm1020e, arm10200e,
19587 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
19588
19589 */
19590
19591 const char * md_shortopts = "m:k";
19592
19593 #ifdef ARM_BI_ENDIAN
19594 #define OPTION_EB (OPTION_MD_BASE + 0)
19595 #define OPTION_EL (OPTION_MD_BASE + 1)
19596 #else
19597 #if TARGET_BYTES_BIG_ENDIAN
19598 #define OPTION_EB (OPTION_MD_BASE + 0)
19599 #else
19600 #define OPTION_EL (OPTION_MD_BASE + 1)
19601 #endif
19602 #endif
19603
19604 struct option md_longopts[] =
19605 {
19606 #ifdef OPTION_EB
19607 {"EB", no_argument, NULL, OPTION_EB},
19608 #endif
19609 #ifdef OPTION_EL
19610 {"EL", no_argument, NULL, OPTION_EL},
19611 #endif
19612 {NULL, no_argument, NULL, 0}
19613 };
19614
19615 size_t md_longopts_size = sizeof (md_longopts);
19616
19617 struct arm_option_table
19618 {
19619 char *option; /* Option name to match. */
19620 char *help; /* Help information. */
19621 int *var; /* Variable to change. */
19622 int value; /* What to change it to. */
19623 char *deprecated; /* If non-null, print this message. */
19624 };
19625
19626 struct arm_option_table arm_opts[] =
19627 {
19628 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
19629 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
19630 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
19631 &support_interwork, 1, NULL},
19632 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
19633 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
19634 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
19635 1, NULL},
19636 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
19637 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
19638 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
19639 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
19640 NULL},
19641
19642 /* These are recognized by the assembler, but have no effect on code. */
19643 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
19644 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
19645 {NULL, NULL, NULL, 0, NULL}
19646 };
19647
19648 struct arm_legacy_option_table
19649 {
19650 char *option; /* Option name to match. */
19651 const arm_feature_set **var; /* Variable to change. */
19652 const arm_feature_set value; /* What to change it to. */
19653 char *deprecated; /* If non-null, print this message. */
19654 };
19655
19656 const struct arm_legacy_option_table arm_legacy_opts[] =
19657 {
19658 /* DON'T add any new processors to this list -- we want the whole list
19659 to go away... Add them to the processors table instead. */
19660 {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
19661 {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
19662 {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
19663 {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
19664 {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
19665 {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
19666 {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
19667 {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
19668 {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
19669 {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
19670 {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
19671 {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
19672 {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
19673 {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
19674 {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
19675 {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
19676 {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
19677 {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
19678 {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
19679 {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
19680 {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
19681 {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
19682 {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
19683 {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
19684 {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
19685 {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
19686 {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
19687 {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
19688 {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
19689 {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
19690 {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
19691 {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
19692 {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
19693 {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
19694 {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
19695 {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
19696 {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
19697 {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
19698 {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
19699 {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
19700 {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
19701 {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
19702 {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
19703 {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
19704 {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
19705 {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
19706 {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
19707 {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
19708 {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
19709 {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
19710 {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
19711 {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
19712 {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
19713 {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
19714 {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
19715 {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
19716 {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
19717 {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
19718 {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
19719 {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
19720 {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
19721 {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
19722 {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
19723 {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
19724 {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
19725 {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
19726 {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
19727 {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
19728 {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
19729 {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
19730 N_("use -mcpu=strongarm110")},
19731 {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
19732 N_("use -mcpu=strongarm1100")},
19733 {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
19734 N_("use -mcpu=strongarm1110")},
19735 {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
19736 {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
19737 {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},
19738
19739 /* Architecture variants -- don't add any more to this list either. */
19740 {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
19741 {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
19742 {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
19743 {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
19744 {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
19745 {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
19746 {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
19747 {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
19748 {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
19749 {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
19750 {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
19751 {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
19752 {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
19753 {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
19754 {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
19755 {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
19756 {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
19757 {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
19758
19759 /* Floating point variants -- don't add any more to this list either. */
19760 {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
19761 {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
19762 {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
19763 {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
19764 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
19765
19766 {NULL, NULL, ARM_ARCH_NONE, NULL}
19767 };
19768
19769 struct arm_cpu_option_table
19770 {
19771 char *name;
19772 const arm_feature_set value;
19773 /* For some CPUs we assume an FPU unless the user explicitly sets
19774 -mfpu=... */
19775 const arm_feature_set default_fpu;
19776 /* The canonical name of the CPU, or NULL to use NAME converted to upper
19777 case. */
19778 const char *canonical_name;
19779 };
19780
19781 /* This list should, at a minimum, contain all the cpu names
19782 recognized by GCC. */
19783 static const struct arm_cpu_option_table arm_cpus[] =
19784 {
19785 {"all", ARM_ANY, FPU_ARCH_FPA, NULL},
19786 {"arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL},
19787 {"arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL},
19788 {"arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
19789 {"arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
19790 {"arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19791 {"arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19792 {"arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19793 {"arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19794 {"arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19795 {"arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19796 {"arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
19797 {"arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19798 {"arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
19799 {"arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19800 {"arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
19801 {"arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19802 {"arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19803 {"arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19804 {"arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19805 {"arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19806 {"arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19807 {"arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19808 {"arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19809 {"arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19810 {"arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19811 {"arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19812 {"arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19813 {"arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19814 {"arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19815 {"arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19816 {"arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19817 {"arm810", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19818 {"strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19819 {"strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19820 {"strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19821 {"strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19822 {"strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19823 {"arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19824 {"arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"},
19825 {"arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19826 {"arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19827 {"arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19828 {"arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19829 /* For V5 or later processors we default to using VFP; but the user
19830 should really set the FPU type explicitly. */
19831 {"arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
19832 {"arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19833 {"arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
19834 {"arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
19835 {"arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
19836 {"arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
19837 {"arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"},
19838 {"arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19839 {"arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
19840 {"arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"},
19841 {"arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19842 {"arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19843 {"arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
19844 {"arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
19845 {"arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19846 {"arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"},
19847 {"arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
19848 {"arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19849 {"arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19850 {"arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM1026EJ-S"},
19851 {"arm1026ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
19852 {"arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"},
19853 {"arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL},
19854 {"arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2, "ARM1136JF-S"},
19855 {"arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL},
19856 {"mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, NULL},
19857 {"mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, NULL},
19858 {"arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL},
19859 {"arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL},
19860 {"arm1176jz-s", ARM_ARCH_V6ZK, FPU_NONE, NULL},
19861 {"arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL},
19862 {"cortex-a8", ARM_ARCH_V7A, ARM_FEATURE(0, FPU_VFP_V3
19863 | FPU_NEON_EXT_V1),
19864 NULL},
19865 {"cortex-r4", ARM_ARCH_V7R, FPU_NONE, NULL},
19866 {"cortex-m3", ARM_ARCH_V7M, FPU_NONE, NULL},
19867 /* ??? XSCALE is really an architecture. */
19868 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
19869 /* ??? iwmmxt is not a processor. */
19870 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL},
19871 {"iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL},
19872 {"i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
19873 /* Maverick */
19874 {"ep9312", ARM_FEATURE(ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"},
19875 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL}
19876 };
19877
19878 struct arm_arch_option_table
19879 {
19880 char *name;
19881 const arm_feature_set value;
19882 const arm_feature_set default_fpu;
19883 };
19884
19885 /* This list should, at a minimum, contain all the architecture names
19886 recognized by GCC. */
19887 static const struct arm_arch_option_table arm_archs[] =
19888 {
19889 {"all", ARM_ANY, FPU_ARCH_FPA},
19890 {"armv1", ARM_ARCH_V1, FPU_ARCH_FPA},
19891 {"armv2", ARM_ARCH_V2, FPU_ARCH_FPA},
19892 {"armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA},
19893 {"armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA},
19894 {"armv3", ARM_ARCH_V3, FPU_ARCH_FPA},
19895 {"armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA},
19896 {"armv4", ARM_ARCH_V4, FPU_ARCH_FPA},
19897 {"armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA},
19898 {"armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA},
19899 {"armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA},
19900 {"armv5", ARM_ARCH_V5, FPU_ARCH_VFP},
19901 {"armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP},
19902 {"armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP},
19903 {"armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP},
19904 {"armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP},
19905 {"armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP},
19906 {"armv6", ARM_ARCH_V6, FPU_ARCH_VFP},
19907 {"armv6j", ARM_ARCH_V6, FPU_ARCH_VFP},
19908 {"armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP},
19909 {"armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP},
19910 {"armv6zk", ARM_ARCH_V6ZK, FPU_ARCH_VFP},
19911 {"armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP},
19912 {"armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP},
19913 {"armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP},
19914 {"armv6zkt2", ARM_ARCH_V6ZKT2, FPU_ARCH_VFP},
19915 {"armv7", ARM_ARCH_V7, FPU_ARCH_VFP},
19916 /* The official spelling of the ARMv7 profile variants is the dashed form.
19917 Accept the non-dashed form for compatibility with old toolchains. */
19918 {"armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP},
19919 {"armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP},
19920 {"armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP},
19921 {"armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP},
19922 {"armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP},
19923 {"armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP},
19924 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP},
19925 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP},
19926 {"iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP},
19927 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE}
19928 };
19929
19930 /* ISA extensions in the co-processor space. */
19931 struct arm_option_cpu_value_table
19932 {
19933 char *name;
19934 const arm_feature_set value;
19935 };
19936
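/* Architectural extensions that may be appended to a CPU or architecture
   name after a '+', e.g. the "+iwmmxt" in "-mcpu=xscale+iwmmxt"; see
   arm_parse_extension below.  */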
19937 static const struct arm_option_cpu_value_table arm_extensions[] =
19938 {
19939 {"maverick", ARM_FEATURE (0, ARM_CEXT_MAVERICK)},
19940 {"xscale", ARM_FEATURE (0, ARM_CEXT_XSCALE)},
19941 {"iwmmxt", ARM_FEATURE (0, ARM_CEXT_IWMMXT)},
19942 {"iwmmxt2", ARM_FEATURE (0, ARM_CEXT_IWMMXT2)},
19943 {NULL, ARM_ARCH_NONE}
19944 };
19945
19946 /* This list should, at a minimum, contain all the fpu names
19947 recognized by GCC. */
19948 static const struct arm_option_cpu_value_table arm_fpus[] =
19949 {
19950 {"softfpa", FPU_NONE},
19951 {"fpe", FPU_ARCH_FPE},
19952 {"fpe2", FPU_ARCH_FPE},
19953 {"fpe3", FPU_ARCH_FPA}, /* Third release supports LFM/SFM. */
19954 {"fpa", FPU_ARCH_FPA},
19955 {"fpa10", FPU_ARCH_FPA},
19956 {"fpa11", FPU_ARCH_FPA},
19957 {"arm7500fe", FPU_ARCH_FPA},
19958 {"softvfp", FPU_ARCH_VFP},
19959 {"softvfp+vfp", FPU_ARCH_VFP_V2},
19960 {"vfp", FPU_ARCH_VFP_V2},
19961 {"vfp9", FPU_ARCH_VFP_V2},
19962 {"vfp3", FPU_ARCH_VFP_V3},
19963 {"vfp10", FPU_ARCH_VFP_V2},
19964 {"vfp10-r0", FPU_ARCH_VFP_V1},
19965 {"vfpxd", FPU_ARCH_VFP_V1xD},
19966 {"arm1020t", FPU_ARCH_VFP_V1},
19967 {"arm1020e", FPU_ARCH_VFP_V2},
19968 {"arm1136jfs", FPU_ARCH_VFP_V2},
19969 {"arm1136jf-s", FPU_ARCH_VFP_V2},
19970 {"maverick", FPU_ARCH_MAVERICK},
19971 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
19972 {NULL, ARM_ARCH_NONE}
19973 };
19974
19975 struct arm_option_value_table
19976 {
19977 char *name;
19978 long value;
19979 };
19980
19981 static const struct arm_option_value_table arm_float_abis[] =
19982 {
19983 {"hard", ARM_FLOAT_ABI_HARD},
19984 {"softfp", ARM_FLOAT_ABI_SOFTFP},
19985 {"soft", ARM_FLOAT_ABI_SOFT},
19986 {NULL, 0}
19987 };
19988
19989 #ifdef OBJ_ELF
19990 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
19991 static const struct arm_option_value_table arm_eabis[] =
19992 {
19993 {"gnu", EF_ARM_EABI_UNKNOWN},
19994 {"4", EF_ARM_EABI_VER4},
19995 {"5", EF_ARM_EABI_VER5},
19996 {NULL, 0}
19997 };
19998 #endif
19999
20000 struct arm_long_option_table
20001 {
20002 char * option; /* Substring to match. */
20003 char * help; /* Help information. */
20004 int (* func) (char * subopt); /* Function to decode sub-option. */
20005 char * deprecated; /* If non-null, print this message. */
20006 };
20007
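/* Parse the '+'-separated extension suffix of a -mcpu= or -march= option,
   e.g. the "+iwmmxt" part of "-mcpu=xscale+iwmmxt".  STR points at the
   first '+'; each extension named in arm_extensions is merged into a
   heap-allocated copy of *OPT_P.  Returns 1 on success, 0 after reporting
   an error.  */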
20008 static int
20009 arm_parse_extension (char * str, const arm_feature_set **opt_p)
20010 {
20011 arm_feature_set *ext_set = xmalloc (sizeof (arm_feature_set));
20012
20013 /* Copy the feature set, so that we can modify it. */
20014 *ext_set = **opt_p;
20015 *opt_p = ext_set;
20016
20017 while (str != NULL && *str != 0)
20018 {
20019 const struct arm_option_cpu_value_table * opt;
20020 char * ext;
20021 int optlen;
20022
20023 if (*str != '+')
20024 {
20025 as_bad (_("invalid architectural extension"));
20026 return 0;
20027 }
20028
20029 str++;
20030 ext = strchr (str, '+');
20031
20032 if (ext != NULL)
20033 optlen = ext - str;
20034 else
20035 optlen = strlen (str);
20036
20037 if (optlen == 0)
20038 {
20039 as_bad (_("missing architectural extension"));
20040 return 0;
20041 }
20042
20043 for (opt = arm_extensions; opt->name != NULL; opt++)
20044 if (strncmp (opt->name, str, optlen) == 0)
20045 {
20046 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
20047 break;
20048 }
20049
20050 if (opt->name == NULL)
20051 {
20052 	  as_bad (_("unknown architectural extension `%s'"), str);
20053 return 0;
20054 }
20055
20056 str = ext;
20057 	}
20058
20059 return 1;
20060 }
20061
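/* Handle the -mcpu=<name>[+extension...] option.  The leading CPU name
   (up to any '+') is matched against arm_cpus; the matching entry supplies
   the default FPU and the canonical name later reported in the EABI
   Tag_CPU_name attribute.  Any extension suffix is handed on to
   arm_parse_extension.  */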
20062 static int
20063 arm_parse_cpu (char * str)
20064 {
20065 const struct arm_cpu_option_table * opt;
20066 char * ext = strchr (str, '+');
20067 int optlen;
20068
20069 if (ext != NULL)
20070 optlen = ext - str;
20071 else
20072 optlen = strlen (str);
20073
20074 if (optlen == 0)
20075 {
20076 as_bad (_("missing cpu name `%s'"), str);
20077 return 0;
20078 }
20079
20080 for (opt = arm_cpus; opt->name != NULL; opt++)
20081 if (strncmp (opt->name, str, optlen) == 0)
20082 {
20083 mcpu_cpu_opt = &opt->value;
20084 mcpu_fpu_opt = &opt->default_fpu;
20085 if (opt->canonical_name)
20086 	  strcpy (selected_cpu_name, opt->canonical_name);
20087 else
20088 {
20089 int i;
20090 for (i = 0; i < optlen; i++)
20091 selected_cpu_name[i] = TOUPPER (opt->name[i]);
20092 selected_cpu_name[i] = 0;
20093 }
20094
20095 if (ext != NULL)
20096 return arm_parse_extension (ext, &mcpu_cpu_opt);
20097
20098 return 1;
20099 }
20100
20101 as_bad (_("unknown cpu `%s'"), str);
20102 return 0;
20103 }
20104
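/* Handle the -march=<architecture>[+extension...] option, analogously to
   arm_parse_cpu above: the leading architecture name is matched against
   arm_archs and any extension suffix is passed to arm_parse_extension.  */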
20105 static int
20106 arm_parse_arch (char * str)
20107 {
20108 const struct arm_arch_option_table *opt;
20109 char *ext = strchr (str, '+');
20110 int optlen;
20111
20112 if (ext != NULL)
20113 optlen = ext - str;
20114 else
20115 optlen = strlen (str);
20116
20117 if (optlen == 0)
20118 {
20119 as_bad (_("missing architecture name `%s'"), str);
20120 return 0;
20121 }
20122
20123 for (opt = arm_archs; opt->name != NULL; opt++)
20124 if (strncmp (opt->name, str, optlen) == 0)
20125 {
20126 march_cpu_opt = &opt->value;
20127 march_fpu_opt = &opt->default_fpu;
20128 	strcpy (selected_cpu_name, opt->name);
20129
20130 if (ext != NULL)
20131 return arm_parse_extension (ext, &march_cpu_opt);
20132
20133 return 1;
20134 }
20135
20136 as_bad (_("unknown architecture `%s'"), str);
20137 return 0;
20138 }
20139
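/* Handle the -mfpu=<name> option: look the name up in arm_fpus and record
   the selected floating point architecture in mfpu_opt.  */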
20140 static int
20141 arm_parse_fpu (char * str)
20142 {
20143 const struct arm_option_cpu_value_table * opt;
20144
20145 for (opt = arm_fpus; opt->name != NULL; opt++)
20146 if (streq (opt->name, str))
20147 {
20148 mfpu_opt = &opt->value;
20149 return 1;
20150 }
20151
20152 as_bad (_("unknown floating point format `%s'"), str);
20153 return 0;
20154 }
20155
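/* Handle the -mfloat-abi=<abi> option; <abi> is one of "hard", "softfp"
   or "soft" (see arm_float_abis above).  */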
20156 static int
20157 arm_parse_float_abi (char * str)
20158 {
20159 const struct arm_option_value_table * opt;
20160
20161 for (opt = arm_float_abis; opt->name != NULL; opt++)
20162 if (streq (opt->name, str))
20163 {
20164 mfloat_abi_opt = opt->value;
20165 return 1;
20166 }
20167
20168 as_bad (_("unknown floating point abi `%s'"), str);
20169 return 0;
20170 }
20171
20172 #ifdef OBJ_ELF
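/* Handle the -meabi=<ver> option; <ver> is "gnu", "4" or "5" (see
   arm_eabis above).  */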
20173 static int
20174 arm_parse_eabi (char * str)
20175 {
20176 const struct arm_option_value_table *opt;
20177
20178 for (opt = arm_eabis; opt->name != NULL; opt++)
20179 if (streq (opt->name, str))
20180 {
20181 meabi_flags = opt->value;
20182 return 1;
20183 }
20184 as_bad (_("unknown EABI `%s'"), str);
20185 return 0;
20186 }
20187 #endif
20188
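/* Options that take an argument.  The option name is matched as a prefix
   of the user's argument, so e.g. "-mcpu=arm7tdmi" matches the "mcpu="
   entry and "arm7tdmi" is passed to arm_parse_cpu (see md_parse_option
   below).  */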
20189 struct arm_long_option_table arm_long_opts[] =
20190 {
20191 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
20192 arm_parse_cpu, NULL},
20193 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
20194 arm_parse_arch, NULL},
20195 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
20196 arm_parse_fpu, NULL},
20197 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
20198 arm_parse_float_abi, NULL},
20199 #ifdef OBJ_ELF
20200 {"meabi=", N_("<ver>\t assemble for eabi version <ver>"),
20201 arm_parse_eabi, NULL},
20202 #endif
20203 {NULL, NULL, 0, NULL}
20204 };
20205
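/* Target-specific command line option handling.  C is the option letter
   and ARG its argument (if any); the option is looked up first in
   arm_opts, then in the deprecated arm_legacy_opts, and finally in
   arm_long_opts above.  Returns nonzero if the option was recognized.  */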
20206 int
20207 md_parse_option (int c, char * arg)
20208 {
20209 struct arm_option_table *opt;
20210 const struct arm_legacy_option_table *fopt;
20211 struct arm_long_option_table *lopt;
20212
20213 switch (c)
20214 {
20215 #ifdef OPTION_EB
20216 case OPTION_EB:
20217 target_big_endian = 1;
20218 break;
20219 #endif
20220
20221 #ifdef OPTION_EL
20222 case OPTION_EL:
20223 target_big_endian = 0;
20224 break;
20225 #endif
20226
20227 case 'a':
20228 /* Listing option.  Just ignore these; we don't support any additional
20229 ones. */
20230 return 0;
20231
20232 default:
20233 for (opt = arm_opts; opt->option != NULL; opt++)
20234 {
20235 if (c == opt->option[0]
20236 && ((arg == NULL && opt->option[1] == 0)
20237 || streq (arg, opt->option + 1)))
20238 {
20239 #if WARN_DEPRECATED
20240 /* If the option is deprecated, tell the user. */
20241 if (opt->deprecated != NULL)
20242 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
20243 arg ? arg : "", _(opt->deprecated));
20244 #endif
20245
20246 if (opt->var != NULL)
20247 *opt->var = opt->value;
20248
20249 return 1;
20250 }
20251 }
20252
20253 for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
20254 {
20255 if (c == fopt->option[0]
20256 && ((arg == NULL && fopt->option[1] == 0)
20257 || streq (arg, fopt->option + 1)))
20258 {
20259 #if WARN_DEPRECATED
20260 /* If the option is deprecated, tell the user. */
20261 if (fopt->deprecated != NULL)
20262 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
20263 arg ? arg : "", _(fopt->deprecated));
20264 #endif
20265
20266 if (fopt->var != NULL)
20267 *fopt->var = &fopt->value;
20268
20269 return 1;
20270 }
20271 }
20272
20273 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
20274 {
20275 /* These options are expected to have an argument. */
20276 if (c == lopt->option[0]
20277 && arg != NULL
20278 && strncmp (arg, lopt->option + 1,
20279 strlen (lopt->option + 1)) == 0)
20280 {
20281 #if WARN_DEPRECATED
20282 /* If the option is deprecated, tell the user. */
20283 if (lopt->deprecated != NULL)
20284 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
20285 _(lopt->deprecated));
20286 #endif
20287
20288 /* Call the sub-option parser. */
20289 return lopt->func (arg + strlen (lopt->option) - 1);
20290 }
20291 }
20292
20293 return 0;
20294 }
20295
20296 return 1;
20297 }
20298
20299 void
20300 md_show_usage (FILE * fp)
20301 {
20302 struct arm_option_table *opt;
20303 struct arm_long_option_table *lopt;
20304
20305 fprintf (fp, _(" ARM-specific assembler options:\n"));
20306
20307 for (opt = arm_opts; opt->option != NULL; opt++)
20308 if (opt->help != NULL)
20309 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
20310
20311 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
20312 if (lopt->help != NULL)
20313 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
20314
20315 #ifdef OPTION_EB
20316 fprintf (fp, _("\
20317 -EB assemble code for a big-endian cpu\n"));
20318 #endif
20319
20320 #ifdef OPTION_EL
20321 fprintf (fp, _("\
20322 -EL assemble code for a little-endian cpu\n"));
20323 #endif
20324 }
20325
20326
20327 #ifdef OBJ_ELF
20328 typedef struct
20329 {
20330 int val;
20331 arm_feature_set flags;
20332 } cpu_arch_ver_table;
20333
20334 /* Mapping from CPU features to EABI CPU arch values.  The table must be
20335 sorted so that the entries with the fewest features come first. */
20336 static const cpu_arch_ver_table cpu_arch_ver[] =
20337 {
20338 {1, ARM_ARCH_V4},
20339 {2, ARM_ARCH_V4T},
20340 {3, ARM_ARCH_V5},
20341 {4, ARM_ARCH_V5TE},
20342 {5, ARM_ARCH_V5TEJ},
20343 {6, ARM_ARCH_V6},
20344 {7, ARM_ARCH_V6Z},
20345 {8, ARM_ARCH_V6K},
20346 {9, ARM_ARCH_V6T2},
20347 {10, ARM_ARCH_V7A},
20348 {10, ARM_ARCH_V7R},
20349 {10, ARM_ARCH_V7M},
20350 {0, ARM_ARCH_NONE}
20351 };
20352
20353 /* Set the public EABI object attributes. */
20354 static void
20355 aeabi_set_public_attributes (void)
20356 {
20357 int arch;
20358 arm_feature_set flags;
20359 arm_feature_set tmp;
20360 const cpu_arch_ver_table *p;
20361
20362 /* Choose the architecture based on the capabilities of the requested cpu
20363 (if any) and/or the instructions actually used. */
20364 ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
20365 ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
20366 ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
20367 /* Allow the user to override the reported architecture. */
20368 if (object_arch)
20369 {
20370 ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
20371 ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
20372 }
20373
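  /* Walk the table (ordered from fewest to most features); each entry that
     still overlaps the remaining feature set updates ARCH and has its
     features cleared, so ARCH ends up describing the most featureful
     architecture actually required.  */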
20374 tmp = flags;
20375 arch = 0;
20376 for (p = cpu_arch_ver; p->val; p++)
20377 {
20378 if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
20379 {
20380 arch = p->val;
20381 ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
20382 }
20383 }
20384
20385 /* Tag_CPU_name. */
20386 if (selected_cpu_name[0])
20387 {
20388 char *p;
20389
20390 p = selected_cpu_name;
20391 if (strncmp (p, "armv", 4) == 0)
20392 {
20393 int i;
20394
20395 p += 4;
20396 for (i = 0; p[i]; i++)
20397 p[i] = TOUPPER (p[i]);
20398 }
20399 elf32_arm_add_eabi_attr_string (stdoutput, 5, p);
20400 }
20401 /* Tag_CPU_arch. */
20402 elf32_arm_add_eabi_attr_int (stdoutput, 6, arch);
20403 /* Tag_CPU_arch_profile. */
20404 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
20405 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'A');
20406 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
20407 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'R');
20408 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m))
20409 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'M');
20410 /* Tag_ARM_ISA_use. */
20411 if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_full))
20412 elf32_arm_add_eabi_attr_int (stdoutput, 8, 1);
20413 /* Tag_THUMB_ISA_use. */
20414 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_full))
20415 elf32_arm_add_eabi_attr_int (stdoutput, 9,
20416 ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2) ? 2 : 1);
20417 /* Tag_VFP_arch. */
20418 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v3)
20419 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v3))
20420 elf32_arm_add_eabi_attr_int (stdoutput, 10, 3);
20421 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v2)
20422 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v2))
20423 elf32_arm_add_eabi_attr_int (stdoutput, 10, 2);
20424 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1)
20425 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1)
20426 || ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1xd)
20427 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1xd))
20428 elf32_arm_add_eabi_attr_int (stdoutput, 10, 1);
20429 /* Tag_WMMX_arch. */
20430 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_cext_iwmmxt)
20431 || ARM_CPU_HAS_FEATURE (arm_arch_used, arm_cext_iwmmxt))
20432 elf32_arm_add_eabi_attr_int (stdoutput, 11, 1);
20433 /* Tag_NEON_arch. */
20434 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_neon_ext_v1)
20435 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_neon_ext_v1))
20436 elf32_arm_add_eabi_attr_int (stdoutput, 12, 1);
20437 }
20438
20439 /* Add the .ARM.attributes section. */
20440 void
20441 arm_md_end (void)
20442 {
20443 segT s;
20444 char *p;
20445 addressT addr;
20446 offsetT size;
20447
20448 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
20449 return;
20450
20451 aeabi_set_public_attributes ();
20452 size = elf32_arm_eabi_attr_size (stdoutput);
20453 s = subseg_new (".ARM.attributes", 0);
20454 bfd_set_section_flags (stdoutput, s, SEC_READONLY | SEC_DATA);
20455 addr = frag_now_fix ();
20456 p = frag_more (size);
20457 elf32_arm_set_eabi_attr_contents (stdoutput, (bfd_byte *)p, size);
20458 }
20459 #endif /* OBJ_ELF */
20460
20461
20462 /* Parse a .cpu directive. */
20463
20464 static void
20465 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
20466 {
20467 const struct arm_cpu_option_table *opt;
20468 char *name;
20469 char saved_char;
20470
20471 name = input_line_pointer;
20472 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
20473 input_line_pointer++;
20474 saved_char = *input_line_pointer;
20475 *input_line_pointer = 0;
20476
20477 /* Skip the first "all" entry. */
20478 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
20479 if (streq (opt->name, name))
20480 {
20481 mcpu_cpu_opt = &opt->value;
20482 selected_cpu = opt->value;
20483 if (opt->canonical_name)
20484 	  strcpy (selected_cpu_name, opt->canonical_name);
20485 else
20486 {
20487 int i;
20488 for (i = 0; opt->name[i]; i++)
20489 selected_cpu_name[i] = TOUPPER (opt->name[i]);
20490 selected_cpu_name[i] = 0;
20491 }
20492 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
20493 *input_line_pointer = saved_char;
20494 demand_empty_rest_of_line ();
20495 return;
20496 }
20497 as_bad (_("unknown cpu `%s'"), name);
20498 *input_line_pointer = saved_char;
20499 ignore_rest_of_line ();
20500 }
20501
20502
20503 /* Parse a .arch directive. */
20504
20505 static void
20506 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
20507 {
20508 const struct arm_arch_option_table *opt;
20509 char saved_char;
20510 char *name;
20511
20512 name = input_line_pointer;
20513 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
20514 input_line_pointer++;
20515 saved_char = *input_line_pointer;
20516 *input_line_pointer = 0;
20517
20518 /* Skip the first "all" entry. */
20519 for (opt = arm_archs + 1; opt->name != NULL; opt++)
20520 if (streq (opt->name, name))
20521 {
20522 mcpu_cpu_opt = &opt->value;
20523 selected_cpu = opt->value;
20524 	strcpy (selected_cpu_name, opt->name);
20525 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
20526 *input_line_pointer = saved_char;
20527 demand_empty_rest_of_line ();
20528 return;
20529 }
20530
20531 as_bad (_("unknown architecture `%s'"), name);
20532 *input_line_pointer = saved_char;
20533 ignore_rest_of_line ();
20534 }
20535
20536
20537 /* Parse a .object_arch directive. */
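/* The architecture named here only changes what is reported in the EABI
   object attributes (object_arch is consulted by
   aeabi_set_public_attributes above); it does not change which
   instructions the assembler accepts.  */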
20538
20539 static void
20540 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
20541 {
20542 const struct arm_arch_option_table *opt;
20543 char saved_char;
20544 char *name;
20545
20546 name = input_line_pointer;
20547 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
20548 input_line_pointer++;
20549 saved_char = *input_line_pointer;
20550 *input_line_pointer = 0;
20551
20552 /* Skip the first "all" entry. */
20553 for (opt = arm_archs + 1; opt->name != NULL; opt++)
20554 if (streq (opt->name, name))
20555 {
20556 object_arch = &opt->value;
20557 *input_line_pointer = saved_char;
20558 demand_empty_rest_of_line ();
20559 return;
20560 }
20561
20562 as_bad (_("unknown architecture `%s'"), name);
20563 *input_line_pointer = saved_char;
20564 ignore_rest_of_line ();
20565 }
20566
20567
20568 /* Parse a .fpu directive. */
20569
20570 static void
20571 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
20572 {
20573 const struct arm_option_cpu_value_table *opt;
20574 char saved_char;
20575 char *name;
20576
20577 name = input_line_pointer;
20578 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
20579 input_line_pointer++;
20580 saved_char = *input_line_pointer;
20581 *input_line_pointer = 0;
20582
20583 for (opt = arm_fpus; opt->name != NULL; opt++)
20584 if (streq (opt->name, name))
20585 {
20586 mfpu_opt = &opt->value;
20587 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
20588 *input_line_pointer = saved_char;
20589 demand_empty_rest_of_line ();
20590 return;
20591 }
20592
20593 as_bad (_("unknown floating point format `%s'"), name);
20594 *input_line_pointer = saved_char;
20595 ignore_rest_of_line ();
20596 }
20597
20598 /* Copy symbol information. */
20599 void
20600 arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
20601 {
20602 ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
20603 }